Mirror of https://github.com/blakeblackshear/frigate.git, synced 2026-02-05 10:45:21 +03:00
Merge branch 'dev' of github.com:blakeblackshear/frigate into audio-events
commit 51a09b43d0
@@ -18,10 +18,13 @@ WORKDIR /rootfs
 FROM base AS nginx
 
 ARG DEBIAN_FRONTEND
+ENV CCACHE_DIR /root/.ccache
+ENV CCACHE_MAXSIZE 2G
 
 # bind /var/cache/apt to tmpfs to speed up nginx build
 RUN --mount=type=tmpfs,target=/tmp --mount=type=tmpfs,target=/var/cache/apt \
     --mount=type=bind,source=docker/build_nginx.sh,target=/deps/build_nginx.sh \
+    --mount=type=cache,target=/root/.ccache \
     /deps/build_nginx.sh
 
 FROM wget AS go2rtc
@@ -61,14 +64,16 @@ RUN mkdir /models \
 FROM wget as libusb-build
 ARG TARGETARCH
 ARG DEBIAN_FRONTEND
+ENV CCACHE_DIR /root/.ccache
+ENV CCACHE_MAXSIZE 2G
 
 # Build libUSB without udev. Needed for Openvino NCS2 support
 WORKDIR /opt
-RUN apt-get update && apt-get install -y unzip build-essential automake libtool
-RUN wget -q https://github.com/libusb/libusb/archive/v1.0.25.zip -O v1.0.25.zip && \
+RUN apt-get update && apt-get install -y unzip build-essential automake libtool ccache
+RUN --mount=type=cache,target=/root/.ccache wget -q https://github.com/libusb/libusb/archive/v1.0.25.zip -O v1.0.25.zip && \
     unzip v1.0.25.zip && cd libusb-1.0.25 && \
     ./bootstrap.sh && \
-    ./configure --disable-udev --enable-shared && \
+    ./configure CC='ccache gcc' CCX='ccache g++' --disable-udev --enable-shared && \
     make -j $(nproc --all)
 RUN apt-get update && \
     apt-get install -y --no-install-recommends libusb-1.0-0-dev && \
@@ -12,16 +12,32 @@ from frigate.util import create_mask
 # get info on the video
 # cap = cv2.VideoCapture("debug/front_cam_2023_05_23_08_41__2023_05_23_08_43.mp4")
 # cap = cv2.VideoCapture("debug/motion_test_clips/rain_1.mp4")
-cap = cv2.VideoCapture("debug/motion_test_clips/ir_off.mp4")
+cap = cv2.VideoCapture("debug/motion_test_clips/lawn_mower_night_1.mp4")
 # cap = cv2.VideoCapture("airport.mp4")
 width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
 height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
 fps = cap.get(cv2.CAP_PROP_FPS)
 frame_shape = (height, width, 3)
+# Nick back:
+# "1280,0,1280,316,1170,216,1146,126,1016,127,979,82,839,0",
+# "310,350,300,402,224,405,241,354",
+# "378,0,375,26,0,23,0,0",
+# Front door:
+# "1080,0,1080,339,1010,280,1020,169,777,163,452,170,318,299,191,365,186,417,139,470,108,516,40,530,0,514,0,0",
+# "336,833,438,1024,346,1093,103,1052,24,814",
+# Back
+# "1855,0,1851,100,1289,96,1105,161,1045,119,890,121,890,0",
+# "505,95,506,138,388,153,384,114",
+# "689,72,689,122,549,134,547,89",
+# "261,134,264,176,169,195,167,158",
+# "145,159,146,202,70,220,65,183",
+
+mask = create_mask(
+    (height, width),
+    [],
+    [
+        "1080,0,1080,339,1010,280,1020,169,777,163,452,170,318,299,191,365,186,417,139,470,108,516,40,530,0,514,0,0",
+        "336,833,438,1024,346,1093,103,1052,24,814",
+    ],
+)
 
 # create the motion config
@@ -29,7 +45,7 @@ motion_config_1 = MotionConfig()
 motion_config_1.mask = np.zeros((height, width), np.uint8)
 motion_config_1.mask[:] = mask
 # motion_config_1.improve_contrast = 1
-# motion_config_1.frame_height = 150
+motion_config_1.frame_height = 150
 # motion_config_1.frame_alpha = 0.02
 # motion_config_1.threshold = 30
 # motion_config_1.contour_area = 10
@@ -38,10 +54,11 @@ motion_config_2 = MotionConfig()
 motion_config_2.mask = np.zeros((height, width), np.uint8)
 motion_config_2.mask[:] = mask
 # motion_config_2.improve_contrast = 1
-# motion_config_2.frame_height = 150
+motion_config_2.frame_height = 150
 # motion_config_2.frame_alpha = 0.01
 # motion_config_2.threshold = 20
+motion_config_2.threshold = 20
 # motion_config.contour_area = 10
 
 save_images = True
 
improved_motion_detector_1 = ImprovedMotionDetector(
|
||||
@ -52,8 +69,6 @@ improved_motion_detector_1 = ImprovedMotionDetector(
|
||||
threshold=mp.Value("i", motion_config_1.threshold),
|
||||
contour_area=mp.Value("i", motion_config_1.contour_area),
|
||||
name="default",
|
||||
clipLimit=2.0,
|
||||
tileGridSize=(8, 8),
|
||||
)
|
||||
improved_motion_detector_1.save_images = save_images
|
||||
|
||||
|
||||
@@ -15,6 +15,10 @@ apt-get -yqq build-dep nginx
 
 apt-get -yqq install --no-install-recommends ca-certificates wget
 update-ca-certificates -f
+apt install -y ccache
+
+export PATH="/usr/lib/ccache:$PATH"
+
 mkdir /tmp/nginx
 wget -nv https://nginx.org/download/nginx-${NGINX_VERSION}.tar.gz
 tar -zxf nginx-${NGINX_VERSION}.tar.gz -C /tmp/nginx --strip-components=1
@@ -62,5 +66,5 @@ cd /tmp/nginx
     --add-module=../nginx-rtmp-module \
     --with-cc-opt="-O3 -Wno-error=implicit-fallthrough"
 
-make -j$(nproc) && make install
+make CC="ccache gcc" -j$(nproc) && make install
 rm -rf /usr/local/nginx/html /usr/local/nginx/conf/*.default
@@ -189,6 +189,11 @@ ffmpeg:
     record: preset-record-generic
     # Optional: output args for rtmp streams (default: shown below)
     rtmp: preset-rtmp-generic
+  # Optional: Time in seconds to wait before ffmpeg retries connecting to the camera. (default: shown below)
+  # If set too low, frigate will retry a connection to the camera's stream too frequently, using up the limited streams some cameras can allow at once
+  # If set too high, then if a ffmpeg crash or camera stream timeout occurs, you could potentially lose up to a maximum of retry_interval second(s) of footage
+  # NOTE: this can be a useful setting for Wireless / Battery cameras to reduce how much footage is potentially lost during a connection timeout.
+  retry_interval: 10
 
 # Optional: Detect configuration
 # NOTE: Can be overridden at the camera level
@@ -275,7 +280,7 @@ motion:
   # Optional: The threshold passed to cv2.threshold to determine if a pixel is different enough to be counted as motion. (default: shown below)
   # Increasing this value will make motion detection less sensitive and decreasing it will make motion detection more sensitive.
   # The value should be between 1 and 255.
-  threshold: 20
+  threshold: 30
   # Optional: The percentage of the image used to detect lightning or other substantial changes where motion detection
   # needs to recalibrate. (default: shown below)
   # Increasing this value will make motion detection more likely to consider lightning or ir mode changes as valid motion.
@@ -188,7 +188,7 @@ class RecordConfig(FrigateBaseModel):
 
 class MotionConfig(FrigateBaseModel):
     threshold: int = Field(
-        default=20,
+        default=30,
         title="Motion detection threshold (1-255).",
         ge=1,
         le=255,
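The new default only affects configs that leave threshold unset; the ge/le bounds on the Field still constrain explicit values. A minimal pydantic sketch of that validation behavior (the class name here is a stand-in, not Frigate's actual model):

```python
# Hypothetical stand-in for frigate.config.MotionConfig, showing how
# Field(ge=1, le=255) rejects out-of-range values around the new default.
from pydantic import BaseModel, Field, ValidationError

class MotionConfigSketch(BaseModel):
    threshold: int = Field(default=30, ge=1, le=255)

print(MotionConfigSketch().threshold)  # -> 30 (the new default)

try:
    MotionConfigSketch(threshold=0)  # violates ge=1
except ValidationError as err:
    print(err)  # pydantic reports the constraint violation
```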
@@ -477,6 +477,10 @@ class FfmpegConfig(FrigateBaseModel):
         default_factory=FfmpegOutputArgsConfig,
         title="FFmpeg output arguments per role.",
     )
+    retry_interval: float = Field(
+        default=10.0,
+        title="Time in seconds to wait before FFmpeg retries connecting to the camera.",
+    )
 
 
 class CameraRoleEnum(str, Enum):
@@ -46,5 +46,4 @@ DRIVER_INTEL_iHD = "iHD"
 # Record Values
 
 MAX_SEGMENT_DURATION = 600
-SECONDS_IN_DAY = 60 * 60 * 24
 MAX_PLAYLIST_SECONDS = 7200  # support 2 hour segments for a single playlist to account for cameras with inconsistent segment times
@@ -1121,6 +1121,15 @@ def latest_frame(camera_name):
         height = int(request.args.get("h", str(frame.shape[0])))
         width = int(height * frame.shape[1] / frame.shape[0])
 
+        if not frame:
+            return "Unable to get valid frame from {}".format(camera_name), 500
+
+        if height < 1 or width < 1:
+            return (
+                "Invalid height / width requested :: {} / {}".format(height, width),
+                400,
+            )
+
         frame = cv2.resize(frame, dsize=(width, height), interpolation=cv2.INTER_AREA)
 
         ret, jpg = cv2.imencode(
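Because the endpoint derives the width from the requested height and the frame's aspect ratio, a non-positive "h" yields a degenerate size that cv2.resize would reject; the added guard returns a 400 first. A standalone sketch of that derivation (the helper name is illustrative, not from the codebase):

```python
# Illustrative helper mirroring the h -> (w, h) derivation in latest_frame.
def derive_size(frame_h: int, frame_w: int, requested_h: int) -> tuple[int, int]:
    width = int(requested_h * frame_w / frame_h)  # preserve aspect ratio
    if requested_h < 1 or width < 1:
        raise ValueError(
            "Invalid height / width requested :: {} / {}".format(requested_h, width)
        )
    return width, requested_h

print(derive_size(1080, 1920, 360))  # -> (640, 360)
```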
@@ -1,6 +1,7 @@
 import cv2
 import imutils
 import numpy as np
+from scipy.ndimage import gaussian_filter
 
 from frigate.config import MotionConfig
 from frigate.motion import MotionDetector
@@ -15,9 +16,10 @@ class ImprovedMotionDetector(MotionDetector):
         improve_contrast,
         threshold,
         contour_area,
         clipLimit=2.0,
         tileGridSize=(2, 2),
         name="improved",
+        blur_radius=1,
+        interpolation=cv2.INTER_NEAREST,
+        contrast_frame_history=50,
     ):
         self.name = name
         self.config = config
@@ -28,13 +30,12 @@ class ImprovedMotionDetector(MotionDetector):
             config.frame_height * frame_shape[1] // frame_shape[0],
         )
         self.avg_frame = np.zeros(self.motion_frame_size, np.float32)
-        self.avg_delta = np.zeros(self.motion_frame_size, np.float32)
         self.motion_frame_count = 0
         self.frame_counter = 0
         resized_mask = cv2.resize(
             config.mask,
             dsize=(self.motion_frame_size[1], self.motion_frame_size[0]),
-            interpolation=cv2.INTER_LINEAR,
+            interpolation=cv2.INTER_AREA,
         )
         self.mask = np.where(resized_mask == [0])
         self.save_images = False
@@ -42,7 +43,11 @@ class ImprovedMotionDetector(MotionDetector):
         self.improve_contrast = improve_contrast
         self.threshold = threshold
         self.contour_area = contour_area
-        self.clahe = cv2.createCLAHE(clipLimit=clipLimit, tileGridSize=tileGridSize)
+        self.blur_radius = blur_radius
+        self.interpolation = interpolation
+        self.contrast_values = np.zeros((contrast_frame_history, 2), np.uint8)
+        self.contrast_values[:, 1:2] = 255
+        self.contrast_values_index = 0
 
     def detect(self, frame):
         motion_boxes = []
@@ -53,27 +58,44 @@ class ImprovedMotionDetector(MotionDetector):
         resized_frame = cv2.resize(
             gray,
             dsize=(self.motion_frame_size[1], self.motion_frame_size[0]),
-            interpolation=cv2.INTER_LINEAR,
+            interpolation=self.interpolation,
         )
 
         if self.save_images:
             resized_saved = resized_frame.copy()
 
-        resized_frame = cv2.GaussianBlur(resized_frame, (3, 3), cv2.BORDER_DEFAULT)
-
-        if self.save_images:
-            blurred_saved = resized_frame.copy()
-
         # Improve contrast
         if self.improve_contrast.value:
-            resized_frame = self.clahe.apply(resized_frame)
+            # TODO tracking moving average of min/max to avoid sudden contrast changes
+            minval = np.percentile(resized_frame, 4).astype(np.uint8)
+            maxval = np.percentile(resized_frame, 96).astype(np.uint8)
+            # skip contrast calcs if the image is a single color
+            if minval < maxval:
+                # keep track of the last 50 contrast values
+                self.contrast_values[self.contrast_values_index] = [minval, maxval]
+                self.contrast_values_index += 1
+                if self.contrast_values_index == len(self.contrast_values):
+                    self.contrast_values_index = 0
+
+                avg_min, avg_max = np.mean(self.contrast_values, axis=0)
+
+                resized_frame = np.clip(resized_frame, avg_min, avg_max)
+                resized_frame = (
+                    ((resized_frame - avg_min) / (avg_max - avg_min)) * 255
+                ).astype(np.uint8)
 
         if self.save_images:
             contrasted_saved = resized_frame.copy()
 
         # mask frame
+        # this has to come after contrast improvement
         resized_frame[self.mask] = [255]
 
+        resized_frame = gaussian_filter(resized_frame, sigma=1, radius=self.blur_radius)
+
+        if self.save_images:
+            blurred_saved = resized_frame.copy()
+
         if self.save_images or self.calibrating:
             self.frame_counter += 1
         # compare to average
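The replacement for CLAHE is a rolling percentile stretch: each frame's 4th/96th percentiles are pushed into a 50-entry ring buffer, and the frame is clipped and rescaled against the buffer's mean, so a single bright or dark frame cannot swing the contrast. A self-contained sketch of the same arithmetic (the function name is mine, not Frigate's):

```python
import numpy as np

def rolling_contrast_stretch(gray, history, index):
    """Clip to the running-average 4th/96th percentiles, rescale to 0-255."""
    minval = np.percentile(gray, 4).astype(np.uint8)
    maxval = np.percentile(gray, 96).astype(np.uint8)
    if minval < maxval:  # skip contrast calcs if the image is a single color
        history[index] = [minval, maxval]
        index = (index + 1) % len(history)  # ring buffer, like contrast_values_index
        avg_min, avg_max = np.mean(history, axis=0)
        gray = np.clip(gray, avg_min, avg_max)
        gray = (((gray - avg_min) / (avg_max - avg_min)) * 255).astype(np.uint8)
    return gray, index

history = np.zeros((50, 2), np.uint8)
history[:, 1] = 255  # seed the max column, as contrast_values[:, 1:2] = 255 does
frame = np.random.randint(60, 120, (150, 267), np.uint8)
frame, idx = rolling_contrast_stretch(frame, history, 0)
```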
@@ -134,8 +156,8 @@ class ImprovedMotionDetector(MotionDetector):
         )
         frames = [
             cv2.cvtColor(resized_saved, cv2.COLOR_GRAY2BGR),
-            cv2.cvtColor(blurred_saved, cv2.COLOR_GRAY2BGR),
             cv2.cvtColor(contrasted_saved, cv2.COLOR_GRAY2BGR),
+            cv2.cvtColor(blurred_saved, cv2.COLOR_GRAY2BGR),
             cv2.cvtColor(frameDelta, cv2.COLOR_GRAY2BGR),
             cv2.cvtColor(thresh, cv2.COLOR_GRAY2BGR),
             thresh_dilated,
@@ -188,15 +188,14 @@ class TrackedObject:
             zone_score = self.zone_presence.get(name, 0)
             # check if the object is in the zone
             if cv2.pointPolygonTest(contour, bottom_center, False) >= 0:
-                self.zone_presence[name] = zone_score + 1
-                # if the object passed the filters once, dont apply again
-                if name in self.current_zones or not zone_filtered(
-                    self, zone.filters
-                ):
-                    # an object is only considered present in a zone if it has a zone inertia of 3+
-                    if zone_score >= zone.inertia:
+                # if the object passed the filters once, dont apply again
+                if name in self.current_zones or not zone_filtered(self, zone.filters):
+                    self.zone_presence[name] = zone_score + 1
+
+                    # an object is only considered present in a zone if it has a zone inertia of 3+
+                    if zone_score >= zone.inertia:
                         current_zones.append(name)
 
                         if name not in self.entered_zones:
                             self.entered_zones.append(name)
             else:
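The reordering means an object only accumulates zone score while it passes the zone's filters, and presence is only reported once the score reaches the zone's inertia, which suppresses single-frame false positives. A toy sketch of that counter (names are illustrative):

```python
def update_zone(zone_presence, name, in_zone, passes_filters, inertia=3):
    """Return True once the object has scored `inertia` qualifying frames."""
    zone_score = zone_presence.get(name, 0)
    if in_zone and passes_filters:
        zone_presence[name] = zone_score + 1
    return zone_score >= inertia

presence = {}
print([update_zone(presence, "yard", True, True) for _ in range(4)])
# -> [False, False, False, True]: three qualifying frames build inertia first
```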
@@ -8,10 +8,10 @@ import threading
 from multiprocessing.synchronize import Event as MpEvent
 from pathlib import Path
 
-from peewee import DatabaseError, DoesNotExist, chunked
+from peewee import DatabaseError, chunked
 
 from frigate.config import FrigateConfig, RetainModeEnum
-from frigate.const import RECORD_DIR, SECONDS_IN_DAY
+from frigate.const import RECORD_DIR
 from frigate.models import Event, Recordings, RecordingsToDelete, Timeline
 from frigate.record.util import remove_empty_directories
@@ -28,7 +28,7 @@ class RecordingCleanup(threading.Thread):
         self.stop_event = stop_event
 
     def clean_tmp_clips(self) -> None:
-        # delete any clips more than 5 minutes old
+        """delete any clips in the cache that are more than 5 minutes old."""
         for p in Path("/tmp/cache").rglob("clip_*.mp4"):
             logger.debug(f"Checking tmp clip {p}.")
             if p.stat().st_mtime < (datetime.datetime.now().timestamp() - 60 * 1):
@@ -40,8 +40,8 @@ class RecordingCleanup(threading.Thread):
             p.unlink(missing_ok=True)
 
     def expire_recordings(self) -> None:
-        logger.debug("Start expire recordings (new).")
-
+        """Delete recordings based on retention config."""
+        logger.debug("Start expire recordings.")
         logger.debug("Start deleted cameras.")
         # Handle deleted cameras
         expire_days = self.config.record.retain.days
@@ -161,59 +161,10 @@ class RecordingCleanup(threading.Thread):
             logger.debug(f"End camera: {camera}.")
 
         logger.debug("End all cameras.")
-        logger.debug("End expire recordings (new).")
-
-    def expire_files(self) -> None:
-        logger.debug("Start expire files (legacy).")
-
-        default_expire = (
-            datetime.datetime.now().timestamp()
-            - SECONDS_IN_DAY * self.config.record.retain.days
-        )
-        delete_before = {}
-
-        for name, camera in self.config.cameras.items():
-            delete_before[name] = (
-                datetime.datetime.now().timestamp()
-                - SECONDS_IN_DAY * camera.record.retain.days
-            )
-
-        # find all the recordings older than the oldest recording in the db
-        try:
-            oldest_recording = (
-                Recordings.select().order_by(Recordings.start_time).limit(1).get()
-            )
-
-            p = Path(oldest_recording.path)
-            oldest_timestamp = p.stat().st_mtime - 1
-        except DoesNotExist:
-            oldest_timestamp = datetime.datetime.now().timestamp()
-        except FileNotFoundError:
-            logger.warning(f"Unable to find file from recordings database: {p}")
-            Recordings.delete().where(Recordings.id == oldest_recording.id).execute()
-            return
-
-        logger.debug(f"Oldest recording in the db: {oldest_timestamp}")
-
-        files_to_check = []
-
-        for root, _, files in os.walk(RECORD_DIR):
-            for file in files:
-                file_path = os.path.join(root, file)
-                if os.path.getmtime(file_path) < oldest_timestamp:
-                    files_to_check.append(file_path)
-
-        for f in files_to_check:
-            p = Path(f)
-            try:
-                if p.stat().st_mtime < delete_before.get(p.parent.name, default_expire):
-                    p.unlink(missing_ok=True)
-            except FileNotFoundError:
-                logger.warning(f"Attempted to expire missing file: {f}")
-
-        logger.debug("End expire files (legacy).")
+        logger.debug("End expire recordings.")
 
     def sync_recordings(self) -> None:
         """Check the db for stale recordings entries that don't exist in the filesystem."""
         logger.debug("Start sync recordings.")
 
         # get all recordings in the db
@@ -283,5 +234,4 @@ class RecordingCleanup(threading.Thread):
 
         if counter == 0:
             self.expire_recordings()
-            self.expire_files()
             remove_empty_directories(RECORD_DIR)
@@ -234,6 +234,7 @@ class CameraWatchdog(threading.Thread):
         self.frame_shape = self.config.frame_shape_yuv
         self.frame_size = self.frame_shape[0] * self.frame_shape[1]
         self.stop_event = stop_event
+        self.sleeptime = self.config.ffmpeg.retry_interval
 
     def run(self):
         self.start_ffmpeg_detect()
@@ -253,8 +254,8 @@ class CameraWatchdog(threading.Thread):
                 }
             )
 
-        time.sleep(10)
-        while not self.stop_event.wait(10):
+        time.sleep(self.sleeptime)
+        while not self.stop_event.wait(self.sleeptime):
            now = datetime.datetime.now().timestamp()
 
            if not self.capture_thread.is_alive():
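A hard-coded time.sleep(10) held up shutdown for the full interval, while Event.wait(timeout) returns the moment the stop event is set; the loop now also honors the configurable retry_interval. A minimal sketch of the pattern:

```python
import threading

def watchdog_loop(stop_event, retry_interval=10.0):
    # wait() sleeps up to retry_interval but returns True immediately on stop
    while not stop_event.wait(retry_interval):
        pass  # check ffmpeg / capture thread health here, restart if needed

stop = threading.Event()
t = threading.Thread(target=watchdog_loop, args=(stop, 0.1))
t.start()
stop.set()  # the loop exits within one wait() call instead of a full sleep
t.join()
```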
@@ -6,7 +6,7 @@ matplotlib == 3.7.*
 mypy == 0.942
 numpy == 1.23.*
 onvif_zeep == 0.2.12
-opencv-python-headless == 4.5.5.*
+opencv-python-headless == 4.7.0.*
 paho-mqtt == 1.6.*
 peewee == 3.16.*
 peewee_migrate == 1.10.*
@@ -28,13 +28,18 @@ export default function CameraImage({ camera, onload, searchParams = '', stretch
 
   const scaledHeight = useMemo(() => {
     const scaledHeight = Math.floor(availableWidth / aspectRatio);
-    return stretch ? scaledHeight : Math.min(scaledHeight, height);
+    const finalHeight = stretch ? scaledHeight : Math.min(scaledHeight, height);
+
+    if (finalHeight > 0) {
+      return finalHeight;
+    }
+
+    return 100;
   }, [availableWidth, aspectRatio, height, stretch]);
-  const scaledWidth = useMemo(() => Math.ceil(scaledHeight * aspectRatio - scrollBarWidth), [
-    scaledHeight,
-    aspectRatio,
-    scrollBarWidth,
-  ]);
+  const scaledWidth = useMemo(
+    () => Math.ceil(scaledHeight * aspectRatio - scrollBarWidth),
+    [scaledHeight, aspectRatio, scrollBarWidth]
+  );
 
   const img = useMemo(() => new Image(), []);
   img.onload = useCallback(
@@ -58,18 +63,16 @@ export default function CameraImage({ camera, onload, searchParams = '', stretch
 
   return (
     <div className="relative w-full" ref={containerRef}>
-      {
-        (enabled) ?
-          <canvas data-testid="cameraimage-canvas" height={scaledHeight} ref={canvasRef} width={scaledWidth} />
-          : <div class="text-center pt-6">Camera is disabled in config, no stream or snapshot available!</div>
-      }
-      {
-        (!hasLoaded && enabled) ? (
-          <div className="absolute inset-0 flex justify-center" style={`height: ${scaledHeight}px`}>
-            <ActivityIndicator />
-          </div>
-        ) : null
-      }
-    </div >
+      {enabled ? (
+        <canvas data-testid="cameraimage-canvas" height={scaledHeight} ref={canvasRef} width={scaledWidth} />
+      ) : (
+        <div class="text-center pt-6">Camera is disabled in config, no stream or snapshot available!</div>
+      )}
+      {!hasLoaded && enabled ? (
+        <div className="absolute inset-0 flex justify-center" style={`height: ${scaledHeight}px`}>
+          <ActivityIndicator />
+        </div>
+      ) : null}
+    </div>
   );
 }
web/src/components/TimelineEventOverlay.jsx (new file, 65 lines)
@@ -0,0 +1,65 @@
+import { Fragment, h } from 'preact';
+import { useState } from 'preact/hooks';
+
+export default function TimelineEventOverlay({ eventOverlay, cameraConfig }) {
+  const boxLeftEdge = Math.round(eventOverlay.data.box[0] * 100);
+  const boxTopEdge = Math.round(eventOverlay.data.box[1] * 100);
+  const boxRightEdge = Math.round((1 - eventOverlay.data.box[2] - eventOverlay.data.box[0]) * 100);
+  const boxBottomEdge = Math.round((1 - eventOverlay.data.box[3] - eventOverlay.data.box[1]) * 100);
+
+  const [isHovering, setIsHovering] = useState(false);
+  const getHoverStyle = () => {
+    if (boxLeftEdge < 15) {
+      // show object stats on right side
+      return {
+        left: `${boxLeftEdge + eventOverlay.data.box[2] * 100 + 1}%`,
+        top: `${boxTopEdge}%`,
+      };
+    }
+
+    return {
+      right: `${boxRightEdge + eventOverlay.data.box[2] * 100 + 1}%`,
+      top: `${boxTopEdge}%`,
+    };
+  };
+
+  const getObjectArea = () => {
+    const width = eventOverlay.data.box[2] * cameraConfig.detect.width;
+    const height = eventOverlay.data.box[3] * cameraConfig.detect.height;
+    return Math.round(width * height);
+  };
+
+  const getObjectRatio = () => {
+    const width = eventOverlay.data.box[2] * cameraConfig.detect.width;
+    const height = eventOverlay.data.box[3] * cameraConfig.detect.height;
+    return Math.round(100 * (width / height)) / 100;
+  };
+
+  return (
+    <Fragment>
+      <div
+        className="absolute border-4 border-red-600"
+        onMouseEnter={() => setIsHovering(true)}
+        onMouseLeave={() => setIsHovering(false)}
+        onTouchStart={() => setIsHovering(true)}
+        onTouchEnd={() => setIsHovering(false)}
+        style={{
+          left: `${boxLeftEdge}%`,
+          top: `${boxTopEdge}%`,
+          right: `${boxRightEdge}%`,
+          bottom: `${boxBottomEdge}%`,
+        }}
+      >
+        {eventOverlay.class_type == 'entered_zone' ? (
+          <div className="absolute w-2 h-2 bg-yellow-500 left-[50%] -translate-x-1/2 translate-y-3/4 bottom-0" />
+        ) : null}
+      </div>
+      {isHovering && (
+        <div className="absolute bg-white dark:bg-slate-800 p-4 block dark:text-white text-lg" style={getHoverStyle()}>
+          <div>{`Area: ${getObjectArea()} px`}</div>
+          <div>{`Ratio: ${getObjectRatio()}`}</div>
+        </div>
+      )}
+    </Fragment>
+  );
+}
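Frigate event boxes are normalized [x, y, width, height], so the component converts them to CSS percentage insets where right = 1 - x - w and bottom = 1 - y - h. The same math in a small Python sketch (the helper name is mine):

```python
def box_to_insets(box):
    """Convert a normalized [x, y, w, h] box to CSS percentage insets."""
    x, y, w, h = box
    return {
        "left": round(x * 100),
        "top": round(y * 100),
        "right": round((1 - w - x) * 100),
        "bottom": round((1 - h - y) * 100),
    }

print(box_to_insets([0.25, 0.4, 0.2, 0.3]))
# -> {'left': 25, 'top': 40, 'right': 55, 'bottom': 30}
```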
@@ -29,6 +29,7 @@ import { formatUnixTimestampToDateTime, getDurationFromTimestamps } from '../uti
 import TimeAgo from '../components/TimeAgo';
 import Timepicker from '../components/TimePicker';
 import TimelineSummary from '../components/TimelineSummary';
+import TimelineEventOverlay from '../components/TimelineEventOverlay';
 
 const API_LIMIT = 25;
 
@@ -717,23 +718,10 @@ export default function Events({ path, ...props }) {
                 }}
               >
                 {eventOverlay ? (
-                  <div
-                    className="absolute border-4 border-red-600"
-                    style={{
-                      left: `${Math.round(eventOverlay.data.box[0] * 100)}%`,
-                      top: `${Math.round(eventOverlay.data.box[1] * 100)}%`,
-                      right: `${Math.round(
-                        (1 - eventOverlay.data.box[2] - eventOverlay.data.box[0]) * 100
-                      )}%`,
-                      bottom: `${Math.round(
-                        (1 - eventOverlay.data.box[3] - eventOverlay.data.box[1]) * 100
-                      )}%`,
-                    }}
-                  >
-                    {eventOverlay.class_type == 'entered_zone' ? (
-                      <div className="absolute w-2 h-2 bg-yellow-500 left-[50%] -translate-x-1/2 translate-y-3/4 bottom-0" />
-                    ) : null}
-                  </div>
+                  <TimelineEventOverlay
+                    eventOverlay={eventOverlay}
+                    cameraConfig={config.cameras[event.camera]}
+                  />
                 ) : null}
               </VideoPlayer>
             </div>