Merge branch 'dev' of github.com:blakeblackshear/frigate into audio-events

Nick Mowen 2023-06-30 06:47:15 -06:00
commit 51a09b43d0
15 changed files with 199 additions and 130 deletions

View File

@@ -18,10 +18,13 @@ WORKDIR /rootfs
 FROM base AS nginx

 ARG DEBIAN_FRONTEND
+ENV CCACHE_DIR /root/.ccache
+ENV CCACHE_MAXSIZE 2G

 # bind /var/cache/apt to tmpfs to speed up nginx build
 RUN --mount=type=tmpfs,target=/tmp --mount=type=tmpfs,target=/var/cache/apt \
     --mount=type=bind,source=docker/build_nginx.sh,target=/deps/build_nginx.sh \
+    --mount=type=cache,target=/root/.ccache \
     /deps/build_nginx.sh

 FROM wget AS go2rtc

@@ -61,14 +64,16 @@ RUN mkdir /models \
 FROM wget as libusb-build

 ARG TARGETARCH
 ARG DEBIAN_FRONTEND
+ENV CCACHE_DIR /root/.ccache
+ENV CCACHE_MAXSIZE 2G

 # Build libUSB without udev. Needed for Openvino NCS2 support
 WORKDIR /opt
-RUN apt-get update && apt-get install -y unzip build-essential automake libtool
-RUN wget -q https://github.com/libusb/libusb/archive/v1.0.25.zip -O v1.0.25.zip && \
+RUN apt-get update && apt-get install -y unzip build-essential automake libtool ccache
+RUN --mount=type=cache,target=/root/.ccache wget -q https://github.com/libusb/libusb/archive/v1.0.25.zip -O v1.0.25.zip && \
     unzip v1.0.25.zip && cd libusb-1.0.25 && \
     ./bootstrap.sh && \
-    ./configure --disable-udev --enable-shared && \
+    ./configure CC='ccache gcc' CCX='ccache g++' --disable-udev --enable-shared && \
     make -j $(nproc --all)
 RUN apt-get update && \
     apt-get install -y --no-install-recommends libusb-1.0-0-dev && \

View File

@@ -12,16 +12,32 @@ from frigate.util import create_mask
 # get info on the video
 # cap = cv2.VideoCapture("debug/front_cam_2023_05_23_08_41__2023_05_23_08_43.mp4")
 # cap = cv2.VideoCapture("debug/motion_test_clips/rain_1.mp4")
-cap = cv2.VideoCapture("debug/motion_test_clips/ir_off.mp4")
+cap = cv2.VideoCapture("debug/motion_test_clips/lawn_mower_night_1.mp4")
 # cap = cv2.VideoCapture("airport.mp4")
 width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
 height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
 fps = cap.get(cv2.CAP_PROP_FPS)

 frame_shape = (height, width, 3)

+# Nick back:
+# "1280,0,1280,316,1170,216,1146,126,1016,127,979,82,839,0",
+# "310,350,300,402,224,405,241,354",
+# "378,0,375,26,0,23,0,0",
+# Front door:
+# "1080,0,1080,339,1010,280,1020,169,777,163,452,170,318,299,191,365,186,417,139,470,108,516,40,530,0,514,0,0",
+# "336,833,438,1024,346,1093,103,1052,24,814",
+# Back
+# "1855,0,1851,100,1289,96,1105,161,1045,119,890,121,890,0",
+# "505,95,506,138,388,153,384,114",
+# "689,72,689,122,549,134,547,89",
+# "261,134,264,176,169,195,167,158",
+# "145,159,146,202,70,220,65,183",
 mask = create_mask(
     (height, width),
-    [],
+    [
+        "1080,0,1080,339,1010,280,1020,169,777,163,452,170,318,299,191,365,186,417,139,470,108,516,40,530,0,514,0,0",
+        "336,833,438,1024,346,1093,103,1052,24,814",
+    ],
 )

 # create the motion config
@@ -29,7 +45,7 @@ motion_config_1 = MotionConfig()
 motion_config_1.mask = np.zeros((height, width), np.uint8)
 motion_config_1.mask[:] = mask
 # motion_config_1.improve_contrast = 1
-# motion_config_1.frame_height = 150
+motion_config_1.frame_height = 150
 # motion_config_1.frame_alpha = 0.02
 # motion_config_1.threshold = 30
 # motion_config_1.contour_area = 10
@@ -38,10 +54,11 @@ motion_config_2 = MotionConfig()
 motion_config_2.mask = np.zeros((height, width), np.uint8)
 motion_config_2.mask[:] = mask
 # motion_config_2.improve_contrast = 1
-# motion_config_2.frame_height = 150
+motion_config_2.frame_height = 150
 # motion_config_2.frame_alpha = 0.01
-# motion_config_2.threshold = 20
+motion_config_2.threshold = 20
 # motion_config.contour_area = 10

 save_images = True

 improved_motion_detector_1 = ImprovedMotionDetector(
@@ -52,8 +69,6 @@ improved_motion_detector_1 = ImprovedMotionDetector(
     threshold=mp.Value("i", motion_config_1.threshold),
     contour_area=mp.Value("i", motion_config_1.contour_area),
     name="default",
-    clipLimit=2.0,
-    tileGridSize=(8, 8),
 )
 improved_motion_detector_1.save_images = save_images

View File

@@ -15,6 +15,10 @@ apt-get -yqq build-dep nginx
 apt-get -yqq install --no-install-recommends ca-certificates wget
 update-ca-certificates -f

+apt install -y ccache
+export PATH="/usr/lib/ccache:$PATH"
+
 mkdir /tmp/nginx
 wget -nv https://nginx.org/download/nginx-${NGINX_VERSION}.tar.gz
 tar -zxf nginx-${NGINX_VERSION}.tar.gz -C /tmp/nginx --strip-components=1
@@ -62,5 +66,5 @@ cd /tmp/nginx
     --add-module=../nginx-rtmp-module \
     --with-cc-opt="-O3 -Wno-error=implicit-fallthrough"

-make -j$(nproc) && make install
+make CC="ccache gcc" -j$(nproc) && make install
 rm -rf /usr/local/nginx/html /usr/local/nginx/conf/*.default

View File

@@ -189,6 +189,11 @@ ffmpeg:
     record: preset-record-generic
     # Optional: output args for rtmp streams (default: shown below)
     rtmp: preset-rtmp-generic
+  # Optional: Time in seconds to wait before ffmpeg retries connecting to the camera. (default: shown below)
+  # If set too low, Frigate will retry connections to the camera's stream too frequently and can use up the limited number of concurrent streams some cameras allow.
+  # If set too high, an ffmpeg crash or camera stream timeout could cost up to retry_interval seconds of footage.
+  # NOTE: this can be a useful setting for wireless / battery cameras to reduce how much footage is potentially lost during a connection timeout.
+  retry_interval: 10

 # Optional: Detect configuration
 # NOTE: Can be overridden at the camera level
@@ -275,7 +280,7 @@ motion:
   # Optional: The threshold passed to cv2.threshold to determine if a pixel is different enough to be counted as motion. (default: shown below)
   # Increasing this value will make motion detection less sensitive and decreasing it will make motion detection more sensitive.
   # The value should be between 1 and 255.
-  threshold: 20
+  threshold: 30
   # Optional: The percentage of the image used to detect lightning or other substantial changes where motion detection
   # needs to recalibrate. (default: shown below)
   # Increasing this value will make motion detection more likely to consider lightning or ir mode changes as valid motion.
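The tradeoff documented above is easiest to see as a timed reconnect loop. A minimal Python sketch under stated assumptions (start_stream and is_alive are hypothetical stand-ins for spawning and checking the ffmpeg process, not Frigate's actual watchdog):

import threading

def watch_camera(start_stream, is_alive, retry_interval: float, stop_event: threading.Event) -> None:
    """Restart a camera stream after it dies, checking every retry_interval seconds."""
    start_stream()
    # waiting retry_interval between checks bounds both how often the camera is
    # re-contacted (too low can exhaust its stream slots) and how much footage
    # a crash can cost (up to retry_interval seconds)
    while not stop_event.wait(retry_interval):
        if not is_alive():
            start_stream()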

View File

@@ -188,7 +188,7 @@ class RecordConfig(FrigateBaseModel):
 class MotionConfig(FrigateBaseModel):
     threshold: int = Field(
-        default=20,
+        default=30,
         title="Motion detection threshold (1-255).",
         ge=1,
         le=255,
@@ -477,6 +477,10 @@ class FfmpegConfig(FrigateBaseModel):
         default_factory=FfmpegOutputArgsConfig,
         title="FFmpeg output arguments per role.",
     )
+    retry_interval: float = Field(
+        default=10.0,
+        title="Time in seconds to wait before FFmpeg retries connecting to the camera.",
+    )

 class CameraRoleEnum(str, Enum):
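A standalone sketch of how those Field bounds behave, assuming only pydantic (this mirrors, but is not, Frigate's actual model):

from pydantic import BaseModel, Field, ValidationError

class MotionConfig(BaseModel):
    # same constraints as above: default 30, valid range 1-255 inclusive
    threshold: int = Field(default=30, ge=1, le=255)

MotionConfig()               # threshold == 30
MotionConfig(threshold=255)  # upper bound is inclusive

try:
    MotionConfig(threshold=0)  # violates ge=1
except ValidationError as err:
    print(err)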

View File

@@ -46,5 +46,4 @@ DRIVER_INTEL_iHD = "iHD"
 # Record Values

 MAX_SEGMENT_DURATION = 600
-SECONDS_IN_DAY = 60 * 60 * 24
 MAX_PLAYLIST_SECONDS = 7200  # support 2 hour segments for a single playlist to account for cameras with inconsistent segment times

View File

@@ -1121,6 +1121,15 @@ def latest_frame(camera_name):
         height = int(request.args.get("h", str(frame.shape[0])))
         width = int(height * frame.shape[1] / frame.shape[0])

+        if not frame:
+            return "Unable to get valid frame from {}".format(camera_name), 500
+
+        if height < 1 or width < 1:
+            return (
+                "Invalid height / width requested :: {} / {}".format(height, width),
+                400,
+            )
+
         frame = cv2.resize(frame, dsize=(width, height), interpolation=cv2.INTER_AREA)

         ret, jpg = cv2.imencode(
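For context, a hedged client-side sketch of exercising the validation above (host, port, and camera name are hypothetical; the path assumes Frigate's latest-frame API):

import requests

# request the latest frame scaled to a 300px height
resp = requests.get(
    "http://frigate.local:5000/api/front_door/latest.jpg", params={"h": 300}
)
if resp.status_code == 400:
    print(resp.text)  # "Invalid height / width requested :: ..."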

View File

@@ -1,6 +1,7 @@
 import cv2
 import imutils
 import numpy as np
+from scipy.ndimage import gaussian_filter

 from frigate.config import MotionConfig
 from frigate.motion import MotionDetector
@@ -15,9 +16,10 @@ class ImprovedMotionDetector(MotionDetector):
         improve_contrast,
         threshold,
         contour_area,
-        clipLimit=2.0,
-        tileGridSize=(2, 2),
         name="improved",
+        blur_radius=1,
+        interpolation=cv2.INTER_NEAREST,
+        contrast_frame_history=50,
     ):
         self.name = name
         self.config = config
@@ -28,13 +30,12 @@ class ImprovedMotionDetector(MotionDetector):
             config.frame_height * frame_shape[1] // frame_shape[0],
         )
         self.avg_frame = np.zeros(self.motion_frame_size, np.float32)
-        self.avg_delta = np.zeros(self.motion_frame_size, np.float32)
         self.motion_frame_count = 0
         self.frame_counter = 0
         resized_mask = cv2.resize(
             config.mask,
             dsize=(self.motion_frame_size[1], self.motion_frame_size[0]),
-            interpolation=cv2.INTER_LINEAR,
+            interpolation=cv2.INTER_AREA,
         )
         self.mask = np.where(resized_mask == [0])
         self.save_images = False
@@ -42,7 +43,11 @@ class ImprovedMotionDetector(MotionDetector):
         self.improve_contrast = improve_contrast
         self.threshold = threshold
         self.contour_area = contour_area
-        self.clahe = cv2.createCLAHE(clipLimit=clipLimit, tileGridSize=tileGridSize)
+        self.blur_radius = blur_radius
+        self.interpolation = interpolation
+        self.contrast_values = np.zeros((contrast_frame_history, 2), np.uint8)
+        self.contrast_values[:, 1:2] = 255
+        self.contrast_values_index = 0

     def detect(self, frame):
         motion_boxes = []
@@ -53,27 +58,44 @@ class ImprovedMotionDetector(MotionDetector):
         resized_frame = cv2.resize(
             gray,
             dsize=(self.motion_frame_size[1], self.motion_frame_size[0]),
-            interpolation=cv2.INTER_LINEAR,
+            interpolation=self.interpolation,
         )

         if self.save_images:
             resized_saved = resized_frame.copy()

-        resized_frame = cv2.GaussianBlur(resized_frame, (3, 3), cv2.BORDER_DEFAULT)
-
-        if self.save_images:
-            blurred_saved = resized_frame.copy()
-
         # Improve contrast
         if self.improve_contrast.value:
-            resized_frame = self.clahe.apply(resized_frame)
+            # TODO tracking moving average of min/max to avoid sudden contrast changes
+            minval = np.percentile(resized_frame, 4).astype(np.uint8)
+            maxval = np.percentile(resized_frame, 96).astype(np.uint8)
+            # skip contrast calcs if the image is a single color
+            if minval < maxval:
+                # keep track of the last 50 contrast values
+                self.contrast_values[self.contrast_values_index] = [minval, maxval]
+                self.contrast_values_index += 1
+                if self.contrast_values_index == len(self.contrast_values):
+                    self.contrast_values_index = 0
+
+                avg_min, avg_max = np.mean(self.contrast_values, axis=0)
+
+                resized_frame = np.clip(resized_frame, avg_min, avg_max)
+                resized_frame = (
+                    ((resized_frame - avg_min) / (avg_max - avg_min)) * 255
+                ).astype(np.uint8)

         if self.save_images:
             contrasted_saved = resized_frame.copy()

         # mask frame
+        # this has to come after contrast improvement
         resized_frame[self.mask] = [255]

+        resized_frame = gaussian_filter(resized_frame, sigma=1, radius=self.blur_radius)
+
+        if self.save_images:
+            blurred_saved = resized_frame.copy()
+
         if self.save_images or self.calibrating:
             self.frame_counter += 1
         # compare to average
@@ -134,8 +156,8 @@ class ImprovedMotionDetector(MotionDetector):
         )
         frames = [
             cv2.cvtColor(resized_saved, cv2.COLOR_GRAY2BGR),
-            cv2.cvtColor(blurred_saved, cv2.COLOR_GRAY2BGR),
             cv2.cvtColor(contrasted_saved, cv2.COLOR_GRAY2BGR),
+            cv2.cvtColor(blurred_saved, cv2.COLOR_GRAY2BGR),
             cv2.cvtColor(frameDelta, cv2.COLOR_GRAY2BGR),
             cv2.cvtColor(thresh, cv2.COLOR_GRAY2BGR),
             thresh_dilated,
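The new contrast path above replaces CLAHE with a rolling percentile stretch: each frame's 4th/96th percentiles are pushed into a fixed-size history, and the frame is normalized against the history's mean to avoid sudden contrast jumps. A standalone numpy sketch of the same technique (history and stretch_contrast are illustrative names, not Frigate's):

import numpy as np

# ring buffer of (min, max) percentile pairs; plays the role of self.contrast_values
history = np.zeros((50, 2), np.uint8)
history[:, 1] = 255  # start from the widest possible range
index = 0

def stretch_contrast(gray: np.ndarray) -> np.ndarray:
    """Stretch a grayscale frame using percentiles averaged over recent frames."""
    global index
    minval = np.percentile(gray, 4).astype(np.uint8)
    maxval = np.percentile(gray, 96).astype(np.uint8)
    if minval >= maxval:  # single-color frame, nothing to stretch
        return gray
    history[index] = [minval, maxval]
    index = (index + 1) % len(history)
    avg_min, avg_max = np.mean(history, axis=0)
    clipped = np.clip(gray, avg_min, avg_max)
    return (((clipped - avg_min) / (avg_max - avg_min)) * 255).astype(np.uint8)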

View File

@@ -188,15 +188,14 @@ class TrackedObject:
             zone_score = self.zone_presence.get(name, 0)
             # check if the object is in the zone
             if cv2.pointPolygonTest(contour, bottom_center, False) >= 0:
+                # if the object passed the filters once, dont apply again
+                if name in self.current_zones or not zone_filtered(self, zone.filters):
                     self.zone_presence[name] = zone_score + 1

                     # an object is only considered present in a zone if it has a zone inertia of 3+
                     if zone_score >= zone.inertia:
-                        # if the object passed the filters once, dont apply again
-                        if name in self.current_zones or not zone_filtered(
-                            self, zone.filters
-                        ):
                         current_zones.append(name)
                         if name not in self.entered_zones:
                             self.entered_zones.append(name)
             else:
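The reorder above means an object must pass the zone's filters before its presence counter can grow at all, instead of being filtered only at the inertia check. A toy Python sketch of the counting (simplified and standalone; the real logic lives in TrackedObject, and real exit handling differs):

def update_zone_presence(zone_presence, name, in_zone, passed_filters, inertia=3):
    """Return True once an object has been in the zone, passing filters,
    for at least `inertia` frames."""
    score = zone_presence.get(name, 0)
    if in_zone and passed_filters:
        zone_presence[name] = score + 1
        return score >= inertia
    zone_presence[name] = 0  # simplification: the real code handles exits differently
    return False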

View File

@@ -8,10 +8,10 @@ import threading
 from multiprocessing.synchronize import Event as MpEvent
 from pathlib import Path

-from peewee import DatabaseError, DoesNotExist, chunked
+from peewee import DatabaseError, chunked

 from frigate.config import FrigateConfig, RetainModeEnum
-from frigate.const import RECORD_DIR, SECONDS_IN_DAY
+from frigate.const import RECORD_DIR
 from frigate.models import Event, Recordings, RecordingsToDelete, Timeline
 from frigate.record.util import remove_empty_directories
@@ -28,7 +28,7 @@ class RecordingCleanup(threading.Thread):
         self.stop_event = stop_event

     def clean_tmp_clips(self) -> None:
-        # delete any clips more than 5 minutes old
+        """delete any clips in the cache that are more than 5 minutes old."""
         for p in Path("/tmp/cache").rglob("clip_*.mp4"):
             logger.debug(f"Checking tmp clip {p}.")
             if p.stat().st_mtime < (datetime.datetime.now().timestamp() - 60 * 1):
@@ -40,8 +40,8 @@ class RecordingCleanup(threading.Thread):
             p.unlink(missing_ok=True)

     def expire_recordings(self) -> None:
-        logger.debug("Start expire recordings (new).")
+        """Delete recordings based on retention config."""
+        logger.debug("Start expire recordings.")

         logger.debug("Start deleted cameras.")
         # Handle deleted cameras
         expire_days = self.config.record.retain.days
@@ -161,59 +161,10 @@ class RecordingCleanup(threading.Thread):
             logger.debug(f"End camera: {camera}.")

         logger.debug("End all cameras.")
-        logger.debug("End expire recordings (new).")
+        logger.debug("End expire recordings.")

-    def expire_files(self) -> None:
-        logger.debug("Start expire files (legacy).")
-
-        default_expire = (
-            datetime.datetime.now().timestamp()
-            - SECONDS_IN_DAY * self.config.record.retain.days
-        )
-        delete_before = {}
-
-        for name, camera in self.config.cameras.items():
-            delete_before[name] = (
-                datetime.datetime.now().timestamp()
-                - SECONDS_IN_DAY * camera.record.retain.days
-            )
-
-        # find all the recordings older than the oldest recording in the db
-        try:
-            oldest_recording = (
-                Recordings.select().order_by(Recordings.start_time).limit(1).get()
-            )
-
-            p = Path(oldest_recording.path)
-            oldest_timestamp = p.stat().st_mtime - 1
-        except DoesNotExist:
-            oldest_timestamp = datetime.datetime.now().timestamp()
-        except FileNotFoundError:
-            logger.warning(f"Unable to find file from recordings database: {p}")
-            Recordings.delete().where(Recordings.id == oldest_recording.id).execute()
-            return
-
-        logger.debug(f"Oldest recording in the db: {oldest_timestamp}")
-
-        files_to_check = []
-
-        for root, _, files in os.walk(RECORD_DIR):
-            for file in files:
-                file_path = os.path.join(root, file)
-                if os.path.getmtime(file_path) < oldest_timestamp:
-                    files_to_check.append(file_path)
-
-        for f in files_to_check:
-            p = Path(f)
-            try:
-                if p.stat().st_mtime < delete_before.get(p.parent.name, default_expire):
-                    p.unlink(missing_ok=True)
-            except FileNotFoundError:
-                logger.warning(f"Attempted to expire missing file: {f}")
-
-        logger.debug("End expire files (legacy).")

     def sync_recordings(self) -> None:
+        """Check the db for stale recordings entries that don't exist in the filesystem."""
         logger.debug("Start sync recordings.")

         # get all recordings in the db
@@ -283,5 +234,4 @@ class RecordingCleanup(threading.Thread):
                 if counter == 0:
                     self.expire_recordings()
-                    self.expire_files()

                 remove_empty_directories(RECORD_DIR)

View File

@@ -234,6 +234,7 @@ class CameraWatchdog(threading.Thread):
         self.frame_shape = self.config.frame_shape_yuv
         self.frame_size = self.frame_shape[0] * self.frame_shape[1]
         self.stop_event = stop_event
+        self.sleeptime = self.config.ffmpeg.retry_interval

     def run(self):
         self.start_ffmpeg_detect()
@@ -253,8 +254,8 @@ class CameraWatchdog(threading.Thread):
                 }
             )
-            time.sleep(10)
+            time.sleep(self.sleeptime)

-        while not self.stop_event.wait(10):
+        while not self.stop_event.wait(self.sleeptime):
             now = datetime.datetime.now().timestamp()
             if not self.capture_thread.is_alive():

View File

@@ -6,7 +6,7 @@ matplotlib == 3.7.*
 mypy == 0.942
 numpy == 1.23.*
 onvif_zeep == 0.2.12
-opencv-python-headless == 4.5.5.*
+opencv-python-headless == 4.7.0.*
 paho-mqtt == 1.6.*
 peewee == 3.16.*
 peewee_migrate == 1.10.*

View File

@@ -28,13 +28,18 @@ export default function CameraImage({ camera, onload, searchParams = '', stretch
   const scaledHeight = useMemo(() => {
     const scaledHeight = Math.floor(availableWidth / aspectRatio);
-    return stretch ? scaledHeight : Math.min(scaledHeight, height);
+    const finalHeight = stretch ? scaledHeight : Math.min(scaledHeight, height);
+
+    if (finalHeight > 0) {
+      return finalHeight;
+    }
+
+    return 100;
   }, [availableWidth, aspectRatio, height, stretch]);
-  const scaledWidth = useMemo(() => Math.ceil(scaledHeight * aspectRatio - scrollBarWidth), [
-    scaledHeight,
-    aspectRatio,
-    scrollBarWidth,
-  ]);
+  const scaledWidth = useMemo(
+    () => Math.ceil(scaledHeight * aspectRatio - scrollBarWidth),
+    [scaledHeight, aspectRatio, scrollBarWidth]
+  );

   const img = useMemo(() => new Image(), []);
   img.onload = useCallback(
@@ -58,18 +63,16 @@ export default function CameraImage({ camera, onload, searchParams = '', stretch
   return (
     <div className="relative w-full" ref={containerRef}>
-      {
-        (enabled) ?
-          <canvas data-testid="cameraimage-canvas" height={scaledHeight} ref={canvasRef} width={scaledWidth} />
-          : <div class="text-center pt-6">Camera is disabled in config, no stream or snapshot available!</div>
-      }
-      {
-        (!hasLoaded && enabled) ? (
-          <div className="absolute inset-0 flex justify-center" style={`height: ${scaledHeight}px`}>
-            <ActivityIndicator />
-          </div>
-        ) : null
-      }
+      {enabled ? (
+        <canvas data-testid="cameraimage-canvas" height={scaledHeight} ref={canvasRef} width={scaledWidth} />
+      ) : (
+        <div class="text-center pt-6">Camera is disabled in config, no stream or snapshot available!</div>
+      )}
+      {!hasLoaded && enabled ? (
+        <div className="absolute inset-0 flex justify-center" style={`height: ${scaledHeight}px`}>
+          <ActivityIndicator />
+        </div>
+      ) : null}
     </div>
   );
 }

View File

@@ -0,0 +1,65 @@
+import { Fragment, h } from 'preact';
+import { useState } from 'preact/hooks';
+
+export default function TimelineEventOverlay({ eventOverlay, cameraConfig }) {
+  const boxLeftEdge = Math.round(eventOverlay.data.box[0] * 100);
+  const boxTopEdge = Math.round(eventOverlay.data.box[1] * 100);
+  const boxRightEdge = Math.round((1 - eventOverlay.data.box[2] - eventOverlay.data.box[0]) * 100);
+  const boxBottomEdge = Math.round((1 - eventOverlay.data.box[3] - eventOverlay.data.box[1]) * 100);
+
+  const [isHovering, setIsHovering] = useState(false);
+
+  const getHoverStyle = () => {
+    if (boxLeftEdge < 15) {
+      // show object stats on right side
+      return {
+        left: `${boxLeftEdge + eventOverlay.data.box[2] * 100 + 1}%`,
+        top: `${boxTopEdge}%`,
+      };
+    }
+
+    return {
+      right: `${boxRightEdge + eventOverlay.data.box[2] * 100 + 1}%`,
+      top: `${boxTopEdge}%`,
+    };
+  };
+
+  const getObjectArea = () => {
+    const width = eventOverlay.data.box[2] * cameraConfig.detect.width;
+    const height = eventOverlay.data.box[3] * cameraConfig.detect.height;
+    return Math.round(width * height);
+  };
+
+  const getObjectRatio = () => {
+    const width = eventOverlay.data.box[2] * cameraConfig.detect.width;
+    const height = eventOverlay.data.box[3] * cameraConfig.detect.height;
+    return Math.round(100 * (width / height)) / 100;
+  };
+
+  return (
+    <Fragment>
+      <div
+        className="absolute border-4 border-red-600"
+        onMouseEnter={() => setIsHovering(true)}
+        onMouseLeave={() => setIsHovering(false)}
+        onTouchStart={() => setIsHovering(true)}
+        onTouchEnd={() => setIsHovering(false)}
+        style={{
+          left: `${boxLeftEdge}%`,
+          top: `${boxTopEdge}%`,
+          right: `${boxRightEdge}%`,
+          bottom: `${boxBottomEdge}%`,
+        }}
+      >
+        {eventOverlay.class_type == 'entered_zone' ? (
+          <div className="absolute w-2 h-2 bg-yellow-500 left-[50%] -translate-x-1/2 translate-y-3/4 bottom-0" />
+        ) : null}
+      </div>
+      {isHovering && (
+        <div className="absolute bg-white dark:bg-slate-800 p-4 block dark:text-white text-lg" style={getHoverStyle()}>
+          <div>{`Area: ${getObjectArea()} px`}</div>
+          <div>{`Ratio: ${getObjectRatio()}`}</div>
+        </div>
+      )}
+    </Fragment>
+  );
+}
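The component's box math, restated as a short Python sketch for clarity (box_to_overlay is an illustrative name): eventOverlay.data.box is [left, top, width, height] normalized to 0-1, the edges become CSS percentage offsets, and the hover stats use the camera's detect resolution.

def box_to_overlay(box, detect_width, detect_height):
    left, top, width, height = box
    edges = {
        "left": round(left * 100),
        "top": round(top * 100),
        "right": round((1 - width - left) * 100),
        "bottom": round((1 - height - top) * 100),
    }
    area_px = round(width * detect_width * height * detect_height)
    ratio = round(100 * (width * detect_width) / (height * detect_height)) / 100
    return edges, area_px, ratio

# e.g. a box covering the center quarter of a 1280x720 detect stream:
print(box_to_overlay([0.25, 0.25, 0.5, 0.5], 1280, 720))
# ({'left': 25, 'top': 25, 'right': 25, 'bottom': 25}, 230400, 1.78)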

View File

@@ -29,6 +29,7 @@ import { formatUnixTimestampToDateTime, getDurationFromTimestamps } from '../uti
 import TimeAgo from '../components/TimeAgo';
 import Timepicker from '../components/TimePicker';
 import TimelineSummary from '../components/TimelineSummary';
+import TimelineEventOverlay from '../components/TimelineEventOverlay';

 const API_LIMIT = 25;

@@ -717,23 +718,10 @@ export default function Events({ path, ...props }) {
                   }}
                 >
                   {eventOverlay ? (
-                    <div
-                      className="absolute border-4 border-red-600"
-                      style={{
-                        left: `${Math.round(eventOverlay.data.box[0] * 100)}%`,
-                        top: `${Math.round(eventOverlay.data.box[1] * 100)}%`,
-                        right: `${Math.round(
-                          (1 - eventOverlay.data.box[2] - eventOverlay.data.box[0]) * 100
-                        )}%`,
-                        bottom: `${Math.round(
-                          (1 - eventOverlay.data.box[3] - eventOverlay.data.box[1]) * 100
-                        )}%`,
-                      }}
-                    >
-                      {eventOverlay.class_type == 'entered_zone' ? (
-                        <div className="absolute w-2 h-2 bg-yellow-500 left-[50%] -translate-x-1/2 translate-y-3/4 bottom-0" />
-                      ) : null}
-                    </div>
+                    <TimelineEventOverlay
+                      eventOverlay={eventOverlay}
+                      cameraConfig={config.cameras[event.camera]}
+                    />
                   ) : null}
                 </VideoPlayer>