From 998bffe70654e89bac993ccb34f3ef8980896fcc Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Thu, 6 Jul 2023 06:25:37 -0600 Subject: [PATCH 01/25] Fix min region size not being divisible by 4 (#7040) * Fix min region size not being divisible by 4 * Simplify half calculation --- frigate/video.py | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/frigate/video.py b/frigate/video.py index 8980fcde0..2ba82a577 100755 --- a/frigate/video.py +++ b/frigate/video.py @@ -14,7 +14,7 @@ import cv2 import numpy as np from setproctitle import setproctitle -from frigate.config import CameraConfig, DetectConfig +from frigate.config import CameraConfig, DetectConfig, ModelConfig from frigate.const import ALL_ATTRIBUTE_LABELS, ATTRIBUTE_LABEL_MAP, CACHE_DIR from frigate.detectors.detector_config import PixelFormatEnum from frigate.log import LogPipe @@ -95,7 +95,17 @@ def filtered(obj, objects_to_track, object_filters): return False -def create_tensor_input(frame, model_config, region): +def get_min_region_size(model_config: ModelConfig) -> int: + """Get the min region size and ensure it is divisible by 4.""" + half = int(max(model_config.height, model_config.width) / 2) + + if half % 4 == 0: + return half + + return int((half + 3) / 4) * 4 + + +def create_tensor_input(frame, model_config: ModelConfig, region): if model_config.input_pixel_format == PixelFormatEnum.rgb: cropped_frame = yuv_region_2_rgb(frame, region) elif model_config.input_pixel_format == PixelFormatEnum.bgr: @@ -719,7 +729,7 @@ def process_frames( camera_name: str, frame_queue: mp.Queue, frame_shape, - model_config, + model_config: ModelConfig, detect_config: DetectConfig, frame_manager: FrameManager, motion_detector: MotionDetector, @@ -743,7 +753,7 @@ def process_frames( startup_scan_counter = 0 - region_min_size = int(max(model_config.height, model_config.width) / 2) + region_min_size = get_min_region_size(model_config) while not stop_event.is_set(): if exit_on_empty and 
frame_queue.empty(): From 0f68fbc8db9158c0d8b7523e003712ed7c35607d Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Thu, 6 Jul 2023 06:26:53 -0600 Subject: [PATCH 02/25] Use pre capture for custom events (#7038) * Use pre capture for custom events * Formatting --- frigate/events/external.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/frigate/events/external.py b/frigate/events/external.py index 20456b9cb..23439f2bd 100644 --- a/frigate/events/external.py +++ b/frigate/events/external.py @@ -57,8 +57,12 @@ class ExternalEventProcessor: "label": label, "sub_label": sub_label, "camera": camera, - "start_time": now, - "end_time": now + duration if duration is not None else None, + "start_time": now - camera_config.record.events.pre_capture, + "end_time": now + + duration + + camera_config.record.events.post_capture + if duration is not None + else None, "thumbnail": thumbnail, "has_clip": camera_config.record.enabled and include_recording, "has_snapshot": True, From 339b6944f13ba7b3ae3c924472b821c0a7ce66a8 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Thu, 6 Jul 2023 06:30:05 -0600 Subject: [PATCH 03/25] Force birdseye cameras into standard aspect ratios (#7026) * Force birdseye cameras into standard aspect ratios * Clarify comment * Formatting * Actually use the calculated aspect ratio when building the layout * Fix Y aspect * Force canvas into known aspect ratio as well * Save canvas size and don't recalculate * Cache coefficients that are used for different size layouts * Further optimize calculations to not be done multiple times --- frigate/output.py | 123 +++++++++++++++++++++++++++++----------------- 1 file changed, 79 insertions(+), 44 deletions(-) diff --git a/frigate/output.py b/frigate/output.py index ab928efb5..24f4c45cc 100644 --- a/frigate/output.py +++ b/frigate/output.py @@ -29,6 +29,61 @@ from frigate.util import SharedMemoryFrameManager, copy_yuv_to_position, get_yuv logger = logging.getLogger(__name__) +def 
get_standard_aspect_ratio(width, height) -> tuple[int, int]: + """Ensure that only standard aspect ratios are used.""" + known_aspects = [ + (16, 9), + (9, 16), + (32, 9), + (12, 9), + (9, 12), + ] # aspects are scaled to have common relative size + known_aspects_ratios = list( + map(lambda aspect: aspect[0] / aspect[1], known_aspects) + ) + closest = min( + known_aspects_ratios, + key=lambda x: abs(x - (width / height)), + ) + return known_aspects[known_aspects_ratios.index(closest)] + + +class Canvas: + def __init__(self, canvas_width: int, canvas_height: int) -> None: + gcd = math.gcd(canvas_width, canvas_height) + self.aspect = get_standard_aspect_ratio( + (canvas_width / gcd), (canvas_height / gcd) + ) + self.width = canvas_width + self.height = (self.width * self.aspect[1]) / self.aspect[0] + self.coefficient_cache: dict[int, int] = {} + self.aspect_cache: dict[str, tuple[int, int]] = {} + + def get_aspect(self, coefficient: int) -> tuple[int, int]: + return (self.aspect[0] * coefficient, self.aspect[1] * coefficient) + + def get_coefficient(self, camera_count: int) -> int: + return self.coefficient_cache.get(camera_count, 2) + + def set_coefficient(self, camera_count: int, coefficient: int) -> None: + self.coefficient_cache[camera_count] = coefficient + + def get_camera_aspect( + self, cam_name: str, camera_width: int, camera_height: int + ) -> tuple[int, int]: + cached = self.aspect_cache.get(cam_name) + + if cached: + return cached + + gcd = math.gcd(camera_width, camera_height) + camera_aspect = get_standard_aspect_ratio( + camera_width / gcd, camera_height / gcd + ) + self.aspect_cache[cam_name] = camera_aspect + return camera_aspect + + class FFMpegConverter: def __init__( self, @@ -170,6 +225,7 @@ class BirdsEyeFrameManager: self.frame_shape = (height, width) self.yuv_shape = (height * 3 // 2, width) self.frame = np.ndarray(self.yuv_shape, dtype=np.uint8) + self.canvas = Canvas(width, height) self.stop_event = stop_event # initialize the frame as black 
and with the Frigate logo @@ -318,16 +374,15 @@ class BirdsEyeFrameManager: ), ) - canvas_width = self.config.birdseye.width - canvas_height = self.config.birdseye.height - if len(active_cameras) == 1: # show single camera as fullscreen camera = active_cameras_to_add[0] camera_dims = self.cameras[camera]["dimensions"].copy() - scaled_width = int(canvas_height * camera_dims[0] / camera_dims[1]) + scaled_width = int(self.canvas.height * camera_dims[0] / camera_dims[1]) coefficient = ( - 1 if scaled_width <= canvas_width else canvas_width / scaled_width + 1 + if scaled_width <= self.canvas.width + else self.canvas.width / scaled_width ) self.camera_layout = [ [ @@ -337,14 +392,14 @@ class BirdsEyeFrameManager: 0, 0, int(scaled_width * coefficient), - int(canvas_height * coefficient), + int(self.canvas.height * coefficient), ), ) ] ] else: # calculate optimal layout - coefficient = 2 + coefficient = self.canvas.get_coefficient(len(active_cameras)) calculating = True # decrease scaling coefficient until height of all cameras can fit into the birdseye canvas @@ -353,7 +408,6 @@ class BirdsEyeFrameManager: return layout_candidate = self.calculate_layout( - (canvas_width, canvas_height), active_cameras_to_add, coefficient, ) @@ -367,6 +421,7 @@ class BirdsEyeFrameManager: return calculating = False + self.canvas.set_coefficient(len(active_cameras), coefficient) self.camera_layout = layout_candidate @@ -378,9 +433,7 @@ class BirdsEyeFrameManager: return True - def calculate_layout( - self, canvas, cameras_to_add: list[str], coefficient - ) -> tuple[any]: + def calculate_layout(self, cameras_to_add: list[str], coefficient) -> tuple[any]: """Calculate the optimal layout for 2+ cameras.""" def map_layout(row_height: int): @@ -397,23 +450,20 @@ class BirdsEyeFrameManager: x = starting_x for cameras in row: camera_dims = self.cameras[cameras[0]]["dimensions"].copy() + camera_aspect = cameras[1] if camera_dims[1] > camera_dims[0]: scaled_height = int(row_height * 2) - 
scaled_width = int( - scaled_height * camera_dims[0] / camera_dims[1] - ) + scaled_width = int(scaled_height * camera_aspect) starting_x = scaled_width else: scaled_height = row_height - scaled_width = int( - scaled_height * camera_dims[0] / camera_dims[1] - ) + scaled_width = int(scaled_height * camera_aspect) # layout is too large if ( - x + scaled_width > canvas_width - or y + scaled_height > canvas_height + x + scaled_width > self.canvas.width + or y + scaled_height > self.canvas.height ): return 0, 0, None @@ -425,13 +475,9 @@ class BirdsEyeFrameManager: return max_width, y, candidate_layout - canvas_width = canvas[0] - canvas_height = canvas[1] + canvas_aspect_x, canvas_aspect_y = self.canvas.get_aspect(coefficient) camera_layout: list[list[any]] = [] camera_layout.append([]) - canvas_gcd = math.gcd(canvas[0], canvas[1]) - canvas_aspect_x = (canvas[0] / canvas_gcd) * coefficient - canvas_aspect_y = (canvas[0] / canvas_gcd) * coefficient starting_x = 0 x = starting_x y = 0 @@ -439,18 +485,9 @@ class BirdsEyeFrameManager: max_y = 0 for camera in cameras_to_add: camera_dims = self.cameras[camera]["dimensions"].copy() - camera_gcd = math.gcd(camera_dims[0], camera_dims[1]) - camera_aspect_x = camera_dims[0] / camera_gcd - camera_aspect_y = camera_dims[1] / camera_gcd - - if round(camera_aspect_x / camera_aspect_y, 1) == 1.8: - # account for slightly off 16:9 cameras - camera_aspect_x = 16 - camera_aspect_y = 9 - elif round(camera_aspect_x / camera_aspect_y, 1) == 1.3: - # make 4:3 cameras the same relative size as 16:9 - camera_aspect_x = 12 - camera_aspect_y = 9 + camera_aspect_x, camera_aspect_y = self.canvas.get_camera_aspect( + camera, camera_dims[0], camera_dims[1] + ) if camera_dims[1] > camera_dims[0]: portrait = True @@ -462,10 +499,7 @@ class BirdsEyeFrameManager: camera_layout[y_i].append( ( camera, - ( - camera_aspect_x, - camera_aspect_y, - ), + camera_aspect_x / camera_aspect_y, ) ) @@ -491,7 +525,7 @@ class BirdsEyeFrameManager: 
camera_layout[y_i].append( ( camera, - (camera_aspect_x, camera_aspect_y), + camera_aspect_x / camera_aspect_y, ) ) x += camera_aspect_x @@ -499,15 +533,16 @@ class BirdsEyeFrameManager: if y + max_y > canvas_aspect_y: return None - row_height = int(canvas_height / coefficient) + row_height = int(self.canvas.height / coefficient) total_width, total_height, standard_candidate_layout = map_layout(row_height) # layout can't be optimized more - if total_width / canvas_width >= 0.99: + if total_width / self.canvas.width >= 0.99: return standard_candidate_layout scale_up_percent = min( - 1 - (total_width / canvas_width), 1 - (total_height / canvas_height) + 1 - (total_width / self.canvas.width), + 1 - (total_height / self.canvas.height), ) row_height = int(row_height * (1 + round(scale_up_percent, 1))) _, _, scaled_layout = map_layout(row_height) From f30ba254445221d1f9fb016457774c11e704048a Mon Sep 17 00:00:00 2001 From: Andrew Reiter Date: Thu, 6 Jul 2023 08:31:17 -0400 Subject: [PATCH 04/25] Reduce framerate before downscaling (#7022) * Reduce framerate before downscaling It is cheaper to drop frames and downscale those that remain than it is to downscale all frames and then drop some of them. This is achieved with the filter chain `-cv fps=FPS,scale=W:H`, and perhaps was the original intention. The plain `-r` and `-s` flags do not execute in order though - they each put themselves at the *end* of the filterchain, so `-r FPS -s WxH` actually applies the scale filter first, and then the rate filter. This fix can halve the CPU used by the detect ffmpeg process. 
* Bring back hard rate limits --- frigate/ffmpeg_presets.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/frigate/ffmpeg_presets.py b/frigate/ffmpeg_presets.py index a2785813c..3bbc36868 100644 --- a/frigate/ffmpeg_presets.py +++ b/frigate/ffmpeg_presets.py @@ -107,14 +107,14 @@ PRESETS_HW_ACCEL_DECODE = { } PRESETS_HW_ACCEL_SCALE = { - "preset-rpi-32-h264": "-r {0} -s {1}x{2}", - "preset-rpi-64-h264": "-r {0} -s {1}x{2}", + "preset-rpi-32-h264": "-r {0} -vf fps={0},scale={1}:{2}", + "preset-rpi-64-h264": "-r {0} -vf fps={0},scale={1}:{2}", "preset-vaapi": "-r {0} -vf fps={0},scale_vaapi=w={1}:h={2},hwdownload,format=yuv420p", "preset-intel-qsv-h264": "-r {0} -vf vpp_qsv=framerate={0}:w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p", "preset-intel-qsv-h265": "-r {0} -vf vpp_qsv=framerate={0}:w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p", "preset-nvidia-h264": "-r {0} -vf fps={0},scale_cuda=w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p", "preset-nvidia-h265": "-r {0} -vf fps={0},scale_cuda=w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p", - "default": "-r {0} -s {1}x{2}", + "default": "-r {0} -vf fps={0},scale={1}:{2}", } PRESETS_HW_ACCEL_ENCODE_BIRDSEYE = { From 325205740ba4d7c8b206e2cf159193f9009d12b5 Mon Sep 17 00:00:00 2001 From: Andrew Reiter Date: Thu, 6 Jul 2023 08:35:26 -0400 Subject: [PATCH 05/25] Check ffmpeg version instead of checking for presence of BTBN_PATH (#7023) * Check ffmpeg version instead of checking for presence of BTBN_PATH * Query ffmpeg version in s6 run script instead of subprocessing in every import * Define LIBAVFORMAT_VERSION_MAJOR in devcontainer too * Formatting * Default ffmpeg version to current btbn version so unit tests pass --- .devcontainer/post_create.sh | 5 +++++ docker/rootfs/etc/s6-overlay/s6-rc.d/frigate/run | 1 + docker/rootfs/usr/local/go2rtc/create_config.py | 4 ++-- frigate/const.py | 1 - frigate/ffmpeg_presets.py | 7 +++++-- 5 files 
changed, 13 insertions(+), 5 deletions(-) diff --git a/.devcontainer/post_create.sh b/.devcontainer/post_create.sh index 9c5dec5bb..1a1832f3b 100755 --- a/.devcontainer/post_create.sh +++ b/.devcontainer/post_create.sh @@ -14,6 +14,11 @@ curl -L https://api.github.com/meta | jq -r '.ssh_keys | .[]' | \ sudo mkdir -p /media/frigate sudo chown -R "$(id -u):$(id -g)" /media/frigate +# When started as a service, LIBAVFORMAT_VERSION_MAJOR is defined in the +# s6 service file. For dev, where frigate is started from an interactive +# shell, we define it in .bashrc instead. +echo 'export LIBAVFORMAT_VERSION_MAJOR=$(ffmpeg -version | grep -Po "libavformat\W+\K\d+")' >> $HOME/.bashrc + make version cd web diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/frigate/run b/docker/rootfs/etc/s6-overlay/s6-rc.d/frigate/run index 0a835550e..f2cc40fcf 100755 --- a/docker/rootfs/etc/s6-overlay/s6-rc.d/frigate/run +++ b/docker/rootfs/etc/s6-overlay/s6-rc.d/frigate/run @@ -44,6 +44,7 @@ function migrate_db_path() { echo "[INFO] Preparing Frigate..." migrate_db_path +export LIBAVFORMAT_VERSION_MAJOR=$(ffmpeg -version | grep -Po 'libavformat\W+\K\d+') echo "[INFO] Starting Frigate..." 
diff --git a/docker/rootfs/usr/local/go2rtc/create_config.py b/docker/rootfs/usr/local/go2rtc/create_config.py index 1397adee8..0531b173d 100644 --- a/docker/rootfs/usr/local/go2rtc/create_config.py +++ b/docker/rootfs/usr/local/go2rtc/create_config.py @@ -7,7 +7,7 @@ import sys import yaml sys.path.insert(0, "/opt/frigate") -from frigate.const import BIRDSEYE_PIPE, BTBN_PATH # noqa: E402 +from frigate.const import BIRDSEYE_PIPE # noqa: E402 from frigate.ffmpeg_presets import ( # noqa: E402 parse_preset_hardware_acceleration_encode, ) @@ -71,7 +71,7 @@ elif go2rtc_config["rtsp"].get("default_query") is None: go2rtc_config["rtsp"]["default_query"] = "mp4" # need to replace ffmpeg command when using ffmpeg4 -if not os.path.exists(BTBN_PATH): +if int(os.environ["LIBAVFORMAT_VERSION_MAJOR"]) < 59: if go2rtc_config.get("ffmpeg") is None: go2rtc_config["ffmpeg"] = { "rtsp": "-fflags nobuffer -flags low_delay -stimeout 5000000 -user_agent go2rtc/ffmpeg -rtsp_transport tcp -i {input}" diff --git a/frigate/const.py b/frigate/const.py index 20e2b0daa..b6b0e44bd 100644 --- a/frigate/const.py +++ b/frigate/const.py @@ -11,7 +11,6 @@ YAML_EXT = (".yaml", ".yml") FRIGATE_LOCALHOST = "http://127.0.0.1:5000" PLUS_ENV_VAR = "PLUS_API_KEY" PLUS_API_HOST = "https://api.frigate.video" -BTBN_PATH = "/usr/lib/btbn-ffmpeg" # Attributes diff --git a/frigate/ffmpeg_presets.py b/frigate/ffmpeg_presets.py index 3bbc36868..4c2e16dbb 100644 --- a/frigate/ffmpeg_presets.py +++ b/frigate/ffmpeg_presets.py @@ -5,7 +5,6 @@ import os from enum import Enum from typing import Any -from frigate.const import BTBN_PATH from frigate.util import vainfo_hwaccel from frigate.version import VERSION @@ -43,7 +42,11 @@ class LibvaGpuSelector: return "" -TIMEOUT_PARAM = "-timeout" if os.path.exists(BTBN_PATH) else "-stimeout" +TIMEOUT_PARAM = ( + "-timeout" + if int(os.getenv("LIBAVFORMAT_VERSION_MAJOR", "59")) >= 59 + else "-stimeout" +) _gpu_selector = LibvaGpuSelector() _user_agent_args = [ From 
d0891e518338e229bf08e8ca2242349c0346cfa3 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Thu, 6 Jul 2023 06:42:17 -0600 Subject: [PATCH 06/25] Ptz cleanup (#6999) * Use _ instead of - for ptz presets * Update mqtt.md * Fix * Formatting --- docs/docs/integrations/mqtt.md | 2 +- frigate/comms/dispatcher.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/docs/integrations/mqtt.md b/docs/docs/integrations/mqtt.md index 229285676..378eadb3e 100644 --- a/docs/docs/integrations/mqtt.md +++ b/docs/docs/integrations/mqtt.md @@ -184,7 +184,7 @@ Topic to send PTZ commands to camera. | Command | Description | | ---------------------- | ----------------------------------------------------------------------------------------- | -| `preset-` | send command to move to preset with name `` | +| `preset_` | send command to move to preset with name `` | | `MOVE_` | send command to continuously move in ``, possible values are [UP, DOWN, LEFT, RIGHT] | | `ZOOM_` | send command to continuously zoom ``, possible values are [IN, OUT] | | `STOP` | send command to stop moving | diff --git a/frigate/comms/dispatcher.py b/frigate/comms/dispatcher.py index 1c9105ce8..c8f90bad1 100644 --- a/frigate/comms/dispatcher.py +++ b/frigate/comms/dispatcher.py @@ -253,7 +253,7 @@ class Dispatcher: try: if "preset" in payload.lower(): command = OnvifCommandEnum.preset - param = payload.lower().split("-")[1] + param = payload.lower()[payload.index("_") + 1 :] else: command = OnvifCommandEnum[payload.lower()] param = "" From c38c981cd09a97f8e1ab0efea9da0980ae8b75e8 Mon Sep 17 00:00:00 2001 From: Sergey Krashevich Date: Thu, 6 Jul 2023 15:56:38 +0300 Subject: [PATCH 07/25] Performance: multiprocessing improvement: step 2 (#6986) * Refactored queues to use faster_fifo instead of mp.Queue * Refactored LimitedQueue to include a counter for the number of items in the queue and updated put and get methods to use the counter * Refactor app.py and util.py to use a custom Queue 
implementation called LQueue instead of the existing Queue * Refactor put and get methods in LimitedQueue to handle queue size and blocking behavior more efficiently * code format * remove code from other branch (merging fuckup) --- frigate/app.py | 20 ++++++++------- frigate/object_detection.py | 3 ++- frigate/record/maintainer.py | 4 +-- frigate/record/record.py | 3 ++- frigate/util.py | 50 ++++++++++++++++++++++++++++++++++++ frigate/video.py | 5 ++-- 6 files changed, 70 insertions(+), 15 deletions(-) diff --git a/frigate/app.py b/frigate/app.py index 4d4aa5dd4..ed32dee17 100644 --- a/frigate/app.py +++ b/frigate/app.py @@ -10,6 +10,7 @@ from multiprocessing.synchronize import Event as MpEvent from types import FrameType from typing import Optional +import faster_fifo as ff import psutil from faster_fifo import Queue from peewee_migrate import Router @@ -46,6 +47,7 @@ from frigate.stats import StatsEmitter, stats_init from frigate.storage import StorageMaintainer from frigate.timeline import TimelineProcessor from frigate.types import CameraMetricsTypes, FeatureMetricsTypes +from frigate.util import LimitedQueue as LQueue from frigate.version import VERSION from frigate.video import capture_camera, track_camera from frigate.watchdog import FrigateWatchdog @@ -56,11 +58,11 @@ logger = logging.getLogger(__name__) class FrigateApp: def __init__(self) -> None: self.stop_event: MpEvent = mp.Event() - self.detection_queue: Queue = mp.Queue() + self.detection_queue: Queue = ff.Queue() self.detectors: dict[str, ObjectDetectProcess] = {} self.detection_out_events: dict[str, MpEvent] = {} self.detection_shms: list[mp.shared_memory.SharedMemory] = [] - self.log_queue: Queue = mp.Queue() + self.log_queue: Queue = ff.Queue() self.plus_api = PlusApi() self.camera_metrics: dict[str, CameraMetricsTypes] = {} self.feature_metrics: dict[str, FeatureMetricsTypes] = {} @@ -156,7 +158,7 @@ class FrigateApp: "ffmpeg_pid": mp.Value("i", 0), # type: ignore[typeddict-item] # issue 
https://github.com/python/typeshed/issues/8799 # from mypy 0.981 onwards - "frame_queue": mp.Queue(maxsize=2), + "frame_queue": LQueue(maxsize=2), "capture_process": None, "process": None, } @@ -188,22 +190,22 @@ class FrigateApp: def init_queues(self) -> None: # Queues for clip processing - self.event_queue: Queue = mp.Queue() - self.event_processed_queue: Queue = mp.Queue() - self.video_output_queue: Queue = mp.Queue( + self.event_queue: Queue = ff.Queue() + self.event_processed_queue: Queue = ff.Queue() + self.video_output_queue: Queue = LQueue( maxsize=len(self.config.cameras.keys()) * 2 ) # Queue for cameras to push tracked objects to - self.detected_frames_queue: Queue = mp.Queue( + self.detected_frames_queue: Queue = LQueue( maxsize=len(self.config.cameras.keys()) * 2 ) # Queue for recordings info - self.recordings_info_queue: Queue = mp.Queue() + self.recordings_info_queue: Queue = ff.Queue() # Queue for timeline events - self.timeline_queue: Queue = mp.Queue() + self.timeline_queue: Queue = ff.Queue() def init_database(self) -> None: def vacuum_db(db: SqliteExtDatabase) -> None: diff --git a/frigate/object_detection.py b/frigate/object_detection.py index 0a2a7059c..cebd7ff41 100644 --- a/frigate/object_detection.py +++ b/frigate/object_detection.py @@ -7,6 +7,7 @@ import signal import threading from abc import ABC, abstractmethod +import faster_fifo as ff import numpy as np from setproctitle import setproctitle @@ -72,7 +73,7 @@ class LocalObjectDetector(ObjectDetector): def run_detector( name: str, - detection_queue: mp.Queue, + detection_queue: ff.Queue, out_events: dict[str, mp.Event], avg_speed, start, diff --git a/frigate/record/maintainer.py b/frigate/record/maintainer.py index 8e40fc6e7..b4208f2d2 100644 --- a/frigate/record/maintainer.py +++ b/frigate/record/maintainer.py @@ -3,7 +3,6 @@ import asyncio import datetime import logging -import multiprocessing as mp import os import queue import random @@ -15,6 +14,7 @@ from multiprocessing.synchronize 
import Event as MpEvent from pathlib import Path from typing import Any, Tuple +import faster_fifo as ff import psutil from frigate.config import FrigateConfig, RetainModeEnum @@ -30,7 +30,7 @@ class RecordingMaintainer(threading.Thread): def __init__( self, config: FrigateConfig, - recordings_info_queue: mp.Queue, + recordings_info_queue: ff.Queue, process_info: dict[str, FeatureMetricsTypes], stop_event: MpEvent, ): diff --git a/frigate/record/record.py b/frigate/record/record.py index 530adc031..0d22342aa 100644 --- a/frigate/record/record.py +++ b/frigate/record/record.py @@ -7,6 +7,7 @@ import threading from types import FrameType from typing import Optional +import faster_fifo as ff from playhouse.sqliteq import SqliteQueueDatabase from setproctitle import setproctitle @@ -22,7 +23,7 @@ logger = logging.getLogger(__name__) def manage_recordings( config: FrigateConfig, - recordings_info_queue: mp.Queue, + recordings_info_queue: ff.Queue, process_info: dict[str, FeatureMetricsTypes], ) -> None: stop_event = mp.Event() diff --git a/frigate/util.py b/frigate/util.py index f535a9572..cc9bb03c9 100755 --- a/frigate/util.py +++ b/frigate/util.py @@ -1,18 +1,22 @@ import copy +import ctypes import datetime import json import logging +import multiprocessing import os import re import shlex import signal import subprocess as sp +import time import traceback import urllib.parse from abc import ABC, abstractmethod from collections import Counter from collections.abc import Mapping from multiprocessing import shared_memory +from queue import Empty, Full from typing import Any, AnyStr, Optional, Tuple import cv2 @@ -21,6 +25,8 @@ import psutil import py3nvml.py3nvml as nvml import pytz import yaml +from faster_fifo import DEFAULT_CIRCULAR_BUFFER_SIZE, DEFAULT_TIMEOUT +from faster_fifo import Queue as FFQueue from frigate.const import REGEX_HTTP_CAMERA_USER_PASS, REGEX_RTSP_CAMERA_USER_PASS @@ -1218,3 +1224,47 @@ def get_video_properties(url, get_duration=False): 
result["height"] = round(height) return result + + +class LimitedQueue(FFQueue): + def __init__( + self, + maxsize=0, + max_size_bytes=DEFAULT_CIRCULAR_BUFFER_SIZE, + loads=None, + dumps=None, + ): + super().__init__(max_size_bytes=max_size_bytes, loads=loads, dumps=dumps) + self.maxsize = maxsize + self.size = multiprocessing.RawValue( + ctypes.c_int, 0 + ) # Add a counter for the number of items in the queue + + def put(self, x, block=True, timeout=DEFAULT_TIMEOUT): + if self.maxsize > 0 and self.size.value >= self.maxsize: + if block: + start_time = time.time() + while self.size.value >= self.maxsize: + remaining = timeout - (time.time() - start_time) + if remaining <= 0.0: + raise Full + time.sleep(min(remaining, 0.1)) + else: + raise Full + self.size.value += 1 + return super().put(x, block=block, timeout=timeout) + + def get(self, block=True, timeout=DEFAULT_TIMEOUT): + if self.size.value <= 0 and not block: + raise Empty + self.size.value -= 1 + return super().get(block=block, timeout=timeout) + + def qsize(self): + return self.size + + def empty(self): + return self.qsize() == 0 + + def full(self): + return self.qsize() == self.maxsize diff --git a/frigate/video.py b/frigate/video.py index 2ba82a577..5928a84d5 100755 --- a/frigate/video.py +++ b/frigate/video.py @@ -11,6 +11,7 @@ import time from collections import defaultdict import cv2 +import faster_fifo as ff import numpy as np from setproctitle import setproctitle @@ -727,7 +728,7 @@ def get_consolidated_object_detections(detected_object_groups): def process_frames( camera_name: str, - frame_queue: mp.Queue, + frame_queue: ff.Queue, frame_shape, model_config: ModelConfig, detect_config: DetectConfig, @@ -735,7 +736,7 @@ def process_frames( motion_detector: MotionDetector, object_detector: RemoteObjectDetector, object_tracker: ObjectTracker, - detected_objects_queue: mp.Queue, + detected_objects_queue: ff.Queue, process_info: dict, objects_to_track: list[str], object_filters, From 
2fae9dcb93f41936ee1907fc0813719c8559fe1b Mon Sep 17 00:00:00 2001 From: Cody Cutrer Date: Thu, 6 Jul 2023 07:18:39 -0600 Subject: [PATCH 08/25] reduce contention on frame_queue (#6890) * reduce contention on frame_queue don't check if the queue is full, just attempt to add the frame in a non-blocking manner, and then if it fails, skip it * don't check if the frame queue is empty, just try and get from it * Update frigate/video.py Co-authored-by: Blake Blackshear --------- Co-authored-by: Blake Blackshear --- frigate/video.py | 29 +++++++++++++++-------------- 1 file changed, 15 insertions(+), 14 deletions(-) diff --git a/frigate/video.py b/frigate/video.py index 5928a84d5..630859b49 100755 --- a/frigate/video.py +++ b/frigate/video.py @@ -206,17 +206,16 @@ def capture_frames( frame_rate.update() - # if the queue is full, skip this frame - if frame_queue.full(): + # don't lock the queue to check, just try since it should rarely be full + try: + # add to the queue + frame_queue.put(current_frame.value, False) + # close the frame + frame_manager.close(frame_name) + except queue.Full: + # if the queue is full, skip this frame skipped_eps.update() frame_manager.delete(frame_name) - continue - - # close the frame - frame_manager.close(frame_name) - - # add to the queue - frame_queue.put(current_frame.value) class CameraWatchdog(threading.Thread): @@ -757,13 +756,15 @@ def process_frames( region_min_size = get_min_region_size(model_config) while not stop_event.is_set(): - if exit_on_empty and frame_queue.empty(): - logger.info("Exiting track_objects...") - break - try: - frame_time = frame_queue.get(True, 1) + if exit_on_empty: + frame_time = frame_queue.get(False) + else: + frame_time = frame_queue.get(True, 1) except queue.Empty: + if exit_on_empty: + logger.info("Exiting track_objects...") + break continue current_frame_time.value = frame_time From 606f00867e73366db984017c81c08790ce30cf19 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Thu, 6 Jul 2023 08:11:49 -0600 
Subject: [PATCH 09/25] Fix go2rtc getting env var (#7052) --- docker/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/run | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/run b/docker/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/run index 85c8f9526..fd5fcb568 100755 --- a/docker/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/run +++ b/docker/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/run @@ -43,6 +43,8 @@ function get_ip_and_port_from_supervisor() { export FRIGATE_GO2RTC_WEBRTC_CANDIDATE_INTERNAL="${ip_address}:${webrtc_port}" } +export LIBAVFORMAT_VERSION_MAJOR=$(ffmpeg -version | grep -Po 'libavformat\W+\K\d+') + if [[ ! -f "/dev/shm/go2rtc.yaml" ]]; then echo "[INFO] Preparing go2rtc config..." From baf671b7649e98068bd3b0f03ed8f03adbf46f03 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Thu, 6 Jul 2023 08:28:50 -0600 Subject: [PATCH 10/25] Cleanup and organize utils (#7033) * Force birdseye cameras into standard aspect ratios * Organize utils * Update tests * Formatting * Isort * Fix tests * Cleanup * isort --- frigate/app.py | 2 +- frigate/comms/dispatcher.py | 2 +- frigate/config.py | 4 +- frigate/detectors/detector_config.py | 2 +- frigate/events/audio.py | 3 +- frigate/events/external.py | 2 +- frigate/events/maintainer.py | 2 +- frigate/ffmpeg_presets.py | 2 +- frigate/http.py | 9 +- frigate/log.py | 2 +- frigate/object_detection.py | 4 +- frigate/object_processing.py | 2 +- frigate/output.py | 6 +- frigate/record/maintainer.py | 3 +- frigate/record/record.py | 2 +- frigate/stats.py | 2 +- frigate/test/test_camera_pw.py | 2 +- frigate/test/test_config.py | 2 +- frigate/test/test_copy_yuv_to_position.py | 2 +- frigate/test/test_gpu_stats.py | 2 +- frigate/test/test_video.py | 2 +- frigate/test/test_yuv_region_2_rgb.py | 2 +- frigate/timeline.py | 2 +- frigate/track/norfair_tracker.py | 2 +- frigate/util/builtin.py | 226 ++++++++ frigate/{util.py => util/image.py} | 652 +--------------------- frigate/util/services.py | 403 +++++++++++++ 
frigate/video.py | 6 +- frigate/watchdog.py | 2 +- 29 files changed, 689 insertions(+), 665 deletions(-) create mode 100644 frigate/util/builtin.py rename frigate/{util.py => util/image.py} (52%) mode change 100755 => 100644 create mode 100644 frigate/util/services.py diff --git a/frigate/app.py b/frigate/app.py index ed32dee17..96c2f1d27 100644 --- a/frigate/app.py +++ b/frigate/app.py @@ -47,7 +47,7 @@ from frigate.stats import StatsEmitter, stats_init from frigate.storage import StorageMaintainer from frigate.timeline import TimelineProcessor from frigate.types import CameraMetricsTypes, FeatureMetricsTypes -from frigate.util import LimitedQueue as LQueue +from frigate.util.builtin import LimitedQueue as LQueue from frigate.version import VERSION from frigate.video import capture_camera, track_camera from frigate.watchdog import FrigateWatchdog diff --git a/frigate/comms/dispatcher.py b/frigate/comms/dispatcher.py index c8f90bad1..ada7e4cb4 100644 --- a/frigate/comms/dispatcher.py +++ b/frigate/comms/dispatcher.py @@ -7,7 +7,7 @@ from typing import Any, Callable from frigate.config import FrigateConfig from frigate.ptz import OnvifCommandEnum, OnvifController from frigate.types import CameraMetricsTypes, FeatureMetricsTypes -from frigate.util import restart_frigate +from frigate.util.services import restart_frigate logger = logging.getLogger(__name__) diff --git a/frigate/config.py b/frigate/config.py index ea7ecdc49..9399320fe 100644 --- a/frigate/config.py +++ b/frigate/config.py @@ -22,13 +22,13 @@ from frigate.ffmpeg_presets import ( parse_preset_output_rtmp, ) from frigate.plus import PlusApi -from frigate.util import ( - create_mask, +from frigate.util.builtin import ( deep_merge, escape_special_characters, get_ffmpeg_arg_list, load_config_with_no_duplicates, ) +from frigate.util.image import create_mask logger = logging.getLogger(__name__) diff --git a/frigate/detectors/detector_config.py b/frigate/detectors/detector_config.py index f65826a57..ca1915449 
100644 --- a/frigate/detectors/detector_config.py +++ b/frigate/detectors/detector_config.py @@ -11,7 +11,7 @@ from pydantic import BaseModel, Extra, Field from pydantic.fields import PrivateAttr from frigate.plus import PlusApi -from frigate.util import load_labels +from frigate.util.builtin import load_labels logger = logging.getLogger(__name__) diff --git a/frigate/events/audio.py b/frigate/events/audio.py index 488c94fcc..de0f07e0c 100644 --- a/frigate/events/audio.py +++ b/frigate/events/audio.py @@ -26,7 +26,8 @@ from frigate.ffmpeg_presets import parse_preset_input from frigate.log import LogPipe from frigate.object_detection import load_labels from frigate.types import FeatureMetricsTypes -from frigate.util import get_ffmpeg_arg_list, listen +from frigate.util.builtin import get_ffmpeg_arg_list +from frigate.util.services import listen from frigate.video import start_or_restart_ffmpeg, stop_ffmpeg try: diff --git a/frigate/events/external.py b/frigate/events/external.py index 23439f2bd..a801e6d24 100644 --- a/frigate/events/external.py +++ b/frigate/events/external.py @@ -14,7 +14,7 @@ from faster_fifo import Queue from frigate.config import CameraConfig, FrigateConfig from frigate.const import CLIPS_DIR from frigate.events.maintainer import EventTypeEnum -from frigate.util import draw_box_with_label +from frigate.util.image import draw_box_with_label logger = logging.getLogger(__name__) diff --git a/frigate/events/maintainer.py b/frigate/events/maintainer.py index 34cb01261..9640128e1 100644 --- a/frigate/events/maintainer.py +++ b/frigate/events/maintainer.py @@ -11,7 +11,7 @@ from faster_fifo import Queue from frigate.config import EventsConfig, FrigateConfig from frigate.models import Event from frigate.types import CameraMetricsTypes -from frigate.util import to_relative_box +from frigate.util.builtin import to_relative_box logger = logging.getLogger(__name__) diff --git a/frigate/ffmpeg_presets.py b/frigate/ffmpeg_presets.py index 4c2e16dbb..43d2504bd 
100644 --- a/frigate/ffmpeg_presets.py +++ b/frigate/ffmpeg_presets.py @@ -5,7 +5,7 @@ import os from enum import Enum from typing import Any -from frigate.util import vainfo_hwaccel +from frigate.util.services import vainfo_hwaccel from frigate.version import VERSION logger = logging.getLogger(__name__) diff --git a/frigate/http.py b/frigate/http.py index 57b2103e7..fe6dc54ef 100644 --- a/frigate/http.py +++ b/frigate/http.py @@ -38,13 +38,8 @@ from frigate.ptz import OnvifController from frigate.record.export import PlaybackFactorEnum, RecordingExporter from frigate.stats import stats_snapshot from frigate.storage import StorageMaintainer -from frigate.util import ( - clean_camera_user_pass, - ffprobe_stream, - get_tz_modifiers, - restart_frigate, - vainfo_hwaccel, -) +from frigate.util.builtin import clean_camera_user_pass, get_tz_modifiers +from frigate.util.services import ffprobe_stream, restart_frigate, vainfo_hwaccel from frigate.version import VERSION logger = logging.getLogger(__name__) diff --git a/frigate/log.py b/frigate/log.py index ac51fc3da..e839de76d 100644 --- a/frigate/log.py +++ b/frigate/log.py @@ -13,7 +13,7 @@ from typing import Deque, Optional from faster_fifo import Queue from setproctitle import setproctitle -from frigate.util import clean_camera_user_pass +from frigate.util.builtin import clean_camera_user_pass def listener_configurer() -> None: diff --git a/frigate/object_detection.py b/frigate/object_detection.py index cebd7ff41..a0c31b755 100644 --- a/frigate/object_detection.py +++ b/frigate/object_detection.py @@ -13,7 +13,9 @@ from setproctitle import setproctitle from frigate.detectors import create_detector from frigate.detectors.detector_config import InputTensorEnum -from frigate.util import EventsPerSecond, SharedMemoryFrameManager, listen, load_labels +from frigate.util.builtin import EventsPerSecond, load_labels +from frigate.util.image import SharedMemoryFrameManager +from frigate.util.services import listen logger = 
logging.getLogger(__name__) diff --git a/frigate/object_processing.py b/frigate/object_processing.py index e69210cce..d7151a6c8 100644 --- a/frigate/object_processing.py +++ b/frigate/object_processing.py @@ -22,7 +22,7 @@ from frigate.config import ( ) from frigate.const import CLIPS_DIR from frigate.events.maintainer import EventTypeEnum -from frigate.util import ( +from frigate.util.image import ( SharedMemoryFrameManager, area, calculate_region, diff --git a/frigate/output.py b/frigate/output.py index 24f4c45cc..80f084edb 100644 --- a/frigate/output.py +++ b/frigate/output.py @@ -24,7 +24,11 @@ from ws4py.websocket import WebSocket from frigate.config import BirdseyeModeEnum, FrigateConfig from frigate.const import BASE_DIR, BIRDSEYE_PIPE -from frigate.util import SharedMemoryFrameManager, copy_yuv_to_position, get_yuv_crop +from frigate.util.image import ( + SharedMemoryFrameManager, + copy_yuv_to_position, + get_yuv_crop, +) logger = logging.getLogger(__name__) diff --git a/frigate/record/maintainer.py b/frigate/record/maintainer.py index b4208f2d2..d21affefa 100644 --- a/frigate/record/maintainer.py +++ b/frigate/record/maintainer.py @@ -21,7 +21,8 @@ from frigate.config import FrigateConfig, RetainModeEnum from frigate.const import CACHE_DIR, MAX_SEGMENT_DURATION, RECORD_DIR from frigate.models import Event, Recordings from frigate.types import FeatureMetricsTypes -from frigate.util import area, get_video_properties +from frigate.util.image import area +from frigate.util.services import get_video_properties logger = logging.getLogger(__name__) diff --git a/frigate/record/record.py b/frigate/record/record.py index 0d22342aa..3e812a809 100644 --- a/frigate/record/record.py +++ b/frigate/record/record.py @@ -16,7 +16,7 @@ from frigate.models import Event, Recordings, RecordingsToDelete, Timeline from frigate.record.cleanup import RecordingCleanup from frigate.record.maintainer import RecordingMaintainer from frigate.types import FeatureMetricsTypes -from 
frigate.util import listen +from frigate.util.services import listen logger = logging.getLogger(__name__) diff --git a/frigate/stats.py b/frigate/stats.py index 096ad7913..5fdc671ee 100644 --- a/frigate/stats.py +++ b/frigate/stats.py @@ -17,7 +17,7 @@ from frigate.config import FrigateConfig from frigate.const import CACHE_DIR, CLIPS_DIR, DRIVER_AMD, DRIVER_ENV_VAR, RECORD_DIR from frigate.object_detection import ObjectDetectProcess from frigate.types import CameraMetricsTypes, StatsTrackingTypes -from frigate.util import ( +from frigate.util.services import ( get_amd_gpu_stats, get_bandwidth_stats, get_cpu_stats, diff --git a/frigate/test/test_camera_pw.py b/frigate/test/test_camera_pw.py index 92aec48d8..137d3aad0 100644 --- a/frigate/test/test_camera_pw.py +++ b/frigate/test/test_camera_pw.py @@ -2,7 +2,7 @@ import unittest -from frigate.util import clean_camera_user_pass, escape_special_characters +from frigate.util.builtin import clean_camera_user_pass, escape_special_characters class TestUserPassCleanup(unittest.TestCase): diff --git a/frigate/test/test_config.py b/frigate/test/test_config.py index 8e767f77f..f87317817 100644 --- a/frigate/test/test_config.py +++ b/frigate/test/test_config.py @@ -9,7 +9,7 @@ from frigate.config import BirdseyeModeEnum, FrigateConfig from frigate.const import MODEL_CACHE_DIR from frigate.detectors import DetectorTypeEnum from frigate.plus import PlusApi -from frigate.util import deep_merge, load_config_with_no_duplicates +from frigate.util.builtin import deep_merge, load_config_with_no_duplicates class TestConfig(unittest.TestCase): diff --git a/frigate/test/test_copy_yuv_to_position.py b/frigate/test/test_copy_yuv_to_position.py index 33582e2d1..4a31928bd 100644 --- a/frigate/test/test_copy_yuv_to_position.py +++ b/frigate/test/test_copy_yuv_to_position.py @@ -3,7 +3,7 @@ from unittest import TestCase, main import cv2 import numpy as np -from frigate.util import copy_yuv_to_position, get_yuv_crop +from frigate.util.image 
import copy_yuv_to_position, get_yuv_crop class TestCopyYuvToPosition(TestCase): diff --git a/frigate/test/test_gpu_stats.py b/frigate/test/test_gpu_stats.py index 2cf92def1..4c0a9938e 100644 --- a/frigate/test/test_gpu_stats.py +++ b/frigate/test/test_gpu_stats.py @@ -1,7 +1,7 @@ import unittest from unittest.mock import MagicMock, patch -from frigate.util import get_amd_gpu_stats, get_intel_gpu_stats +from frigate.util.services import get_amd_gpu_stats, get_intel_gpu_stats class TestGpuStats(unittest.TestCase): diff --git a/frigate/test/test_video.py b/frigate/test/test_video.py index e5f7e83fd..99736f658 100644 --- a/frigate/test/test_video.py +++ b/frigate/test/test_video.py @@ -5,7 +5,7 @@ import numpy as np from norfair.drawing.color import Palette from norfair.drawing.drawer import Drawer -from frigate.util import intersection +from frigate.util.image import intersection from frigate.video import ( get_cluster_boundary, get_cluster_candidates, diff --git a/frigate/test/test_yuv_region_2_rgb.py b/frigate/test/test_yuv_region_2_rgb.py index a56a78b1c..10144e674 100644 --- a/frigate/test/test_yuv_region_2_rgb.py +++ b/frigate/test/test_yuv_region_2_rgb.py @@ -3,7 +3,7 @@ from unittest import TestCase, main import cv2 import numpy as np -from frigate.util import yuv_region_2_rgb +from frigate.util.image import yuv_region_2_rgb class TestYuvRegion2RGB(TestCase): diff --git a/frigate/timeline.py b/frigate/timeline.py index 6cfcbe928..48392ad96 100644 --- a/frigate/timeline.py +++ b/frigate/timeline.py @@ -10,7 +10,7 @@ from faster_fifo import Queue from frigate.config import FrigateConfig from frigate.events.maintainer import EventTypeEnum from frigate.models import Timeline -from frigate.util import to_relative_box +from frigate.util.builtin import to_relative_box logger = logging.getLogger(__name__) diff --git a/frigate/track/norfair_tracker.py b/frigate/track/norfair_tracker.py index b0c4621b4..9389d2973 100644 --- a/frigate/track/norfair_tracker.py +++ 
b/frigate/track/norfair_tracker.py @@ -7,7 +7,7 @@ from norfair.drawing.drawer import Drawer from frigate.config import DetectConfig from frigate.track import ObjectTracker -from frigate.util import intersection_over_union +from frigate.util.image import intersection_over_union # Normalizes distance from estimate relative to object size diff --git a/frigate/util/builtin.py b/frigate/util/builtin.py new file mode 100644 index 000000000..900764a23 --- /dev/null +++ b/frigate/util/builtin.py @@ -0,0 +1,226 @@ +"""Utilities for builtin types manipulation.""" + +import copy +import ctypes +import datetime +import logging +import multiprocessing +import re +import shlex +import time +import urllib.parse +from collections import Counter +from collections.abc import Mapping +from queue import Empty, Full +from typing import Any, Tuple + +import pytz +import yaml +from faster_fifo import DEFAULT_CIRCULAR_BUFFER_SIZE, DEFAULT_TIMEOUT +from faster_fifo import Queue as FFQueue + +from frigate.const import REGEX_HTTP_CAMERA_USER_PASS, REGEX_RTSP_CAMERA_USER_PASS + +logger = logging.getLogger(__name__) + + +class EventsPerSecond: + def __init__(self, max_events=1000, last_n_seconds=10): + self._start = None + self._max_events = max_events + self._last_n_seconds = last_n_seconds + self._timestamps = [] + + def start(self): + self._start = datetime.datetime.now().timestamp() + + def update(self): + now = datetime.datetime.now().timestamp() + if self._start is None: + self._start = now + self._timestamps.append(now) + # truncate the list when it goes 100 over the max_size + if len(self._timestamps) > self._max_events + 100: + self._timestamps = self._timestamps[(1 - self._max_events) :] + self.expire_timestamps(now) + + def eps(self): + now = datetime.datetime.now().timestamp() + if self._start is None: + self._start = now + # compute the (approximate) events in the last n seconds + self.expire_timestamps(now) + seconds = min(now - self._start, self._last_n_seconds) + # avoid 
divide by zero + if seconds == 0: + seconds = 1 + return len(self._timestamps) / seconds + + # remove aged out timestamps + def expire_timestamps(self, now): + threshold = now - self._last_n_seconds + while self._timestamps and self._timestamps[0] < threshold: + del self._timestamps[0] + + +class LimitedQueue(FFQueue): + def __init__( + self, + maxsize=0, + max_size_bytes=DEFAULT_CIRCULAR_BUFFER_SIZE, + loads=None, + dumps=None, + ): + super().__init__(max_size_bytes=max_size_bytes, loads=loads, dumps=dumps) + self.maxsize = maxsize + self.size = multiprocessing.RawValue( + ctypes.c_int, 0 + ) # Add a counter for the number of items in the queue + + def put(self, x, block=True, timeout=DEFAULT_TIMEOUT): + if self.maxsize > 0 and self.size.value >= self.maxsize: + if block: + start_time = time.time() + while self.size.value >= self.maxsize: + remaining = timeout - (time.time() - start_time) + if remaining <= 0.0: + raise Full + time.sleep(min(remaining, 0.1)) + else: + raise Full + self.size.value += 1 + return super().put(x, block=block, timeout=timeout) + + def get(self, block=True, timeout=DEFAULT_TIMEOUT): + if self.size.value <= 0 and not block: + raise Empty + self.size.value -= 1 + return super().get(block=block, timeout=timeout) + + def qsize(self): + return self.size + + def empty(self): + return self.qsize() == 0 + + def full(self): + return self.qsize() == self.maxsize + + +def deep_merge(dct1: dict, dct2: dict, override=False, merge_lists=False) -> dict: + """ + :param dct1: First dict to merge + :param dct2: Second dict to merge + :param override: if same key exists in both dictionaries, should override? otherwise ignore. 
(default=True) + :return: The merge dictionary + """ + merged = copy.deepcopy(dct1) + for k, v2 in dct2.items(): + if k in merged: + v1 = merged[k] + if isinstance(v1, dict) and isinstance(v2, Mapping): + merged[k] = deep_merge(v1, v2, override) + elif isinstance(v1, list) and isinstance(v2, list): + if merge_lists: + merged[k] = v1 + v2 + else: + if override: + merged[k] = copy.deepcopy(v2) + else: + merged[k] = copy.deepcopy(v2) + return merged + + +def load_config_with_no_duplicates(raw_config) -> dict: + """Get config ensuring duplicate keys are not allowed.""" + + # https://stackoverflow.com/a/71751051 + class PreserveDuplicatesLoader(yaml.loader.Loader): + pass + + def map_constructor(loader, node, deep=False): + keys = [loader.construct_object(node, deep=deep) for node, _ in node.value] + vals = [loader.construct_object(node, deep=deep) for _, node in node.value] + key_count = Counter(keys) + data = {} + for key, val in zip(keys, vals): + if key_count[key] > 1: + raise ValueError( + f"Config input {key} is defined multiple times for the same field, this is not allowed." 
+ ) + else: + data[key] = val + return data + + PreserveDuplicatesLoader.add_constructor( + yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, map_constructor + ) + return yaml.load(raw_config, PreserveDuplicatesLoader) + + +def clean_camera_user_pass(line: str) -> str: + """Removes user and password from line.""" + if "rtsp://" in line: + return re.sub(REGEX_RTSP_CAMERA_USER_PASS, "://*:*@", line) + else: + return re.sub(REGEX_HTTP_CAMERA_USER_PASS, "user=*&password=*", line) + + +def escape_special_characters(path: str) -> str: + """Cleans reserved characters to encodings for ffmpeg.""" + try: + found = re.search(REGEX_RTSP_CAMERA_USER_PASS, path).group(0)[3:-1] + pw = found[(found.index(":") + 1) :] + return path.replace(pw, urllib.parse.quote_plus(pw)) + except AttributeError: + # path does not have user:pass + return path + + +def get_ffmpeg_arg_list(arg: Any) -> list: + """Use arg if list or convert to list format.""" + return arg if isinstance(arg, list) else shlex.split(arg) + + +def load_labels(path, encoding="utf-8"): + """Loads labels from file (with or without index numbers). + Args: + path: path to label file. + encoding: label file encoding. + Returns: + Dictionary mapping indices to labels. 
+ """ + with open(path, "r", encoding=encoding) as f: + labels = {index: "unknown" for index in range(91)} + lines = f.readlines() + if not lines: + return {} + + if lines[0].split(" ", maxsplit=1)[0].isdigit(): + pairs = [line.split(" ", maxsplit=1) for line in lines] + labels.update({int(index): label.strip() for index, label in pairs}) + else: + labels.update({index: line.strip() for index, line in enumerate(lines)}) + return labels + + +def get_tz_modifiers(tz_name: str) -> Tuple[str, str]: + seconds_offset = ( + datetime.datetime.now(pytz.timezone(tz_name)).utcoffset().total_seconds() + ) + hours_offset = int(seconds_offset / 60 / 60) + minutes_offset = int(seconds_offset / 60 - hours_offset * 60) + hour_modifier = f"{hours_offset} hour" + minute_modifier = f"{minutes_offset} minute" + return hour_modifier, minute_modifier + + +def to_relative_box( + width: int, height: int, box: Tuple[int, int, int, int] +) -> Tuple[int, int, int, int]: + return ( + box[0] / width, # x + box[1] / height, # y + (box[2] - box[0]) / width, # w + (box[3] - box[1]) / height, # h + ) diff --git a/frigate/util.py b/frigate/util/image.py old mode 100755 new mode 100644 similarity index 52% rename from frigate/util.py rename to frigate/util/image.py index cc9bb03c9..4af94500d --- a/frigate/util.py +++ b/frigate/util/image.py @@ -1,89 +1,17 @@ -import copy -import ctypes +"""Utilities for creating and manipulating image frames.""" + import datetime -import json import logging -import multiprocessing -import os -import re -import shlex -import signal -import subprocess as sp -import time -import traceback -import urllib.parse from abc import ABC, abstractmethod -from collections import Counter -from collections.abc import Mapping from multiprocessing import shared_memory -from queue import Empty, Full -from typing import Any, AnyStr, Optional, Tuple +from typing import AnyStr, Optional import cv2 import numpy as np -import psutil -import py3nvml.py3nvml as nvml -import pytz -import yaml 
-from faster_fifo import DEFAULT_CIRCULAR_BUFFER_SIZE, DEFAULT_TIMEOUT -from faster_fifo import Queue as FFQueue - -from frigate.const import REGEX_HTTP_CAMERA_USER_PASS, REGEX_RTSP_CAMERA_USER_PASS logger = logging.getLogger(__name__) -def deep_merge(dct1: dict, dct2: dict, override=False, merge_lists=False) -> dict: - """ - :param dct1: First dict to merge - :param dct2: Second dict to merge - :param override: if same key exists in both dictionaries, should override? otherwise ignore. (default=True) - :return: The merge dictionary - """ - merged = copy.deepcopy(dct1) - for k, v2 in dct2.items(): - if k in merged: - v1 = merged[k] - if isinstance(v1, dict) and isinstance(v2, Mapping): - merged[k] = deep_merge(v1, v2, override) - elif isinstance(v1, list) and isinstance(v2, list): - if merge_lists: - merged[k] = v1 + v2 - else: - if override: - merged[k] = copy.deepcopy(v2) - else: - merged[k] = copy.deepcopy(v2) - return merged - - -def load_config_with_no_duplicates(raw_config) -> dict: - """Get config ensuring duplicate keys are not allowed.""" - - # https://stackoverflow.com/a/71751051 - class PreserveDuplicatesLoader(yaml.loader.Loader): - pass - - def map_constructor(loader, node, deep=False): - keys = [loader.construct_object(node, deep=deep) for node, _ in node.value] - vals = [loader.construct_object(node, deep=deep) for _, node in node.value] - key_count = Counter(keys) - data = {} - for key, val in zip(keys, vals): - if key_count[key] > 1: - raise ValueError( - f"Config input {key} is defined multiple times for the same field, this is not allowed." 
- ) - else: - data[key] = val - return data - - PreserveDuplicatesLoader.add_constructor( - yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, map_constructor - ) - return yaml.load(raw_config, PreserveDuplicatesLoader) - - def draw_timestamp( frame, timestamp, @@ -645,432 +573,6 @@ def clipped(obj, frame_shape): return False -def restart_frigate(): - proc = psutil.Process(1) - # if this is running via s6, sigterm pid 1 - if proc.name() == "s6-svscan": - proc.terminate() - # otherwise, just try and exit frigate - else: - os.kill(os.getpid(), signal.SIGTERM) - - -class EventsPerSecond: - def __init__(self, max_events=1000, last_n_seconds=10): - self._start = None - self._max_events = max_events - self._last_n_seconds = last_n_seconds - self._timestamps = [] - - def start(self): - self._start = datetime.datetime.now().timestamp() - - def update(self): - now = datetime.datetime.now().timestamp() - if self._start is None: - self._start = now - self._timestamps.append(now) - # truncate the list when it goes 100 over the max_size - if len(self._timestamps) > self._max_events + 100: - self._timestamps = self._timestamps[(1 - self._max_events) :] - self.expire_timestamps(now) - - def eps(self): - now = datetime.datetime.now().timestamp() - if self._start is None: - self._start = now - # compute the (approximate) events in the last n seconds - self.expire_timestamps(now) - seconds = min(now - self._start, self._last_n_seconds) - # avoid divide by zero - if seconds == 0: - seconds = 1 - return len(self._timestamps) / seconds - - # remove aged out timestamps - def expire_timestamps(self, now): - threshold = now - self._last_n_seconds - while self._timestamps and self._timestamps[0] < threshold: - del self._timestamps[0] - - -def print_stack(sig, frame): - traceback.print_stack(frame) - - -def listen(): - signal.signal(signal.SIGUSR1, print_stack) - - -def create_mask(frame_shape, mask): - mask_img = np.zeros(frame_shape, np.uint8) - mask_img[:] = 255 - - if isinstance(mask, 
list): - for m in mask: - add_mask(m, mask_img) - - elif isinstance(mask, str): - add_mask(mask, mask_img) - - return mask_img - - -def add_mask(mask, mask_img): - points = mask.split(",") - contour = np.array( - [[int(points[i]), int(points[i + 1])] for i in range(0, len(points), 2)] - ) - cv2.fillPoly(mask_img, pts=[contour], color=(0)) - - -def load_labels(path, encoding="utf-8"): - """Loads labels from file (with or without index numbers). - Args: - path: path to label file. - encoding: label file encoding. - Returns: - Dictionary mapping indices to labels. - """ - with open(path, "r", encoding=encoding) as f: - labels = {index: "unknown" for index in range(91)} - lines = f.readlines() - if not lines: - return {} - - if lines[0].split(" ", maxsplit=1)[0].isdigit(): - pairs = [line.split(" ", maxsplit=1) for line in lines] - labels.update({int(index): label.strip() for index, label in pairs}) - else: - labels.update({index: line.strip() for index, line in enumerate(lines)}) - return labels - - -def clean_camera_user_pass(line: str) -> str: - """Removes user and password from line.""" - if "rtsp://" in line: - return re.sub(REGEX_RTSP_CAMERA_USER_PASS, "://*:*@", line) - else: - return re.sub(REGEX_HTTP_CAMERA_USER_PASS, "user=*&password=*", line) - - -def escape_special_characters(path: str) -> str: - """Cleans reserved characters to encodings for ffmpeg.""" - try: - found = re.search(REGEX_RTSP_CAMERA_USER_PASS, path).group(0)[3:-1] - pw = found[(found.index(":") + 1) :] - return path.replace(pw, urllib.parse.quote_plus(pw)) - except AttributeError: - # path does not have user:pass - return path - - -def get_cgroups_version() -> str: - """Determine what version of cgroups is enabled.""" - - cgroup_path = "/sys/fs/cgroup" - - if not os.path.ismount(cgroup_path): - logger.debug(f"{cgroup_path} is not a mount point.") - return "unknown" - - try: - with open("/proc/mounts", "r") as f: - mounts = f.readlines() - - for mount in mounts: - mount_info = mount.split() - 
if mount_info[1] == cgroup_path: - fs_type = mount_info[2] - if fs_type == "cgroup2fs" or fs_type == "cgroup2": - return "cgroup2" - elif fs_type == "tmpfs": - return "cgroup" - else: - logger.debug( - f"Could not determine cgroups version: unhandled filesystem {fs_type}" - ) - break - except Exception as e: - logger.debug(f"Could not determine cgroups version: {e}") - - return "unknown" - - -def get_docker_memlimit_bytes() -> int: - """Get mem limit in bytes set in docker if present. Returns -1 if no limit detected.""" - - # check running a supported cgroups version - if get_cgroups_version() == "cgroup2": - memlimit_path = "/sys/fs/cgroup/memory.max" - - try: - with open(memlimit_path, "r") as f: - value = f.read().strip() - - if value.isnumeric(): - return int(value) - elif value.lower() == "max": - return -1 - except Exception as e: - logger.debug(f"Unable to get docker memlimit: {e}") - - return -1 - - -def get_cpu_stats() -> dict[str, dict]: - """Get cpu usages for each process id""" - usages = {} - docker_memlimit = get_docker_memlimit_bytes() / 1024 - total_mem = os.sysconf("SC_PAGE_SIZE") * os.sysconf("SC_PHYS_PAGES") / 1024 - - for process in psutil.process_iter(["pid", "name", "cpu_percent", "cmdline"]): - pid = process.info["pid"] - try: - cpu_percent = process.info["cpu_percent"] - cmdline = process.info["cmdline"] - - with open(f"/proc/{pid}/stat", "r") as f: - stats = f.readline().split() - utime = int(stats[13]) - stime = int(stats[14]) - starttime = int(stats[21]) - - with open("/proc/uptime") as f: - system_uptime_sec = int(float(f.read().split()[0])) - - clk_tck = os.sysconf(os.sysconf_names["SC_CLK_TCK"]) - - process_utime_sec = utime // clk_tck - process_stime_sec = stime // clk_tck - process_starttime_sec = starttime // clk_tck - - process_elapsed_sec = system_uptime_sec - process_starttime_sec - process_usage_sec = process_utime_sec + process_stime_sec - cpu_average_usage = process_usage_sec * 100 // process_elapsed_sec - - with 
open(f"/proc/{pid}/statm", "r") as f: - mem_stats = f.readline().split() - mem_res = int(mem_stats[1]) * os.sysconf("SC_PAGE_SIZE") / 1024 - - if docker_memlimit > 0: - mem_pct = round((mem_res / docker_memlimit) * 100, 1) - else: - mem_pct = round((mem_res / total_mem) * 100, 1) - - usages[pid] = { - "cpu": str(cpu_percent), - "cpu_average": str(round(cpu_average_usage, 2)), - "mem": f"{mem_pct}", - "cmdline": " ".join(cmdline), - } - except Exception: - continue - - return usages - - -def get_physical_interfaces(interfaces) -> list: - with open("/proc/net/dev", "r") as file: - lines = file.readlines() - - physical_interfaces = [] - for line in lines: - if ":" in line: - interface = line.split(":")[0].strip() - for int in interfaces: - if interface.startswith(int): - physical_interfaces.append(interface) - - return physical_interfaces - - -def get_bandwidth_stats(config) -> dict[str, dict]: - """Get bandwidth usages for each ffmpeg process id""" - usages = {} - top_command = ["nethogs", "-t", "-v0", "-c5", "-d1"] + get_physical_interfaces( - config.telemetry.network_interfaces - ) - - p = sp.run( - top_command, - encoding="ascii", - capture_output=True, - ) - - if p.returncode != 0: - return usages - else: - lines = p.stdout.split("\n") - for line in lines: - stats = list(filter(lambda a: a != "", line.strip().split("\t"))) - try: - if re.search( - r"(^ffmpeg|\/go2rtc|frigate\.detector\.[a-z]+)/([0-9]+)/", stats[0] - ): - process = stats[0].split("/") - usages[process[len(process) - 2]] = { - "bandwidth": round(float(stats[1]) + float(stats[2]), 1), - } - except (IndexError, ValueError): - continue - - return usages - - -def get_amd_gpu_stats() -> dict[str, str]: - """Get stats using radeontop.""" - radeontop_command = ["radeontop", "-d", "-", "-l", "1"] - - p = sp.run( - radeontop_command, - encoding="ascii", - capture_output=True, - ) - - if p.returncode != 0: - logger.error(f"Unable to poll radeon GPU stats: {p.stderr}") - return None - else: - usages = 
p.stdout.split(",") - results: dict[str, str] = {} - - for hw in usages: - if "gpu" in hw: - results["gpu"] = f"{hw.strip().split(' ')[1].replace('%', '')}%" - elif "vram" in hw: - results["mem"] = f"{hw.strip().split(' ')[1].replace('%', '')}%" - - return results - - -def get_intel_gpu_stats() -> dict[str, str]: - """Get stats using intel_gpu_top.""" - intel_gpu_top_command = [ - "timeout", - "0.5s", - "intel_gpu_top", - "-J", - "-o", - "-", - "-s", - "1", - ] - - p = sp.run( - intel_gpu_top_command, - encoding="ascii", - capture_output=True, - ) - - # timeout has a non-zero returncode when timeout is reached - if p.returncode != 124: - logger.error(f"Unable to poll intel GPU stats: {p.stderr}") - return None - else: - reading = "".join(p.stdout.split()) - results: dict[str, str] = {} - - # render is used for qsv - render = [] - for result in re.findall(r'"Render/3D/0":{[a-z":\d.,%]+}', reading): - packet = json.loads(result[14:]) - single = packet.get("busy", 0.0) - render.append(float(single)) - - if render: - render_avg = sum(render) / len(render) - else: - render_avg = 1 - - # video is used for vaapi - video = [] - for result in re.findall('"Video/\d":{[a-z":\d.,%]+}', reading): - packet = json.loads(result[10:]) - single = packet.get("busy", 0.0) - video.append(float(single)) - - if video: - video_avg = sum(video) / len(video) - else: - video_avg = 1 - - results["gpu"] = f"{round((video_avg + render_avg) / 2, 2)}%" - results["mem"] = "-%" - return results - - -def try_get_info(f, h, default="N/A"): - try: - v = f(h) - except nvml.NVMLError_NotSupported: - v = default - return v - - -def get_nvidia_gpu_stats() -> dict[int, dict]: - results = {} - try: - nvml.nvmlInit() - deviceCount = nvml.nvmlDeviceGetCount() - for i in range(deviceCount): - handle = nvml.nvmlDeviceGetHandleByIndex(i) - meminfo = try_get_info(nvml.nvmlDeviceGetMemoryInfo, handle) - util = try_get_info(nvml.nvmlDeviceGetUtilizationRates, handle) - if util != "N/A": - gpu_util = util.gpu - 
else: - gpu_util = 0 - - if meminfo != "N/A": - gpu_mem_util = meminfo.used / meminfo.total * 100 - else: - gpu_mem_util = -1 - - results[i] = { - "name": nvml.nvmlDeviceGetName(handle), - "gpu": gpu_util, - "mem": gpu_mem_util, - } - except Exception: - pass - finally: - return results - - -def ffprobe_stream(path: str) -> sp.CompletedProcess: - """Run ffprobe on stream.""" - clean_path = escape_special_characters(path) - ffprobe_cmd = [ - "ffprobe", - "-timeout", - "1000000", - "-print_format", - "json", - "-show_entries", - "stream=codec_long_name,width,height,bit_rate,duration,display_aspect_ratio,avg_frame_rate", - "-loglevel", - "quiet", - clean_path, - ] - return sp.run(ffprobe_cmd, capture_output=True) - - -def vainfo_hwaccel(device_name: Optional[str] = None) -> sp.CompletedProcess: - """Run vainfo.""" - ffprobe_cmd = ( - ["vainfo"] - if not device_name - else ["vainfo", "--display", "drm", "--device", f"/dev/dri/{device_name}"] - ) - return sp.run(ffprobe_cmd, capture_output=True) - - -def get_ffmpeg_arg_list(arg: Any) -> list: - """Use arg if list or convert to list format.""" - return arg if isinstance(arg, list) else shlex.split(arg) - - class FrameManager(ABC): @abstractmethod def create(self, name, size) -> AnyStr: @@ -1138,133 +640,23 @@ class SharedMemoryFrameManager(FrameManager): del self.shm_store[name] -def get_tz_modifiers(tz_name: str) -> Tuple[str, str]: - seconds_offset = ( - datetime.datetime.now(pytz.timezone(tz_name)).utcoffset().total_seconds() +def create_mask(frame_shape, mask): + mask_img = np.zeros(frame_shape, np.uint8) + mask_img[:] = 255 + + if isinstance(mask, list): + for m in mask: + add_mask(m, mask_img) + + elif isinstance(mask, str): + add_mask(mask, mask_img) + + return mask_img + + +def add_mask(mask, mask_img): + points = mask.split(",") + contour = np.array( + [[int(points[i]), int(points[i + 1])] for i in range(0, len(points), 2)] ) - hours_offset = int(seconds_offset / 60 / 60) - minutes_offset = int(seconds_offset / 
60 - hours_offset * 60) - hour_modifier = f"{hours_offset} hour" - minute_modifier = f"{minutes_offset} minute" - return hour_modifier, minute_modifier - - -def to_relative_box( - width: int, height: int, box: Tuple[int, int, int, int] -) -> Tuple[int, int, int, int]: - return ( - box[0] / width, # x - box[1] / height, # y - (box[2] - box[0]) / width, # w - (box[3] - box[1]) / height, # h - ) - - -def get_video_properties(url, get_duration=False): - def calculate_duration(video: Optional[any]) -> float: - duration = None - - if video is not None: - # Get the frames per second (fps) of the video stream - fps = video.get(cv2.CAP_PROP_FPS) - total_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT)) - - if fps and total_frames: - duration = total_frames / fps - - # if cv2 failed need to use ffprobe - if duration is None: - ffprobe_cmd = [ - "ffprobe", - "-v", - "error", - "-show_entries", - "format=duration", - "-of", - "default=noprint_wrappers=1:nokey=1", - f"{url}", - ] - p = sp.run(ffprobe_cmd, capture_output=True) - - if p.returncode == 0 and p.stdout.decode(): - duration = float(p.stdout.decode().strip()) - else: - duration = -1 - - return duration - - width = height = 0 - - try: - # Open the video stream - video = cv2.VideoCapture(url) - - # Check if the video stream was opened successfully - if not video.isOpened(): - video = None - except Exception: - video = None - - result = {} - - if get_duration: - result["duration"] = calculate_duration(video) - - if video is not None: - # Get the width of frames in the video stream - width = video.get(cv2.CAP_PROP_FRAME_WIDTH) - - # Get the height of frames in the video stream - height = video.get(cv2.CAP_PROP_FRAME_HEIGHT) - - # Release the video stream - video.release() - - result["width"] = round(width) - result["height"] = round(height) - - return result - - -class LimitedQueue(FFQueue): - def __init__( - self, - maxsize=0, - max_size_bytes=DEFAULT_CIRCULAR_BUFFER_SIZE, - loads=None, - dumps=None, - ): - 
super().__init__(max_size_bytes=max_size_bytes, loads=loads, dumps=dumps) - self.maxsize = maxsize - self.size = multiprocessing.RawValue( - ctypes.c_int, 0 - ) # Add a counter for the number of items in the queue - - def put(self, x, block=True, timeout=DEFAULT_TIMEOUT): - if self.maxsize > 0 and self.size.value >= self.maxsize: - if block: - start_time = time.time() - while self.size.value >= self.maxsize: - remaining = timeout - (time.time() - start_time) - if remaining <= 0.0: - raise Full - time.sleep(min(remaining, 0.1)) - else: - raise Full - self.size.value += 1 - return super().put(x, block=block, timeout=timeout) - - def get(self, block=True, timeout=DEFAULT_TIMEOUT): - if self.size.value <= 0 and not block: - raise Empty - self.size.value -= 1 - return super().get(block=block, timeout=timeout) - - def qsize(self): - return self.size - - def empty(self): - return self.qsize() == 0 - - def full(self): - return self.qsize() == self.maxsize + cv2.fillPoly(mask_img, pts=[contour], color=(0)) diff --git a/frigate/util/services.py b/frigate/util/services.py new file mode 100644 index 000000000..507ee76ea --- /dev/null +++ b/frigate/util/services.py @@ -0,0 +1,403 @@ +"""Utilities for services.""" + +import json +import logging +import os +import re +import signal +import subprocess as sp +import traceback +from typing import Optional + +import cv2 +import psutil +import py3nvml.py3nvml as nvml + +from frigate.util.builtin import escape_special_characters + +logger = logging.getLogger(__name__) + + +def restart_frigate(): + proc = psutil.Process(1) + # if this is running via s6, sigterm pid 1 + if proc.name() == "s6-svscan": + proc.terminate() + # otherwise, just try and exit frigate + else: + os.kill(os.getpid(), signal.SIGTERM) + + +def print_stack(sig, frame): + traceback.print_stack(frame) + + +def listen(): + signal.signal(signal.SIGUSR1, print_stack) + + +def get_cgroups_version() -> str: + """Determine what version of cgroups is enabled.""" + + 
cgroup_path = "/sys/fs/cgroup" + + if not os.path.ismount(cgroup_path): + logger.debug(f"{cgroup_path} is not a mount point.") + return "unknown" + + try: + with open("/proc/mounts", "r") as f: + mounts = f.readlines() + + for mount in mounts: + mount_info = mount.split() + if mount_info[1] == cgroup_path: + fs_type = mount_info[2] + if fs_type == "cgroup2fs" or fs_type == "cgroup2": + return "cgroup2" + elif fs_type == "tmpfs": + return "cgroup" + else: + logger.debug( + f"Could not determine cgroups version: unhandled filesystem {fs_type}" + ) + break + except Exception as e: + logger.debug(f"Could not determine cgroups version: {e}") + + return "unknown" + + +def get_docker_memlimit_bytes() -> int: + """Get mem limit in bytes set in docker if present. Returns -1 if no limit detected.""" + + # check running a supported cgroups version + if get_cgroups_version() == "cgroup2": + memlimit_path = "/sys/fs/cgroup/memory.max" + + try: + with open(memlimit_path, "r") as f: + value = f.read().strip() + + if value.isnumeric(): + return int(value) + elif value.lower() == "max": + return -1 + except Exception as e: + logger.debug(f"Unable to get docker memlimit: {e}") + + return -1 + + +def get_cpu_stats() -> dict[str, dict]: + """Get cpu usages for each process id""" + usages = {} + docker_memlimit = get_docker_memlimit_bytes() / 1024 + total_mem = os.sysconf("SC_PAGE_SIZE") * os.sysconf("SC_PHYS_PAGES") / 1024 + + for process in psutil.process_iter(["pid", "name", "cpu_percent", "cmdline"]): + pid = process.info["pid"] + try: + cpu_percent = process.info["cpu_percent"] + cmdline = process.info["cmdline"] + + with open(f"/proc/{pid}/stat", "r") as f: + stats = f.readline().split() + utime = int(stats[13]) + stime = int(stats[14]) + starttime = int(stats[21]) + + with open("/proc/uptime") as f: + system_uptime_sec = int(float(f.read().split()[0])) + + clk_tck = os.sysconf(os.sysconf_names["SC_CLK_TCK"]) + + process_utime_sec = utime // clk_tck + process_stime_sec = stime // 
clk_tck + process_starttime_sec = starttime // clk_tck + + process_elapsed_sec = system_uptime_sec - process_starttime_sec + process_usage_sec = process_utime_sec + process_stime_sec + cpu_average_usage = process_usage_sec * 100 // process_elapsed_sec + + with open(f"/proc/{pid}/statm", "r") as f: + mem_stats = f.readline().split() + mem_res = int(mem_stats[1]) * os.sysconf("SC_PAGE_SIZE") / 1024 + + if docker_memlimit > 0: + mem_pct = round((mem_res / docker_memlimit) * 100, 1) + else: + mem_pct = round((mem_res / total_mem) * 100, 1) + + usages[pid] = { + "cpu": str(cpu_percent), + "cpu_average": str(round(cpu_average_usage, 2)), + "mem": f"{mem_pct}", + "cmdline": " ".join(cmdline), + } + except Exception: + continue + + return usages + + +def get_physical_interfaces(interfaces) -> list: + with open("/proc/net/dev", "r") as file: + lines = file.readlines() + + physical_interfaces = [] + for line in lines: + if ":" in line: + interface = line.split(":")[0].strip() + for int in interfaces: + if interface.startswith(int): + physical_interfaces.append(interface) + + return physical_interfaces + + +def get_bandwidth_stats(config) -> dict[str, dict]: + """Get bandwidth usages for each ffmpeg process id""" + usages = {} + top_command = ["nethogs", "-t", "-v0", "-c5", "-d1"] + get_physical_interfaces( + config.telemetry.network_interfaces + ) + + p = sp.run( + top_command, + encoding="ascii", + capture_output=True, + ) + + if p.returncode != 0: + return usages + else: + lines = p.stdout.split("\n") + for line in lines: + stats = list(filter(lambda a: a != "", line.strip().split("\t"))) + try: + if re.search( + r"(^ffmpeg|\/go2rtc|frigate\.detector\.[a-z]+)/([0-9]+)/", stats[0] + ): + process = stats[0].split("/") + usages[process[len(process) - 2]] = { + "bandwidth": round(float(stats[1]) + float(stats[2]), 1), + } + except (IndexError, ValueError): + continue + + return usages + + +def get_amd_gpu_stats() -> dict[str, str]: + """Get stats using radeontop.""" + 
radeontop_command = ["radeontop", "-d", "-", "-l", "1"] + + p = sp.run( + radeontop_command, + encoding="ascii", + capture_output=True, + ) + + if p.returncode != 0: + logger.error(f"Unable to poll radeon GPU stats: {p.stderr}") + return None + else: + usages = p.stdout.split(",") + results: dict[str, str] = {} + + for hw in usages: + if "gpu" in hw: + results["gpu"] = f"{hw.strip().split(' ')[1].replace('%', '')}%" + elif "vram" in hw: + results["mem"] = f"{hw.strip().split(' ')[1].replace('%', '')}%" + + return results + + +def get_intel_gpu_stats() -> dict[str, str]: + """Get stats using intel_gpu_top.""" + intel_gpu_top_command = [ + "timeout", + "0.5s", + "intel_gpu_top", + "-J", + "-o", + "-", + "-s", + "1", + ] + + p = sp.run( + intel_gpu_top_command, + encoding="ascii", + capture_output=True, + ) + + # timeout has a non-zero returncode when timeout is reached + if p.returncode != 124: + logger.error(f"Unable to poll intel GPU stats: {p.stderr}") + return None + else: + reading = "".join(p.stdout.split()) + results: dict[str, str] = {} + + # render is used for qsv + render = [] + for result in re.findall(r'"Render/3D/0":{[a-z":\d.,%]+}', reading): + packet = json.loads(result[14:]) + single = packet.get("busy", 0.0) + render.append(float(single)) + + if render: + render_avg = sum(render) / len(render) + else: + render_avg = 1 + + # video is used for vaapi + video = [] + for result in re.findall('"Video/\d":{[a-z":\d.,%]+}', reading): + packet = json.loads(result[10:]) + single = packet.get("busy", 0.0) + video.append(float(single)) + + if video: + video_avg = sum(video) / len(video) + else: + video_avg = 1 + + results["gpu"] = f"{round((video_avg + render_avg) / 2, 2)}%" + results["mem"] = "-%" + return results + + +def try_get_info(f, h, default="N/A"): + try: + v = f(h) + except nvml.NVMLError_NotSupported: + v = default + return v + + +def get_nvidia_gpu_stats() -> dict[int, dict]: + results = {} + try: + nvml.nvmlInit() + deviceCount = 
nvml.nvmlDeviceGetCount() + for i in range(deviceCount): + handle = nvml.nvmlDeviceGetHandleByIndex(i) + meminfo = try_get_info(nvml.nvmlDeviceGetMemoryInfo, handle) + util = try_get_info(nvml.nvmlDeviceGetUtilizationRates, handle) + if util != "N/A": + gpu_util = util.gpu + else: + gpu_util = 0 + + if meminfo != "N/A": + gpu_mem_util = meminfo.used / meminfo.total * 100 + else: + gpu_mem_util = -1 + + results[i] = { + "name": nvml.nvmlDeviceGetName(handle), + "gpu": gpu_util, + "mem": gpu_mem_util, + } + except Exception: + pass + finally: + return results + + +def ffprobe_stream(path: str) -> sp.CompletedProcess: + """Run ffprobe on stream.""" + clean_path = escape_special_characters(path) + ffprobe_cmd = [ + "ffprobe", + "-timeout", + "1000000", + "-print_format", + "json", + "-show_entries", + "stream=codec_long_name,width,height,bit_rate,duration,display_aspect_ratio,avg_frame_rate", + "-loglevel", + "quiet", + clean_path, + ] + return sp.run(ffprobe_cmd, capture_output=True) + + +def vainfo_hwaccel(device_name: Optional[str] = None) -> sp.CompletedProcess: + """Run vainfo.""" + ffprobe_cmd = ( + ["vainfo"] + if not device_name + else ["vainfo", "--display", "drm", "--device", f"/dev/dri/{device_name}"] + ) + return sp.run(ffprobe_cmd, capture_output=True) + + +def get_video_properties(url, get_duration=False): + def calculate_duration(video: Optional[any]) -> float: + duration = None + + if video is not None: + # Get the frames per second (fps) of the video stream + fps = video.get(cv2.CAP_PROP_FPS) + total_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT)) + + if fps and total_frames: + duration = total_frames / fps + + # if cv2 failed need to use ffprobe + if duration is None: + ffprobe_cmd = [ + "ffprobe", + "-v", + "error", + "-show_entries", + "format=duration", + "-of", + "default=noprint_wrappers=1:nokey=1", + f"{url}", + ] + p = sp.run(ffprobe_cmd, capture_output=True) + + if p.returncode == 0 and p.stdout.decode(): + duration = 
float(p.stdout.decode().strip()) + else: + duration = -1 + + return duration + + width = height = 0 + + try: + # Open the video stream + video = cv2.VideoCapture(url) + + # Check if the video stream was opened successfully + if not video.isOpened(): + video = None + except Exception: + video = None + + result = {} + + if get_duration: + result["duration"] = calculate_duration(video) + + if video is not None: + # Get the width of frames in the video stream + width = video.get(cv2.CAP_PROP_FRAME_WIDTH) + + # Get the height of frames in the video stream + height = video.get(cv2.CAP_PROP_FRAME_HEIGHT) + + # Release the video stream + video.release() + + result["width"] = round(width) + result["height"] = round(height) + + return result diff --git a/frigate/video.py b/frigate/video.py index 630859b49..0d0b3e5c6 100755 --- a/frigate/video.py +++ b/frigate/video.py @@ -24,8 +24,8 @@ from frigate.motion.improved_motion import ImprovedMotionDetector from frigate.object_detection import RemoteObjectDetector from frigate.track import ObjectTracker from frigate.track.norfair_tracker import NorfairTracker -from frigate.util import ( - EventsPerSecond, +from frigate.util.builtin import EventsPerSecond +from frigate.util.image import ( FrameManager, SharedMemoryFrameManager, area, @@ -33,11 +33,11 @@ from frigate.util import ( draw_box_with_label, intersection, intersection_over_union, - listen, yuv_region_2_bgr, yuv_region_2_rgb, yuv_region_2_yuv, ) +from frigate.util.services import listen logger = logging.getLogger(__name__) diff --git a/frigate/watchdog.py b/frigate/watchdog.py index 245e6f2cb..c6d55d18c 100644 --- a/frigate/watchdog.py +++ b/frigate/watchdog.py @@ -5,7 +5,7 @@ import time from multiprocessing.synchronize import Event as MpEvent from frigate.object_detection import ObjectDetectProcess -from frigate.util import restart_frigate +from frigate.util.services import restart_frigate logger = logging.getLogger(__name__) From cbb77e579464c78f42494465611e1544dfabb9e2 
Mon Sep 17 00:00:00 2001 From: Sergey Krashevich Date: Thu, 6 Jul 2023 21:50:22 +0300 Subject: [PATCH 11/25] Bugfix: SqliteQueueDatabase instead of SqliteDatabase and retry_interval for fetching latest frame in http.py (#7059) * Refactor http.py to use SqliteQueueDatabase instead of SqliteDatabase and add retry interval for fetching latest frame * isort && black --- frigate/http.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/frigate/http.py b/frigate/http.py index fe6dc54ef..db3ccb8df 100644 --- a/frigate/http.py +++ b/frigate/http.py @@ -24,8 +24,9 @@ from flask import ( make_response, request, ) -from peewee import DoesNotExist, SqliteDatabase, fn, operator +from peewee import DoesNotExist, fn, operator from playhouse.shortcuts import model_to_dict +from playhouse.sqliteq import SqliteQueueDatabase from tzlocal import get_localzone_name from frigate.config import FrigateConfig @@ -49,7 +50,7 @@ bp = Blueprint("frigate", __name__) def create_app( frigate_config, - database: SqliteDatabase, + database: SqliteQueueDatabase, stats_tracking, detected_frames_processor, storage_maintainer: StorageMaintainer, @@ -1099,10 +1100,14 @@ def latest_frame(camera_name): frame = current_app.detected_frames_processor.get_current_frame( camera_name, draw_options ) + retry_interval = float( + current_app.frigate_config.cameras.get(camera_name).ffmpeg.retry_interval + or 10 + ) if frame is None or datetime.now().timestamp() > ( current_app.detected_frames_processor.get_current_frame_time(camera_name) - + 10 + + retry_interval ): if current_app.camera_error_image is None: error_image = glob.glob("/opt/frigate/frigate/images/camera-error.jpg") From 22cc2712a6c6a8beb860b7601a042bce19ceb24d Mon Sep 17 00:00:00 2001 From: Sergey Krashevich Date: Thu, 6 Jul 2023 21:51:28 +0300 Subject: [PATCH 12/25] Bump NGINX version to 1.25.1, VOD module version to 1.31, secure token module version to 1.5, and RTMP module version to 1.2.2 (#7058) --- 
docker/build_nginx.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docker/build_nginx.sh b/docker/build_nginx.sh index 1e7bfad21..56c9a146d 100755 --- a/docker/build_nginx.sh +++ b/docker/build_nginx.sh @@ -2,10 +2,10 @@ set -euxo pipefail -NGINX_VERSION="1.22.1" -VOD_MODULE_VERSION="1.30" -SECURE_TOKEN_MODULE_VERSION="1.4" -RTMP_MODULE_VERSION="1.2.1" +NGINX_VERSION="1.25.1" +VOD_MODULE_VERSION="1.31" +SECURE_TOKEN_MODULE_VERSION="1.5" +RTMP_MODULE_VERSION="1.2.2" cp /etc/apt/sources.list /etc/apt/sources.list.d/sources-src.list sed -i 's|deb http|deb-src http|g' /etc/apt/sources.list.d/sources-src.list From f48dd8c1abb8e7c180fdc03cfc5acea16d96315d Mon Sep 17 00:00:00 2001 From: Sergey Krashevich Date: Thu, 6 Jul 2023 21:54:26 +0300 Subject: [PATCH 13/25] Feature: camera debug/config enhancements (#6920) * Add functionality to update YAML config file with PUT request in HTTP endpoint * Refactor copying of text to clipboard with Clipboard API and fallback to document.execCommand('copy') in CameraMap.jsx file * Update YAML file from URL query parameters in frigate/http.py and add functionality to save motion masks, zones, and object masks in CameraMap.jsx * formatting * fix merging fuckup * Refactor camera zone coordinate saving to use single query parameter per zone in CameraMap.jsx * remove unnecessary print statements in util.py * Refactor update_yaml_file function to separate the logic for updating YAML data into a new function update_yaml(). 
* fix merge errors * Refactor code to improve error handling and add dependencies to useEffect hooks --- frigate/http.py | 50 ++++++++++++++- frigate/util/builtin.py | 75 ++++++++++++++++++++++ requirements-wheels.txt | 1 + web/src/routes/CameraMap.jsx | 121 ++++++++++++++++++++++++++++++++--- 4 files changed, 236 insertions(+), 11 deletions(-) diff --git a/frigate/http.py b/frigate/http.py index db3ccb8df..ba62af047 100644 --- a/frigate/http.py +++ b/frigate/http.py @@ -30,7 +30,7 @@ from playhouse.sqliteq import SqliteQueueDatabase from tzlocal import get_localzone_name from frigate.config import FrigateConfig -from frigate.const import CLIPS_DIR, MAX_SEGMENT_DURATION, RECORD_DIR +from frigate.const import CLIPS_DIR, CONFIG_DIR, MAX_SEGMENT_DURATION, RECORD_DIR from frigate.events.external import ExternalEventProcessor from frigate.models import Event, Recordings, Timeline from frigate.object_processing import TrackedObject @@ -39,7 +39,11 @@ from frigate.ptz import OnvifController from frigate.record.export import PlaybackFactorEnum, RecordingExporter from frigate.stats import stats_snapshot from frigate.storage import StorageMaintainer -from frigate.util.builtin import clean_camera_user_pass, get_tz_modifiers +from frigate.util.builtin import ( + clean_camera_user_pass, + get_tz_modifiers, + update_yaml_from_url, +) from frigate.util.services import ffprobe_stream, restart_frigate, vainfo_hwaccel from frigate.version import VERSION @@ -1026,6 +1030,48 @@ def config_save(): return "Config successfully saved.", 200 +@bp.route("/config/set", methods=["PUT"]) +def config_set(): + config_file = os.environ.get("CONFIG_FILE", f"{CONFIG_DIR}/config.yml") + + # Check if we can use .yaml instead of .yml + config_file_yaml = config_file.replace(".yml", ".yaml") + + if os.path.isfile(config_file_yaml): + config_file = config_file_yaml + + with open(config_file, "r") as f: + old_raw_config = f.read() + f.close() + + try: + update_yaml_from_url(config_file, request.url) + 
with open(config_file, "r") as f: + new_raw_config = f.read() + f.close() + # Validate the config schema + try: + FrigateConfig.parse_raw(new_raw_config) + except Exception: + with open(config_file, "w") as f: + f.write(old_raw_config) + f.close() + return make_response( + jsonify( + { + "success": False, + "message": f"\nConfig Error:\n\n{str(traceback.format_exc())}", + } + ), + 400, + ) + except Exception as e: + logging.error(f"Error updating config: {e}") + return "Error updating config", 500 + + return "Config successfully updated", 200 + + @bp.route("/config/schema.json") def config_schema(): return current_app.response_class( diff --git a/frigate/util/builtin.py b/frigate/util/builtin.py index 900764a23..f55ea5e37 100644 --- a/frigate/util/builtin.py +++ b/frigate/util/builtin.py @@ -14,10 +14,12 @@ from collections.abc import Mapping from queue import Empty, Full from typing import Any, Tuple +import numpy as np import pytz import yaml from faster_fifo import DEFAULT_CIRCULAR_BUFFER_SIZE, DEFAULT_TIMEOUT from faster_fifo import Queue as FFQueue +from ruamel.yaml import YAML from frigate.const import REGEX_HTTP_CAMERA_USER_PASS, REGEX_RTSP_CAMERA_USER_PASS @@ -224,3 +226,76 @@ def to_relative_box( (box[2] - box[0]) / width, # w (box[3] - box[1]) / height, # h ) + + +def create_mask(frame_shape, mask): + mask_img = np.zeros(frame_shape, np.uint8) + mask_img[:] = 255 + + +def update_yaml_from_url(file_path, url): + parsed_url = urllib.parse.urlparse(url) + query_string = urllib.parse.parse_qs(parsed_url.query, keep_blank_values=True) + + for key_path_str, new_value_list in query_string.items(): + key_path = key_path_str.split(".") + for i in range(len(key_path)): + try: + index = int(key_path[i]) + key_path[i] = (key_path[i - 1], index) + key_path.pop(i - 1) + except ValueError: + pass + new_value = new_value_list[0] + update_yaml_file(file_path, key_path, new_value) + + +def update_yaml_file(file_path, key_path, new_value): + yaml = YAML() + with 
open(file_path, "r") as f: + data = yaml.load(f) + + data = update_yaml(data, key_path, new_value) + + with open(file_path, "w") as f: + yaml.dump(data, f) + + +def update_yaml(data, key_path, new_value): + temp = data + for key in key_path[:-1]: + if isinstance(key, tuple): + if key[0] not in temp: + temp[key[0]] = [{}] * max(1, key[1] + 1) + elif len(temp[key[0]]) <= key[1]: + temp[key[0]] += [{}] * (key[1] - len(temp[key[0]]) + 1) + temp = temp[key[0]][key[1]] + else: + if key not in temp: + temp[key] = {} + temp = temp[key] + + last_key = key_path[-1] + if new_value == "": + if isinstance(last_key, tuple): + del temp[last_key[0]][last_key[1]] + else: + del temp[last_key] + else: + if isinstance(last_key, tuple): + if last_key[0] not in temp: + temp[last_key[0]] = [{}] * max(1, last_key[1] + 1) + elif len(temp[last_key[0]]) <= last_key[1]: + temp[last_key[0]] += [{}] * (last_key[1] - len(temp[last_key[0]]) + 1) + temp[last_key[0]][last_key[1]] = new_value + else: + if ( + last_key in temp + and isinstance(temp[last_key], dict) + and isinstance(new_value, dict) + ): + temp[last_key].update(new_value) + else: + temp[last_key] = new_value + + return data diff --git a/requirements-wheels.txt b/requirements-wheels.txt index 778737a92..3232e8f31 100644 --- a/requirements-wheels.txt +++ b/requirements-wheels.txt @@ -15,6 +15,7 @@ pydantic == 1.10.* git+https://github.com/fbcotter/py3nvml#egg=py3nvml PyYAML == 6.0 pytz == 2023.3 +ruamel.yaml == 0.17.* tzlocal == 5.0.* types-PyYAML == 6.0.* requests == 2.31.* diff --git a/web/src/routes/CameraMap.jsx b/web/src/routes/CameraMap.jsx index ca77ec56e..2dce5597d 100644 --- a/web/src/routes/CameraMap.jsx +++ b/web/src/routes/CameraMap.jsx @@ -7,7 +7,7 @@ import { useResizeObserver } from '../hooks'; import { useCallback, useMemo, useRef, useState } from 'preact/hooks'; import { useApiHost } from '../api'; import useSWR from 'swr'; - +import axios from 'axios'; export default function CameraMasks({ camera }) { const { data: 
config } = useSWR('config'); const apiHost = useApiHost(); @@ -95,12 +95,53 @@ export default function CameraMasks({ camera }) { [motionMaskPoints, setMotionMaskPoints] ); - const handleCopyMotionMasks = useCallback(async () => { - await window.navigator.clipboard.writeText(` motion: - mask: -${motionMaskPoints.map((mask) => ` - ${polylinePointsToPolyline(mask)}`).join('\n')}`); + const handleCopyMotionMasks = useCallback(() => { + const textToCopy = ` motion: + mask: + ${motionMaskPoints.map((mask) => ` - ${polylinePointsToPolyline(mask)}`).join('\n')}`; + + if (window.navigator.clipboard && window.navigator.clipboard.writeText) { + // Use Clipboard API if available + window.navigator.clipboard.writeText(textToCopy).catch((err) => { + throw new Error('Failed to copy text: ', err); + }); + } else { + // Fallback to document.execCommand('copy') + const textarea = document.createElement('textarea'); + textarea.value = textToCopy; + document.body.appendChild(textarea); + textarea.select(); + + try { + const successful = document.execCommand('copy'); + if (!successful) { + throw new Error('Failed to copy text'); + } + } catch (err) { + throw new Error('Failed to copy text: ', err); + } + + document.body.removeChild(textarea); + } }, [motionMaskPoints]); + const handleSaveMotionMasks = useCallback(async () => { + try { + const queryParameters = motionMaskPoints + .map((mask, index) => `cameras.${camera}.motion.mask.${index}=${polylinePointsToPolyline(mask)}`) + .join('&'); + const endpoint = `config/set?${queryParameters}`; + const response = await axios.put(endpoint); + if (response.status === 200) { + // handle successful response + } + } catch (error) { + // handle error + //console.error(error); + } + }, [camera, motionMaskPoints]); + + // Zone methods const handleEditZone = useCallback( (key) => { @@ -127,15 +168,53 @@ ${motionMaskPoints.map((mask) => ` - ${polylinePointsToPolyline(mask)}`).jo ); const handleCopyZones = useCallback(async () => { - await 
window.navigator.clipboard.writeText(` zones: + const textToCopy = ` zones: ${Object.keys(zonePoints) .map( (zoneName) => ` ${zoneName}: - coordinates: ${polylinePointsToPolyline(zonePoints[zoneName])}` - ) - .join('\n')}`); + coordinates: ${polylinePointsToPolyline(zonePoints[zoneName])}`).join('\n')}`; + + if (window.navigator.clipboard && window.navigator.clipboard.writeText) { + // Use Clipboard API if available + window.navigator.clipboard.writeText(textToCopy).catch((err) => { + throw new Error('Failed to copy text: ', err); + }); + } else { + // Fallback to document.execCommand('copy') + const textarea = document.createElement('textarea'); + textarea.value = textToCopy; + document.body.appendChild(textarea); + textarea.select(); + + try { + const successful = document.execCommand('copy'); + if (!successful) { + throw new Error('Failed to copy text'); + } + } catch (err) { + throw new Error('Failed to copy text: ', err); + } + + document.body.removeChild(textarea); + } }, [zonePoints]); + const handleSaveZones = useCallback(async () => { + try { + const queryParameters = Object.keys(zonePoints) + .map((zoneName) => `cameras.${camera}.zones.${zoneName}.coordinates=${polylinePointsToPolyline(zonePoints[zoneName])}`) + .join('&'); + const endpoint = `config/set?${queryParameters}`; + const response = await axios.put(endpoint); + if (response.status === 200) { + // handle successful response + } + } catch (error) { + // handle error + //console.error(error); + } + }, [camera, zonePoints]); + // Object methods const handleEditObjectMask = useCallback( (key, subkey) => { @@ -175,6 +254,23 @@ ${Object.keys(objectMaskPoints) .join('\n')}`); }, [objectMaskPoints]); + const handleSaveObjectMasks = useCallback(async () => { + try { + const queryParameters = Object.keys(objectMaskPoints) + .filter((objectName) => objectMaskPoints[objectName].length > 0) + .map((objectName, index) => 
`cameras.${camera}.objects.filters.${objectName}.mask.${index}=${polylinePointsToPolyline(objectMaskPoints[objectName])}`) + .join('&'); + const endpoint = `config/set?${queryParameters}`; + const response = await axios.put(endpoint); + if (response.status === 200) { + // handle successful response + } + } catch (error) { + // handle error + //console.error(error); + } + }, [camera, objectMaskPoints]); + const handleAddToObjectMask = useCallback( (key) => { const newObjectMaskPoints = { ...objectMaskPoints, [key]: [...objectMaskPoints[key], []] }; @@ -246,6 +342,7 @@ ${Object.keys(objectMaskPoints) editing={editing} title="Motion masks" onCopy={handleCopyMotionMasks} + onSave={handleSaveMotionMasks} onCreate={handleAddMask} onEdit={handleEditMask} onRemove={handleRemoveMask} @@ -258,6 +355,7 @@ ${Object.keys(objectMaskPoints) editing={editing} title="Zones" onCopy={handleCopyZones} + onSave={handleSaveZones} onCreate={handleAddZone} onEdit={handleEditZone} onRemove={handleRemoveZone} @@ -272,6 +370,7 @@ ${Object.keys(objectMaskPoints) title="Object masks" onAdd={handleAddToObjectMask} onCopy={handleCopyObjectMasks} + onSave={handleSaveObjectMasks} onCreate={handleAddObjectMask} onEdit={handleEditObjectMask} onRemove={handleRemoveObjectMask} @@ -407,6 +506,7 @@ function MaskValues({ title, onAdd, onCopy, + onSave, onCreate, onEdit, onRemove, @@ -455,6 +555,8 @@ function MaskValues({ [onAdd] ); + + return (
@@ -463,6 +565,7 @@ function MaskValues({ +
         {yamlPrefix}

From 30dfdf47d4bc440ea1f558e4c3965798f64bb904 Mon Sep 17 00:00:00 2001
From: Sergey Krashevich 
Date: Thu, 6 Jul 2023 21:54:55 +0300
Subject: [PATCH 14/25] Add thread-safety to LimitedQueue by implementing a
 lock for put and get methods (#7053)

---
 frigate/util/builtin.py | 36 ++++++++++++++++++++----------------
 1 file changed, 20 insertions(+), 16 deletions(-)

diff --git a/frigate/util/builtin.py b/frigate/util/builtin.py
index f55ea5e37..2f623567c 100644
--- a/frigate/util/builtin.py
+++ b/frigate/util/builtin.py
@@ -78,29 +78,33 @@ class LimitedQueue(FFQueue):
         self.size = multiprocessing.RawValue(
             ctypes.c_int, 0
         )  # Add a counter for the number of items in the queue
+        self.lock = multiprocessing.Lock()  # Add a lock for thread-safety
 
     def put(self, x, block=True, timeout=DEFAULT_TIMEOUT):
-        if self.maxsize > 0 and self.size.value >= self.maxsize:
-            if block:
-                start_time = time.time()
-                while self.size.value >= self.maxsize:
-                    remaining = timeout - (time.time() - start_time)
-                    if remaining <= 0.0:
-                        raise Full
-                    time.sleep(min(remaining, 0.1))
-            else:
-                raise Full
-        self.size.value += 1
+        with self.lock:  # Ensure thread-safety
+            if self.maxsize > 0 and self.size.value >= self.maxsize:
+                if block:
+                    start_time = time.time()
+                    while self.size.value >= self.maxsize:
+                        remaining = timeout - (time.time() - start_time)
+                        if remaining <= 0.0:
+                            raise Full
+                        time.sleep(min(remaining, 0.1))
+                else:
+                    raise Full
+            self.size.value += 1
         return super().put(x, block=block, timeout=timeout)
 
     def get(self, block=True, timeout=DEFAULT_TIMEOUT):
-        if self.size.value <= 0 and not block:
-            raise Empty
-        self.size.value -= 1
-        return super().get(block=block, timeout=timeout)
+        item = super().get(block=block, timeout=timeout)
+        with self.lock:  # Ensure thread-safety
+            if self.size.value <= 0 and not block:
+                raise Empty
+            self.size.value -= 1
+        return item
 
     def qsize(self):
-        return self.size
+        return self.size.value
 
     def empty(self):
         return self.qsize() == 0

From dd02958f7c4cdb6a756e032aa8108300f052da2c Mon Sep 17 00:00:00 2001
From: Nate Meyer 
Date: Thu, 6 Jul 2023 15:20:33 -0400
Subject: [PATCH 15/25] Upgrade TensorRT to 8.5.3 (#7006)

* Update to latest tensorrt (8.6.1) release

* Build trt libyolo_layer.so in container

* Update tensorrt_models script to convert models from the frigate container

* Fix typo in model script

* Fix paths to yolo lib and models folder

* Add S6 scripts to test and convert specified TensorRT models at startup.

Rearrange tensorrt files into a docker support folder.

* Update TensorRT documentation to reflect the new model conversion process and minimum HW support.

* Fix model_cache path to live in config directory

* Move tensorrt s6 files to the correct directory

* Fix issues in model generation script

* Disable global timeout for s6 services

* Add version folder to tensorrt model_cache path

* Include TensorRT version 8.5.3

* Add numpy requirement prior to removal of np.bool

* This TRT version uses a mixture of cuda dependencies

* Redirect stdout from noisy model conversion
---
 Dockerfile                                    | 22 +++++++-
 docker/install_deps.sh                        |  2 +-
 .../etc/ld.so.conf.d/cuda_tensorrt.conf       |  1 +
 .../frigate/dependencies.d/trt-model-prepare  |  0
 .../trt-model-prepare/dependencies.d/base     |  0
 .../s6-overlay/s6-rc.d/trt-model-prepare/run  | 53 +++++++++++++++++++
 .../s6-overlay/s6-rc.d/trt-model-prepare/type |  1 +
 .../s6-overlay/s6-rc.d/trt-model-prepare/up   |  1 +
 .../tensorrt_detector/tensorrt_libyolo.sh     | 18 +++++++
 docker/tensorrt_models.sh                     | 34 ------------
 docs/docs/configuration/object_detectors.md   | 34 ++++++------
 docs/docs/frigate/hardware.md                 |  2 +-
 frigate/detectors/plugins/tensorrt.py         |  2 +-
 requirements-tensorrt.txt                     | 17 +++---
 14 files changed, 125 insertions(+), 62 deletions(-)
 rename docker/{ => support/tensorrt_detector}/rootfs/etc/ld.so.conf.d/cuda_tensorrt.conf (94%)
 create mode 100644 docker/support/tensorrt_detector/rootfs/etc/s6-overlay/s6-rc.d/frigate/dependencies.d/trt-model-prepare
 create mode 100644 docker/support/tensorrt_detector/rootfs/etc/s6-overlay/s6-rc.d/trt-model-prepare/dependencies.d/base
 create mode 100755 docker/support/tensorrt_detector/rootfs/etc/s6-overlay/s6-rc.d/trt-model-prepare/run
 create mode 100644 docker/support/tensorrt_detector/rootfs/etc/s6-overlay/s6-rc.d/trt-model-prepare/type
 create mode 100644 docker/support/tensorrt_detector/rootfs/etc/s6-overlay/s6-rc.d/trt-model-prepare/up
 create mode 100755 docker/support/tensorrt_detector/tensorrt_libyolo.sh
 delete mode 100755 docker/tensorrt_models.sh

diff --git a/Dockerfile b/Dockerfile
index d1dbd0755..660cb5b25 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -262,15 +262,35 @@ FROM deps AS frigate
 WORKDIR /opt/frigate/
 COPY --from=rootfs / /
 
+# Build TensorRT-specific library
+FROM nvcr.io/nvidia/tensorrt:23.03-py3 AS trt-deps
+
+RUN --mount=type=bind,source=docker/support/tensorrt_detector/tensorrt_libyolo.sh,target=/tensorrt_libyolo.sh \
+    /tensorrt_libyolo.sh
+
 # Frigate w/ TensorRT Support as separate image
 FROM frigate AS frigate-tensorrt
+
+#Disable S6 Global timeout
+ENV S6_CMD_WAIT_FOR_SERVICES_MAXTIME=0
+
+ENV TRT_VER=8.5.3
+ENV YOLO_MODELS="yolov7-tiny-416"
+
+COPY --from=trt-deps /usr/local/lib/libyolo_layer.so /usr/local/lib/libyolo_layer.so
+COPY --from=trt-deps /usr/local/src/tensorrt_demos /usr/local/src/tensorrt_demos
+COPY docker/support/tensorrt_detector/rootfs/ /
+
 RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \
     pip3 install -U /deps/trt-wheels/*.whl && \
-    ln -s libnvrtc.so.11.2 /usr/local/lib/python3.9/dist-packages/nvidia/cuda_nvrtc/lib/libnvrtc.so && \
     ldconfig
 
 # Dev Container w/ TRT
 FROM devcontainer AS devcontainer-trt
 
+COPY --from=trt-deps /usr/local/lib/libyolo_layer.so /usr/local/lib/libyolo_layer.so
+COPY --from=trt-deps /usr/local/src/tensorrt_demos /usr/local/src/tensorrt_demos
+COPY docker/support/tensorrt_detector/rootfs/ /
+COPY --from=trt-deps /usr/local/lib/libyolo_layer.so /usr/local/lib/libyolo_layer.so
 RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \
     pip3 install -U /deps/trt-wheels/*.whl
diff --git a/docker/install_deps.sh b/docker/install_deps.sh
index 25b6951b5..7d5242d83 100755
--- a/docker/install_deps.sh
+++ b/docker/install_deps.sh
@@ -68,7 +68,7 @@ if [[ "${TARGETARCH}" == "arm64" ]]; then
         libva-drm2 mesa-va-drivers
 fi
 
-apt-get purge gnupg apt-transport-https wget xz-utils -y
+apt-get purge gnupg apt-transport-https xz-utils -y
 apt-get clean autoclean -y
 apt-get autoremove --purge -y
 rm -rf /var/lib/apt/lists/*
diff --git a/docker/rootfs/etc/ld.so.conf.d/cuda_tensorrt.conf b/docker/support/tensorrt_detector/rootfs/etc/ld.so.conf.d/cuda_tensorrt.conf
similarity index 94%
rename from docker/rootfs/etc/ld.so.conf.d/cuda_tensorrt.conf
rename to docker/support/tensorrt_detector/rootfs/etc/ld.so.conf.d/cuda_tensorrt.conf
index d4248d047..fe16ed9c5 100644
--- a/docker/rootfs/etc/ld.so.conf.d/cuda_tensorrt.conf
+++ b/docker/support/tensorrt_detector/rootfs/etc/ld.so.conf.d/cuda_tensorrt.conf
@@ -1,3 +1,4 @@
+/usr/local/lib
 /usr/local/lib/python3.9/dist-packages/nvidia/cudnn/lib
 /usr/local/lib/python3.9/dist-packages/nvidia/cuda_runtime/lib
 /usr/local/lib/python3.9/dist-packages/nvidia/cublas/lib
diff --git a/docker/support/tensorrt_detector/rootfs/etc/s6-overlay/s6-rc.d/frigate/dependencies.d/trt-model-prepare b/docker/support/tensorrt_detector/rootfs/etc/s6-overlay/s6-rc.d/frigate/dependencies.d/trt-model-prepare
new file mode 100644
index 000000000..e69de29bb
diff --git a/docker/support/tensorrt_detector/rootfs/etc/s6-overlay/s6-rc.d/trt-model-prepare/dependencies.d/base b/docker/support/tensorrt_detector/rootfs/etc/s6-overlay/s6-rc.d/trt-model-prepare/dependencies.d/base
new file mode 100644
index 000000000..e69de29bb
diff --git a/docker/support/tensorrt_detector/rootfs/etc/s6-overlay/s6-rc.d/trt-model-prepare/run b/docker/support/tensorrt_detector/rootfs/etc/s6-overlay/s6-rc.d/trt-model-prepare/run
new file mode 100755
index 000000000..5f0e43553
--- /dev/null
+++ b/docker/support/tensorrt_detector/rootfs/etc/s6-overlay/s6-rc.d/trt-model-prepare/run
@@ -0,0 +1,53 @@
+#!/command/with-contenv bash
+# shellcheck shell=bash
+# Generate models for the TensorRT detector
+
+set -o errexit -o nounset -o pipefail
+
+MODEL_CACHE_DIR=${MODEL_CACHE_DIR:-"/config/model_cache/tensorrt"}
+OUTPUT_FOLDER="${MODEL_CACHE_DIR}/${TRT_VER}"
+
+# Create output folder
+mkdir -p ${OUTPUT_FOLDER}
+
+FIRST_MODEL=true
+MODEL_CONVERT=""
+
+for model in ${YOLO_MODELS//,/ }
+do
+    # Remove old link in case path/version changed
+    rm -f ${MODEL_CACHE_DIR}/${model}.trt
+    
+    if [[ ! -f ${OUTPUT_FOLDER}/${model}.trt ]]; then
+        if [[ ${FIRST_MODEL} = true ]]; then
+            MODEL_CONVERT="${model}"
+            FIRST_MODEL=false;
+        else
+            MODEL_CONVERT+=",${model}";
+        fi
+    else
+        ln -s ${OUTPUT_FOLDER}/${model}.trt ${MODEL_CACHE_DIR}/${model}.trt
+    fi
+done
+
+if [[ -z ${MODEL_CONVERT} ]]; then
+    echo "No models to convert."
+    exit 0
+fi
+
+echo "Generating the following TRT Models: ${MODEL_CONVERT}"
+
+# Build trt engine
+cd /usr/local/src/tensorrt_demos/yolo
+
+# Download yolo weights
+./download_yolo.sh $MODEL_CONVERT > /dev/null
+
+for model in ${MODEL_CONVERT//,/ }
+do
+    echo "Converting ${model} model"
+    python3 yolo_to_onnx.py -m ${model} > /dev/null
+    python3 onnx_to_tensorrt.py -m ${model} > /dev/null
+    cp ${model}.trt ${OUTPUT_FOLDER}/${model}.trt
+    ln -s ${OUTPUT_FOLDER}/${model}.trt ${MODEL_CACHE_DIR}/${model}.trt
+done
diff --git a/docker/support/tensorrt_detector/rootfs/etc/s6-overlay/s6-rc.d/trt-model-prepare/type b/docker/support/tensorrt_detector/rootfs/etc/s6-overlay/s6-rc.d/trt-model-prepare/type
new file mode 100644
index 000000000..bdd22a185
--- /dev/null
+++ b/docker/support/tensorrt_detector/rootfs/etc/s6-overlay/s6-rc.d/trt-model-prepare/type
@@ -0,0 +1 @@
+oneshot
diff --git a/docker/support/tensorrt_detector/rootfs/etc/s6-overlay/s6-rc.d/trt-model-prepare/up b/docker/support/tensorrt_detector/rootfs/etc/s6-overlay/s6-rc.d/trt-model-prepare/up
new file mode 100644
index 000000000..b9de40ad0
--- /dev/null
+++ b/docker/support/tensorrt_detector/rootfs/etc/s6-overlay/s6-rc.d/trt-model-prepare/up
@@ -0,0 +1 @@
+/etc/s6-overlay/s6-rc.d/trt-model-prepare/run
diff --git a/docker/support/tensorrt_detector/tensorrt_libyolo.sh b/docker/support/tensorrt_detector/tensorrt_libyolo.sh
new file mode 100755
index 000000000..e6fc415e5
--- /dev/null
+++ b/docker/support/tensorrt_detector/tensorrt_libyolo.sh
@@ -0,0 +1,18 @@
+#!/bin/bash
+
+set -euxo pipefail
+
+SCRIPT_DIR="/usr/local/src/tensorrt_demos"
+
+# Clone tensorrt_demos repo
+git clone --depth 1 https://github.com/NateMeyer/tensorrt_demos.git -b conditional_download
+
+# Build libyolo
+cd ./tensorrt_demos/plugins && make all
+cp libyolo_layer.so /usr/local/lib/libyolo_layer.so
+
+# Store yolo scripts for later conversion
+cd ../
+mkdir -p ${SCRIPT_DIR}/plugins
+cp plugins/libyolo_layer.so ${SCRIPT_DIR}/plugins/libyolo_layer.so
+cp -a yolo ${SCRIPT_DIR}/
diff --git a/docker/tensorrt_models.sh b/docker/tensorrt_models.sh
deleted file mode 100755
index 957e817d6..000000000
--- a/docker/tensorrt_models.sh
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/bin/bash
-
-set -euxo pipefail
-
-CUDA_HOME=/usr/local/cuda
-LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/cuda/lib64:/usr/local/cuda/extras/CUPTI/lib64
-OUTPUT_FOLDER=/tensorrt_models
-echo "Generating the following TRT Models: ${YOLO_MODELS:="yolov4-tiny-288,yolov4-tiny-416,yolov7-tiny-416"}"
-
-# Create output folder
-mkdir -p ${OUTPUT_FOLDER}
-
-# Install packages
-pip install --upgrade pip && pip install onnx==1.9.0 protobuf==3.20.3
-
-# Clone tensorrt_demos repo
-git clone --depth 1 https://github.com/yeahme49/tensorrt_demos.git /tensorrt_demos
-
-# Build libyolo
-cd /tensorrt_demos/plugins && make all
-cp libyolo_layer.so ${OUTPUT_FOLDER}/libyolo_layer.so
-
-# Download yolo weights
-cd /tensorrt_demos/yolo && ./download_yolo.sh
-
-# Build trt engine
-cd /tensorrt_demos/yolo
-
-for model in ${YOLO_MODELS//,/ }
-do
-    python3 yolo_to_onnx.py -m ${model}
-    python3 onnx_to_tensorrt.py -m ${model}
-    cp /tensorrt_demos/yolo/${model}.trt ${OUTPUT_FOLDER}/${model}.trt;
-done
diff --git a/docs/docs/configuration/object_detectors.md b/docs/docs/configuration/object_detectors.md
index 3f48423bc..d684a2917 100644
--- a/docs/docs/configuration/object_detectors.md
+++ b/docs/docs/configuration/object_detectors.md
@@ -174,9 +174,7 @@ NVidia GPUs may be used for object detection using the TensorRT libraries. Due t
 
 ### Minimum Hardware Support
 
-The TensorRT detector uses the 11.x series of CUDA libraries which have minor version compatibility. The minimum driver version on the host system must be `>=450.80.02`. Also the GPU must support a Compute Capability of `5.0` or greater. This generally correlates to a Maxwell-era GPU or newer, check the NVIDIA GPU Compute Capability table linked below.
-
-> **TODO:** NVidia claims support on compute 3.5 and 3.7, but marks it as deprecated. This would have some, but not all, Kepler GPUs as possibly working. This needs testing before making any claims of support.
+The TensorRT detector uses the 12.x series of CUDA libraries which have minor version compatibility. The minimum driver version on the host system must be `>=525.60.13`. Also the GPU must support a Compute Capability of `5.0` or greater. This generally correlates to a Maxwell-era GPU or newer, check the NVIDIA GPU Compute Capability table linked below.
 
 To use the TensorRT detector, make sure your host system has the [nvidia-container-runtime](https://docs.docker.com/config/containers/resource_constraints/#access-an-nvidia-gpu) installed to pass through the GPU to the container and the host system has a compatible driver installed for your GPU.
 
@@ -192,22 +190,15 @@ There are improved capabilities in newer GPU architectures that TensorRT can ben
 
 ### Generate Models
 
-The model used for TensorRT must be preprocessed on the same hardware platform that they will run on. This means that each user must run additional setup to generate a model file for the TensorRT library. A script is provided that will build several common models.
+The model used for TensorRT must be preprocessed on the same hardware platform that they will run on. This means that each user must run additional setup to generate a model file for the TensorRT library. A script is included that will build several common models.
 
-To generate model files, create a new folder to save the models, download the script, and launch a docker container that will run the script.
+The Frigate image will generate model files during startup if the specified model is not found.  Processed models are stored in the `/config/model_cache` folder.  Typically the `/config` path is mapped to a directory on the host already and the `model_cache` does not need to be mapped separately unless the user wants to store it in a different location on the host.
 
-```bash
-mkdir trt-models
-wget https://github.com/blakeblackshear/frigate/raw/master/docker/tensorrt_models.sh
-chmod +x tensorrt_models.sh
-docker run --gpus=all --rm -it -v `pwd`/trt-models:/tensorrt_models -v `pwd`/tensorrt_models.sh:/tensorrt_models.sh nvcr.io/nvidia/tensorrt:22.07-py3 /tensorrt_models.sh
-```
+By default, the `yolov7-tiny-416` model will be generated, but this can be overridden by specifying the `YOLO_MODELS` environment variable in Docker.  One or more models may be listed in a comma-separated format, and each one will be generated.  To select no model generation, set the variable to an empty string, `YOLO_MODELS=""`.  Models will only be generated if the corresponding `{model}.trt` file is not present in the `model_cache` folder, so you can force a model to be regenerated by deleting it from your Frigate data folder.
 
-The `trt-models` folder can then be mapped into your Frigate container as `trt-models` and the models referenced from the config.
+If your GPU does not support FP16 operations, you can pass the environment variable `USE_FP16=False` to disable it.
 
-If your GPU does not support FP16 operations, you can pass the environment variable `-e USE_FP16=False` to the `docker run` command to disable it.
-
-Specific models can be selected by passing an environment variable to the `docker run` command. Use the form `-e YOLO_MODELS=yolov4-416,yolov4-tiny-416` to select one or more model names. The models available are shown below.
+Specific models can be selected by passing an environment variable to the `docker run` command or in your `docker-compose.yml` file. Use the form `-e YOLO_MODELS=yolov4-416,yolov4-tiny-416` to select one or more model names. The models available are shown below.
 
 ```
 yolov3-288
@@ -237,11 +228,20 @@ yolov7x-640
 yolov7x-320
 ```
 
+An example `docker-compose.yml` fragment that converts the `yolov4-608` and `yolov7x-640` models for a Pascal card would look something like this:
+
+```yml
+frigate:
+  environment:
+    - YOLO_MODELS="yolov4-608,yolov7x-640"
+    - USE_FP16=false
+```
+
 ### Configuration Parameters
 
 The TensorRT detector can be selected by specifying `tensorrt` as the model type. The GPU will need to be passed through to the docker container using the same methods described in the [Hardware Acceleration](hardware_acceleration.md#nvidia-gpu) section. If you pass through multiple GPUs, you can select which GPU is used for a detector with the `device` configuration parameter. The `device` parameter is an integer value of the GPU index, as shown by `nvidia-smi` within the container.
 
-The TensorRT detector uses `.trt` model files that are located in `/trt-models/` by default. These model file path and dimensions used will depend on which model you have generated.
+The TensorRT detector uses `.trt` model files that are located in `/config/model_cache/tensorrt` by default. The model path and dimensions used will depend on which model you have generated.
 
 ```yaml
 detectors:
@@ -250,7 +250,7 @@ detectors:
     device: 0 #This is the default, select the first GPU
 
 model:
-  path: /trt-models/yolov7-tiny-416.trt
+  path: /config/model_cache/tensorrt/yolov7-tiny-416.trt
   input_tensor: nchw
   input_pixel_format: rgb
   width: 416
diff --git a/docs/docs/frigate/hardware.md b/docs/docs/frigate/hardware.md
index 36233ea68..5daf8fe3b 100644
--- a/docs/docs/frigate/hardware.md
+++ b/docs/docs/frigate/hardware.md
@@ -72,7 +72,7 @@ Inference speeds vary greatly depending on the CPU, GPU, or VPU used, some known
 
 ### TensorRT
 
-The TensortRT detector is able to run on x86 hosts that have an Nvidia GPU which supports the 11.x series of CUDA libraries. The minimum driver version on the host system must be `>=450.80.02`. Also the GPU must support a Compute Capability of `5.0` or greater. This generally correlates to a Maxwell-era GPU or newer, check the [TensorRT docs for more info](/configuration/object_detectors#nvidia-tensorrt-detector).
+The TensorRT detector is able to run on x86 hosts that have an Nvidia GPU which supports the 12.x series of CUDA libraries. The minimum driver version on the host system must be `>=525.60.13`. Also the GPU must support a Compute Capability of `5.0` or greater. This generally correlates to a Maxwell-era GPU or newer, check the [TensorRT docs for more info](/configuration/object_detectors#nvidia-tensorrt-detector).
 
 Inference speeds will vary greatly depending on the GPU and the model used.
 `tiny` variants are faster than the equivalent non-tiny model, some known examples are below:
diff --git a/frigate/detectors/plugins/tensorrt.py b/frigate/detectors/plugins/tensorrt.py
index 7251b8751..dea3fe078 100644
--- a/frigate/detectors/plugins/tensorrt.py
+++ b/frigate/detectors/plugins/tensorrt.py
@@ -78,7 +78,7 @@ class TensorRtDetector(DetectionApi):
         try:
             trt.init_libnvinfer_plugins(self.trt_logger, "")
 
-            ctypes.cdll.LoadLibrary("/trt-models/libyolo_layer.so")
+            ctypes.cdll.LoadLibrary("/usr/local/lib/libyolo_layer.so")
         except OSError as e:
             logger.error(
                 "ERROR: failed to load libraries. %s",
diff --git a/requirements-tensorrt.txt b/requirements-tensorrt.txt
index 90517babd..214202e43 100644
--- a/requirements-tensorrt.txt
+++ b/requirements-tensorrt.txt
@@ -1,9 +1,12 @@
 # NVidia TensorRT Support (amd64 only)
-nvidia-pyindex; platform_machine == 'x86_64'
-nvidia-tensorrt == 8.4.1.5; platform_machine == 'x86_64'
-cuda-python == 11.7; platform_machine == 'x86_64'
+--extra-index-url 'https://pypi.nvidia.com'
+numpy < 1.24; platform_machine == 'x86_64'
+tensorrt == 8.5.3.*; platform_machine == 'x86_64'
+cuda-python == 11.8; platform_machine == 'x86_64'
 cython == 0.29.*; platform_machine == 'x86_64'
-nvidia-cuda-runtime-cu11 == 11.7.*; platform_machine == 'x86_64'
-nvidia-cublas-cu11 == 11.11.*; platform_machine == 'x86_64'
-nvidia-cudnn-cu11 == 8.7.*; platform_machine == 'x86_64'
-nvidia-cuda-nvrtc-cu11 == 11.7.*; platform_machine == 'x86_64'
\ No newline at end of file
+nvidia-cuda-runtime-cu12 == 12.1.*; platform_machine == 'x86_64'
+nvidia-cuda-runtime-cu11 == 11.8.*; platform_machine == 'x86_64'
+nvidia-cublas-cu11 == 11.11.3.6; platform_machine == 'x86_64'
+nvidia-cudnn-cu11 == 8.6.0.*; platform_machine == 'x86_64'
+onnx==1.14.0; platform_machine == 'x86_64'
+protobuf==3.20.3; platform_machine == 'x86_64'
\ No newline at end of file

From 69630e73851c57428b64242a5ef0d592efa3c167 Mon Sep 17 00:00:00 2001
From: Nicolas Mowen 
Date: Fri, 7 Jul 2023 06:06:42 -0600
Subject: [PATCH 16/25] Fix incorrect responses (#7066)

---
 frigate/http.py | 66 ++++++++++++++++++++++++++++++-------------------
 1 file changed, 41 insertions(+), 25 deletions(-)

diff --git a/frigate/http.py b/frigate/http.py
index ba62af047..fe5da869a 100644
--- a/frigate/http.py
+++ b/frigate/http.py
@@ -420,8 +420,8 @@ def get_labels():
         else:
             events = Event.select(Event.label).distinct()
     except Exception as e:
-        return jsonify(
-            {"success": False, "message": f"Failed to get labels: {e}"}, "404"
+        return make_response(
+            jsonify({"success": False, "message": f"Failed to get labels: {e}"}), 404
         )
 
     labels = sorted([e.label for e in events])
@@ -435,8 +435,9 @@ def get_sub_labels():
     try:
         events = Event.select(Event.sub_label).distinct()
     except Exception as e:
-        return jsonify(
-            {"success": False, "message": f"Failed to get sub_labels: {e}"}, "404"
+        return make_response(
+            jsonify({"success": False, "message": f"Failed to get sub_labels: {e}"}),
+            404,
         )
 
     sub_labels = [e.sub_label for e in events]
@@ -869,12 +870,17 @@ def events():
 @bp.route("/events//