diff --git a/.devcontainer/post_create.sh b/.devcontainer/post_create.sh
index 9c5dec5bb..1a1832f3b 100755
--- a/.devcontainer/post_create.sh
+++ b/.devcontainer/post_create.sh
@@ -14,6 +14,11 @@ curl -L https://api.github.com/meta | jq -r '.ssh_keys | .[]' | \
 sudo mkdir -p /media/frigate
 sudo chown -R "$(id -u):$(id -g)" /media/frigate
 
+# When started as a service, LIBAVFORMAT_VERSION_MAJOR is defined in the
+# s6 service file. For dev, where frigate is started from an interactive
+# shell, we define it in .bashrc instead.
+echo 'export LIBAVFORMAT_VERSION_MAJOR=$(ffmpeg -version | grep -Po "libavformat\W+\K\d+")' >> $HOME/.bashrc
+
 make version
 
 cd web
diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/frigate/run b/docker/rootfs/etc/s6-overlay/s6-rc.d/frigate/run
index 0a835550e..f2cc40fcf 100755
--- a/docker/rootfs/etc/s6-overlay/s6-rc.d/frigate/run
+++ b/docker/rootfs/etc/s6-overlay/s6-rc.d/frigate/run
@@ -44,6 +44,7 @@ function migrate_db_path() {
 echo "[INFO] Preparing Frigate..."
 
 migrate_db_path
+export LIBAVFORMAT_VERSION_MAJOR=$(ffmpeg -version | grep -Po 'libavformat\W+\K\d+')
 
 echo "[INFO] Starting Frigate..."
diff --git a/docker/rootfs/usr/local/go2rtc/create_config.py b/docker/rootfs/usr/local/go2rtc/create_config.py
index 1397adee8..0531b173d 100644
--- a/docker/rootfs/usr/local/go2rtc/create_config.py
+++ b/docker/rootfs/usr/local/go2rtc/create_config.py
@@ -7,7 +7,7 @@ import sys
 import yaml
 
 sys.path.insert(0, "/opt/frigate")
-from frigate.const import BIRDSEYE_PIPE, BTBN_PATH  # noqa: E402
+from frigate.const import BIRDSEYE_PIPE  # noqa: E402
 from frigate.ffmpeg_presets import (  # noqa: E402
     parse_preset_hardware_acceleration_encode,
 )
@@ -71,7 +71,7 @@ elif go2rtc_config["rtsp"].get("default_query") is None:
     go2rtc_config["rtsp"]["default_query"] = "mp4"
 
 # need to replace ffmpeg command when using ffmpeg4
-if not os.path.exists(BTBN_PATH):
+if int(os.environ["LIBAVFORMAT_VERSION_MAJOR"]) < 59:
     if go2rtc_config.get("ffmpeg") is None:
         go2rtc_config["ffmpeg"] = {
             "rtsp": "-fflags nobuffer -flags low_delay -stimeout 5000000 -user_agent go2rtc/ffmpeg -rtsp_transport tcp -i {input}"
diff --git a/docs/docs/integrations/mqtt.md b/docs/docs/integrations/mqtt.md
index ef5a17141..f16b0f7e4 100644
--- a/docs/docs/integrations/mqtt.md
+++ b/docs/docs/integrations/mqtt.md
@@ -184,7 +184,7 @@ Topic to send PTZ commands to camera.
 | Command                | Description                                                                                     |
 | ---------------------- | ----------------------------------------------------------------------------------------- |
-| `preset-<preset_name>` | send command to move to preset with name `<preset_name>`                                        |
+| `preset_<preset_name>` | send command to move to preset with name `<preset_name>`                                        |
 | `MOVE_<direction>`     | send command to continuously move in `<direction>`, possible values are [UP, DOWN, LEFT, RIGHT] |
 | `ZOOM_<direction>`     | send command to continuously zoom `<direction>`, possible values are [IN, OUT]                  |
 | `STOP`                 | send command to stop moving                                                                     |
diff --git a/frigate/app.py b/frigate/app.py
index 507986abc..5a99fe243 100644
--- a/frigate/app.py
+++ b/frigate/app.py
@@ -10,6 +10,7 @@ from multiprocessing.synchronize import Event as MpEvent
 from types import FrameType
 from typing import Optional
 
+import faster_fifo as ff
 import psutil
 from faster_fifo import Queue
 from peewee_migrate import Router
@@ -47,6 +48,7 @@ from frigate.stats import StatsEmitter, stats_init
 from frigate.storage import StorageMaintainer
 from frigate.timeline import TimelineProcessor
 from frigate.types import CameraMetricsTypes, FeatureMetricsTypes
+from frigate.util import LimitedQueue as LQueue
 from frigate.version import VERSION
 from frigate.video import capture_camera, track_camera
 from frigate.watchdog import FrigateWatchdog
@@ -57,11 +59,11 @@ logger = logging.getLogger(__name__)
 class FrigateApp:
     def __init__(self) -> None:
         self.stop_event: MpEvent = mp.Event()
-        self.detection_queue: Queue = mp.Queue()
+        self.detection_queue: Queue = ff.Queue()
         self.detectors: dict[str, ObjectDetectProcess] = {}
         self.detection_out_events: dict[str, MpEvent] = {}
         self.detection_shms: list[mp.shared_memory.SharedMemory] = []
-        self.log_queue: Queue = mp.Queue()
+        self.log_queue: Queue = ff.Queue()
         self.plus_api = PlusApi()
         self.camera_metrics: dict[str, CameraMetricsTypes] = {}
         self.feature_metrics: dict[str, FeatureMetricsTypes] = {}
@@ -164,7 +166,7 @@ class FrigateApp:
             "ffmpeg_pid": mp.Value("i", 0),  # type: ignore[typeddict-item]
             # issue https://github.com/python/typeshed/issues/8799
             # from mypy 0.981 onwards
-            "frame_queue": mp.Queue(maxsize=2),
+            "frame_queue": LQueue(maxsize=2),
             "capture_process": None,
             "process": None,
         }
@@ -197,22 +199,22 @@ class FrigateApp:
     def init_queues(self) -> None:
         # Queues for clip processing
-        self.event_queue: Queue = mp.Queue()
-        self.event_processed_queue: Queue = mp.Queue()
-        self.video_output_queue: Queue = mp.Queue(
+        self.event_queue: Queue = ff.Queue()
+        self.event_processed_queue: Queue = ff.Queue()
+        self.video_output_queue: Queue = LQueue(
             maxsize=len(self.config.cameras.keys()) * 2
         )
 
         # Queue for cameras to push tracked objects to
-        self.detected_frames_queue: Queue = mp.Queue(
+        self.detected_frames_queue: Queue = LQueue(
             maxsize=len(self.config.cameras.keys()) * 2
         )
 
         # Queue for recordings info
-        self.recordings_info_queue: Queue = mp.Queue()
+        self.recordings_info_queue: Queue = ff.Queue()
 
         # Queue for timeline events
-        self.timeline_queue: Queue = mp.Queue()
+        self.timeline_queue: Queue = ff.Queue()
 
     def init_database(self) -> None:
         def vacuum_db(db: SqliteExtDatabase) -> None:
@@ -456,6 +458,7 @@ class FrigateApp:
         )
         audio_process.daemon = True
         audio_process.start()
+        self.processes["audioDetector"] = audio_process.pid or 0
         logger.info(f"Audio process started: {audio_process.pid}")
 
     def start_timeline_processor(self) -> None:
diff --git a/frigate/comms/dispatcher.py b/frigate/comms/dispatcher.py
index 9babf38a0..9217da4bb 100644
--- a/frigate/comms/dispatcher.py
+++ b/frigate/comms/dispatcher.py
@@ -273,7 +273,7 @@ class Dispatcher:
         try:
"preset" in payload.lower(): command = OnvifCommandEnum.preset - param = payload.lower().split("-")[1] + param = payload.lower()[payload.index("_") + 1 :] else: command = OnvifCommandEnum[payload.lower()] param = "" diff --git a/frigate/const.py b/frigate/const.py index 20e2b0daa..b6b0e44bd 100644 --- a/frigate/const.py +++ b/frigate/const.py @@ -11,7 +11,6 @@ YAML_EXT = (".yaml", ".yml") FRIGATE_LOCALHOST = "http://127.0.0.1:5000" PLUS_ENV_VAR = "PLUS_API_KEY" PLUS_API_HOST = "https://api.frigate.video" -BTBN_PATH = "/usr/lib/btbn-ffmpeg" # Attributes diff --git a/frigate/events/audio.py b/frigate/events/audio.py index 164c9c62d..488c94fcc 100644 --- a/frigate/events/audio.py +++ b/frigate/events/audio.py @@ -187,7 +187,7 @@ class AudioEventMaintainer(threading.Thread): else: resp = requests.post( f"{FRIGATE_LOCALHOST}/api/events/{self.config.name}/{label}/create", - json={"duration": None}, + json={"duration": None, "source_type": "audio"}, ) if resp.status_code == 200: @@ -209,14 +209,19 @@ class AudioEventMaintainer(threading.Thread): now - detection.get("last_detection", now) > self.config.audio.max_not_heard ): - requests.put( + resp = requests.put( f"{FRIGATE_LOCALHOST}/api/events/{detection['id']}/end", json={ "end_time": detection["last_detection"] + self.config.record.events.post_capture }, ) - self.detections[detection["label"]] = None + if resp.status_code == 200: + self.detections[detection["label"]] = None + else: + logger.warn( + f"Failed to end audio event {detection['id']} with status code {resp.status_code}" + ) def restart_audio_pipe(self) -> None: try: diff --git a/frigate/events/external.py b/frigate/events/external.py index 25ba289f2..23439f2bd 100644 --- a/frigate/events/external.py +++ b/frigate/events/external.py @@ -29,6 +29,7 @@ class ExternalEventProcessor: self, camera: str, label: str, + source_type: str, sub_label: Optional[str], duration: Optional[int], include_recording: bool, @@ -56,11 +57,16 @@ class ExternalEventProcessor: "label": label, "sub_label": sub_label, "camera": camera, - "start_time": now, - "end_time": now + duration if duration is not None else None, + "start_time": now - camera_config.record.events.pre_capture, + "end_time": now + + duration + + camera_config.record.events.post_capture + if duration is not None + else None, "thumbnail": thumbnail, "has_clip": camera_config.record.enabled and include_recording, "has_snapshot": True, + "type": source_type, }, ) ) diff --git a/frigate/events/maintainer.py b/frigate/events/maintainer.py index d688f7dfe..34cb01261 100644 --- a/frigate/events/maintainer.py +++ b/frigate/events/maintainer.py @@ -193,6 +193,7 @@ class EventProcessor(threading.Thread): "score": score, "top_score": event_data["top_score"], "attributes": attributes, + "type": "object", }, } @@ -216,8 +217,8 @@ class EventProcessor(threading.Thread): del self.events_in_process[event_data["id"]] self.event_processed_queue.put((event_data["id"], camera)) - def handle_external_detection(self, type: str, event_data: Event) -> None: - if type == "new": + def handle_external_detection(self, event_type: str, event_data: Event) -> None: + if event_type == "new": event = { Event.id: event_data["id"], Event.label: event_data["label"], @@ -229,10 +230,10 @@ class EventProcessor(threading.Thread): Event.has_clip: event_data["has_clip"], Event.has_snapshot: event_data["has_snapshot"], Event.zones: [], - Event.data: {}, + Event.data: {"type": event_data["type"]}, } Event.insert(event).execute() - elif type == "end": + elif event_type == "end": event 
             event = {
                 Event.id: event_data["id"],
                 Event.end_time: event_data["end_time"],
diff --git a/frigate/ffmpeg_presets.py b/frigate/ffmpeg_presets.py
index a2785813c..4c2e16dbb 100644
--- a/frigate/ffmpeg_presets.py
+++ b/frigate/ffmpeg_presets.py
@@ -5,7 +5,6 @@ import os
 from enum import Enum
 from typing import Any
 
-from frigate.const import BTBN_PATH
 from frigate.util import vainfo_hwaccel
 from frigate.version import VERSION
 
@@ -43,7 +42,11 @@ class LibvaGpuSelector:
         return ""
 
 
-TIMEOUT_PARAM = "-timeout" if os.path.exists(BTBN_PATH) else "-stimeout"
+TIMEOUT_PARAM = (
+    "-timeout"
+    if int(os.getenv("LIBAVFORMAT_VERSION_MAJOR", "59")) >= 59
+    else "-stimeout"
+)
 
 _gpu_selector = LibvaGpuSelector()
 _user_agent_args = [
@@ -107,14 +110,14 @@ PRESETS_HW_ACCEL_DECODE = {
 }
 
 PRESETS_HW_ACCEL_SCALE = {
-    "preset-rpi-32-h264": "-r {0} -s {1}x{2}",
-    "preset-rpi-64-h264": "-r {0} -s {1}x{2}",
+    "preset-rpi-32-h264": "-r {0} -vf fps={0},scale={1}:{2}",
+    "preset-rpi-64-h264": "-r {0} -vf fps={0},scale={1}:{2}",
     "preset-vaapi": "-r {0} -vf fps={0},scale_vaapi=w={1}:h={2},hwdownload,format=yuv420p",
     "preset-intel-qsv-h264": "-r {0} -vf vpp_qsv=framerate={0}:w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p",
    "preset-intel-qsv-h265": "-r {0} -vf vpp_qsv=framerate={0}:w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p",
     "preset-nvidia-h264": "-r {0} -vf fps={0},scale_cuda=w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p",
     "preset-nvidia-h265": "-r {0} -vf fps={0},scale_cuda=w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p",
-    "default": "-r {0} -s {1}x{2}",
+    "default": "-r {0} -vf fps={0},scale={1}:{2}",
 }
 
 PRESETS_HW_ACCEL_ENCODE_BIRDSEYE = {
diff --git a/frigate/http.py b/frigate/http.py
index cf653ca4f..f3a456cf9 100644
--- a/frigate/http.py
+++ b/frigate/http.py
@@ -884,6 +884,7 @@ def create_event(camera_name, label):
         event_id = current_app.external_processor.create_manual_event(
             camera_name,
             label,
+            json.get("source_type", "api"),
             json.get("sub_label", None),
             json.get("duration", 30),
             json.get("include_recording", True),
diff --git a/frigate/object_detection.py b/frigate/object_detection.py
index 0a2a7059c..cebd7ff41 100644
--- a/frigate/object_detection.py
+++ b/frigate/object_detection.py
@@ -7,6 +7,7 @@ import signal
 import threading
 from abc import ABC, abstractmethod
 
+import faster_fifo as ff
 import numpy as np
 from setproctitle import setproctitle
 
@@ -72,7 +73,7 @@ class LocalObjectDetector(ObjectDetector):
 
 def run_detector(
     name: str,
-    detection_queue: mp.Queue,
+    detection_queue: ff.Queue,
     out_events: dict[str, mp.Event],
     avg_speed,
     start,
diff --git a/frigate/output.py b/frigate/output.py
index ab928efb5..24f4c45cc 100644
--- a/frigate/output.py
+++ b/frigate/output.py
@@ -29,6 +29,61 @@ from frigate.util import SharedMemoryFrameManager, copy_yuv_to_position, get_yuv
 logger = logging.getLogger(__name__)
 
 
+def get_standard_aspect_ratio(width, height) -> tuple[int, int]:
+    """Ensure that only standard aspect ratios are used."""
+    known_aspects = [
+        (16, 9),
+        (9, 16),
+        (32, 9),
+        (12, 9),
+        (9, 12),
+    ]  # aspects are scaled to have common relative size
+    known_aspects_ratios = list(
+        map(lambda aspect: aspect[0] / aspect[1], known_aspects)
+    )
+    closest = min(
+        known_aspects_ratios,
+        key=lambda x: abs(x - (width / height)),
+    )
+    return known_aspects[known_aspects_ratios.index(closest)]
+
+
+class Canvas:
+    def __init__(self, canvas_width: int, canvas_height: int) -> None:
+        gcd = math.gcd(canvas_width, canvas_height)
+        self.aspect = get_standard_aspect_ratio(
+            (canvas_width / gcd), (canvas_height / gcd)
+        )
+        self.width = canvas_width
+        self.height = (self.width * self.aspect[1]) / self.aspect[0]
+        self.coefficient_cache: dict[int, int] = {}
+        self.aspect_cache: dict[str, tuple[int, int]] = {}
+
+    def get_aspect(self, coefficient: int) -> tuple[int, int]:
+        return (self.aspect[0] * coefficient, self.aspect[1] * coefficient)
+
+    def get_coefficient(self, camera_count: int) -> int:
+        return self.coefficient_cache.get(camera_count, 2)
+
+    def set_coefficient(self, camera_count: int, coefficient: int) -> None:
+        self.coefficient_cache[camera_count] = coefficient
+
+    def get_camera_aspect(
+        self, cam_name: str, camera_width: int, camera_height: int
+    ) -> tuple[int, int]:
+        cached = self.aspect_cache.get(cam_name)
+
+        if cached:
+            return cached
+
+        gcd = math.gcd(camera_width, camera_height)
+        camera_aspect = get_standard_aspect_ratio(
+            camera_width / gcd, camera_height / gcd
+        )
+        self.aspect_cache[cam_name] = camera_aspect
+        return camera_aspect
+
+
 class FFMpegConverter:
     def __init__(
         self,
@@ -170,6 +225,7 @@ class BirdsEyeFrameManager:
         self.frame_shape = (height, width)
         self.yuv_shape = (height * 3 // 2, width)
         self.frame = np.ndarray(self.yuv_shape, dtype=np.uint8)
+        self.canvas = Canvas(width, height)
         self.stop_event = stop_event
 
         # initialize the frame as black and with the Frigate logo
@@ -318,16 +374,15 @@ class BirdsEyeFrameManager:
             ),
         )
 
-        canvas_width = self.config.birdseye.width
-        canvas_height = self.config.birdseye.height
-
         if len(active_cameras) == 1:
             # show single camera as fullscreen
             camera = active_cameras_to_add[0]
             camera_dims = self.cameras[camera]["dimensions"].copy()
-            scaled_width = int(canvas_height * camera_dims[0] / camera_dims[1])
+            scaled_width = int(self.canvas.height * camera_dims[0] / camera_dims[1])
             coefficient = (
-                1 if scaled_width <= canvas_width else canvas_width / scaled_width
+                1
+                if scaled_width <= self.canvas.width
+                else self.canvas.width / scaled_width
             )
             self.camera_layout = [
                 [
@@ -337,14 +392,14 @@ class BirdsEyeFrameManager:
                             0,
                             0,
                             int(scaled_width * coefficient),
-                            int(canvas_height * coefficient),
+                            int(self.canvas.height * coefficient),
                         ),
                     )
                 ]
             ]
         else:
             # calculate optimal layout
-            coefficient = 2
+            coefficient = self.canvas.get_coefficient(len(active_cameras))
             calculating = True
 
             # decrease scaling coefficient until height of all cameras can fit into the birdseye canvas
@@ -353,7 +408,6 @@ class BirdsEyeFrameManager:
                     return
 
                 layout_candidate = self.calculate_layout(
-                    (canvas_width, canvas_height),
                     active_cameras_to_add,
                     coefficient,
                 )
@@ -367,6 +421,7 @@ class BirdsEyeFrameManager:
                     return
 
                 calculating = False
+                self.canvas.set_coefficient(len(active_cameras), coefficient)
 
             self.camera_layout = layout_candidate
 
@@ -378,9 +433,7 @@ class BirdsEyeFrameManager:
 
         return True
 
-    def calculate_layout(
-        self, canvas, cameras_to_add: list[str], coefficient
-    ) -> tuple[any]:
+    def calculate_layout(self, cameras_to_add: list[str], coefficient) -> tuple[any]:
         """Calculate the optimal layout for 2+ cameras."""
 
         def map_layout(row_height: int):
@@ -397,23 +450,20 @@ class BirdsEyeFrameManager:
                 x = starting_x
                 for cameras in row:
                     camera_dims = self.cameras[cameras[0]]["dimensions"].copy()
+                    camera_aspect = cameras[1]
 
                     if camera_dims[1] > camera_dims[0]:
                         scaled_height = int(row_height * 2)
-                        scaled_width = int(
-                            scaled_height * camera_dims[0] / camera_dims[1]
-                        )
+                        scaled_width = int(scaled_height * camera_aspect)
                         starting_x = scaled_width
                     else:
                         scaled_height = row_height
-                        scaled_width = int(
-                            scaled_height * camera_dims[0] / camera_dims[1]
-                        )
+                        scaled_width = int(scaled_height * camera_aspect)
 
                     # layout is too large
                     if (
-                        x + scaled_width > canvas_width
-                        or y + scaled_height > canvas_height
+                        x + scaled_width > self.canvas.width
+                        or y + scaled_height > self.canvas.height
                     ):
                         return 0, 0, None
 
@@ -425,13 +475,9 @@ class BirdsEyeFrameManager:
             return max_width, y, candidate_layout
 
-        canvas_width = canvas[0]
-        canvas_height = canvas[1]
+        canvas_aspect_x, canvas_aspect_y = self.canvas.get_aspect(coefficient)
         camera_layout: list[list[any]] = []
         camera_layout.append([])
-        canvas_gcd = math.gcd(canvas[0], canvas[1])
-        canvas_aspect_x = (canvas[0] / canvas_gcd) * coefficient
-        canvas_aspect_y = (canvas[0] / canvas_gcd) * coefficient
         starting_x = 0
         x = starting_x
         y = 0
         max_y = 0
        for camera in cameras_to_add:
             camera_dims = self.cameras[camera]["dimensions"].copy()
-            camera_gcd = math.gcd(camera_dims[0], camera_dims[1])
-            camera_aspect_x = camera_dims[0] / camera_gcd
-            camera_aspect_y = camera_dims[1] / camera_gcd
-
-            if round(camera_aspect_x / camera_aspect_y, 1) == 1.8:
-                # account for slightly off 16:9 cameras
-                camera_aspect_x = 16
-                camera_aspect_y = 9
-            elif round(camera_aspect_x / camera_aspect_y, 1) == 1.3:
-                # make 4:3 cameras the same relative size as 16:9
-                camera_aspect_x = 12
-                camera_aspect_y = 9
+            camera_aspect_x, camera_aspect_y = self.canvas.get_camera_aspect(
+                camera, camera_dims[0], camera_dims[1]
+            )
 
             if camera_dims[1] > camera_dims[0]:
                 portrait = True
@@ -462,10 +499,7 @@ class BirdsEyeFrameManager:
                 camera_layout[y_i].append(
                     (
                         camera,
-                        (
-                            camera_aspect_x,
-                            camera_aspect_y,
-                        ),
+                        camera_aspect_x / camera_aspect_y,
                     )
                 )
 
@@ -491,7 +525,7 @@ class BirdsEyeFrameManager:
                 camera_layout[y_i].append(
                     (
                         camera,
-                        (camera_aspect_x, camera_aspect_y),
+                        camera_aspect_x / camera_aspect_y,
                     )
                 )
                 x += camera_aspect_x
@@ -499,15 +533,16 @@ class BirdsEyeFrameManager:
         if y + max_y > canvas_aspect_y:
             return None
 
-        row_height = int(canvas_height / coefficient)
+        row_height = int(self.canvas.height / coefficient)
         total_width, total_height, standard_candidate_layout = map_layout(row_height)
 
         # layout can't be optimized more
-        if total_width / canvas_width >= 0.99:
+        if total_width / self.canvas.width >= 0.99:
             return standard_candidate_layout
 
         scale_up_percent = min(
-            1 - (total_width / canvas_width), 1 - (total_height / canvas_height)
+            1 - (total_width / self.canvas.width),
+            1 - (total_height / self.canvas.height),
         )
         row_height = int(row_height * (1 + round(scale_up_percent, 1)))
         _, _, scaled_layout = map_layout(row_height)
diff --git a/frigate/record/maintainer.py b/frigate/record/maintainer.py
index 8e40fc6e7..b4208f2d2 100644
--- a/frigate/record/maintainer.py
+++ b/frigate/record/maintainer.py
@@ -3,7 +3,6 @@
 import asyncio
 import datetime
 import logging
-import multiprocessing as mp
 import os
 import queue
 import random
@@ -15,6 +14,7 @@ from multiprocessing.synchronize import Event as MpEvent
 from pathlib import Path
 from typing import Any, Tuple
 
+import faster_fifo as ff
 import psutil
 
 from frigate.config import FrigateConfig, RetainModeEnum
@@ -30,7 +30,7 @@ class RecordingMaintainer(threading.Thread):
     def __init__(
         self,
         config: FrigateConfig,
-        recordings_info_queue: mp.Queue,
+        recordings_info_queue: ff.Queue,
         process_info: dict[str, FeatureMetricsTypes],
         stop_event: MpEvent,
     ):
diff --git a/frigate/record/record.py b/frigate/record/record.py
index 530adc031..0d22342aa 100644
--- a/frigate/record/record.py
+++ b/frigate/record/record.py
@@ -7,6 +7,7 @@ import threading
 from types import FrameType
 from typing import Optional
 
+import faster_fifo as ff
 from playhouse.sqliteq import SqliteQueueDatabase
 from setproctitle import setproctitle
 
@@ -22,7 +23,7 @@ logger = logging.getLogger(__name__)
 
 def manage_recordings(
     config: FrigateConfig,
-    recordings_info_queue: mp.Queue,
+    recordings_info_queue: ff.Queue,
     process_info: dict[str, FeatureMetricsTypes],
 ) -> None:
     stop_event = mp.Event()
diff --git a/frigate/util.py b/frigate/util.py
index f535a9572..cc9bb03c9 100755
--- a/frigate/util.py
+++ b/frigate/util.py
@@ -1,18 +1,22 @@
 import copy
+import ctypes
 import datetime
 import json
 import logging
+import multiprocessing
 import os
 import re
 import shlex
 import signal
 import subprocess as sp
+import time
 import traceback
 import urllib.parse
 from abc import ABC, abstractmethod
 from collections import Counter
 from collections.abc import Mapping
 from multiprocessing import shared_memory
+from queue import Empty, Full
 from typing import Any, AnyStr, Optional, Tuple
 
 import cv2
@@ -21,6 +25,8 @@ import psutil
 import py3nvml.py3nvml as nvml
 import pytz
 import yaml
+from faster_fifo import DEFAULT_CIRCULAR_BUFFER_SIZE, DEFAULT_TIMEOUT
+from faster_fifo import Queue as FFQueue
 
 from frigate.const import REGEX_HTTP_CAMERA_USER_PASS, REGEX_RTSP_CAMERA_USER_PASS
 
@@ -1218,3 +1224,47 @@ def get_video_properties(url, get_duration=False):
         result["height"] = round(height)
 
     return result
+
+
+class LimitedQueue(FFQueue):
+    def __init__(
+        self,
+        maxsize=0,
+        max_size_bytes=DEFAULT_CIRCULAR_BUFFER_SIZE,
+        loads=None,
+        dumps=None,
+    ):
+        super().__init__(max_size_bytes=max_size_bytes, loads=loads, dumps=dumps)
+        self.maxsize = maxsize
+        self.size = multiprocessing.RawValue(
+            ctypes.c_int, 0
+        )  # Add a counter for the number of items in the queue
+
+    def put(self, x, block=True, timeout=DEFAULT_TIMEOUT):
+        if self.maxsize > 0 and self.size.value >= self.maxsize:
+            if block:
+                start_time = time.time()
+                while self.size.value >= self.maxsize:
+                    remaining = timeout - (time.time() - start_time)
+                    if remaining <= 0.0:
+                        raise Full
+                    time.sleep(min(remaining, 0.1))
+            else:
+                raise Full
+        self.size.value += 1
+        return super().put(x, block=block, timeout=timeout)
+
+    def get(self, block=True, timeout=DEFAULT_TIMEOUT):
+        if self.size.value <= 0 and not block:
+            raise Empty
+        self.size.value -= 1
+        return super().get(block=block, timeout=timeout)
+
+    def qsize(self):
+        return self.size.value
+
+    def empty(self):
+        return self.qsize() == 0
+
+    def full(self):
+        return self.qsize() == self.maxsize
diff --git a/frigate/video.py b/frigate/video.py
index 037b297c9..2a9dda10f 100755
--- a/frigate/video.py
+++ b/frigate/video.py
@@ -11,10 +11,11 @@ import time
 from collections import defaultdict
 
 import cv2
+import faster_fifo as ff
 import numpy as np
 from setproctitle import setproctitle
 
-from frigate.config import CameraConfig, DetectConfig
+from frigate.config import CameraConfig, DetectConfig, ModelConfig
 from frigate.const import ALL_ATTRIBUTE_LABELS, ATTRIBUTE_LABEL_MAP, CACHE_DIR
 from frigate.detectors.detector_config import PixelFormatEnum
 from frigate.log import LogPipe
@@ -95,7 +96,17 @@ def filtered(obj, objects_to_track, object_filters):
     return False
 
 
-def create_tensor_input(frame, model_config, region):
+def get_min_region_size(model_config: ModelConfig) -> int:
+    """Get the min region size and ensure it is divisible by 4."""
+    half = int(max(model_config.height, model_config.width) / 2)
+
+    if half % 4 == 0:
+        return half
+
+    return int((half + 3) / 4) * 4
+
+
+def create_tensor_input(frame, model_config: ModelConfig, region):
     if model_config.input_pixel_format == PixelFormatEnum.rgb:
         cropped_frame = yuv_region_2_rgb(frame, region)
     elif model_config.input_pixel_format == PixelFormatEnum.bgr:
@@ -195,17 +206,16 @@ def capture_frames(
 
         frame_rate.update()
 
-        # if the queue is full, skip this frame
-        if frame_queue.full():
+        # don't lock the queue to check, just try since it should rarely be full
+        try:
+            # add to the queue
+            frame_queue.put(current_frame.value, False)
+            # close the frame
+            frame_manager.close(frame_name)
+        except queue.Full:
+            # if the queue is full, skip this frame
             skipped_eps.update()
             frame_manager.delete(frame_name)
-            continue
-
-        # close the frame
-        frame_manager.close(frame_name)
-
-        # add to the queue
-        frame_queue.put(current_frame.value)
 
 
 class CameraWatchdog(threading.Thread):
@@ -720,15 +730,15 @@ def get_consolidated_object_detections(detected_object_groups):
 
 def process_frames(
     camera_name: str,
-    frame_queue: mp.Queue,
+    frame_queue: ff.Queue,
     frame_shape,
-    model_config,
+    model_config: ModelConfig,
     detect_config: DetectConfig,
     frame_manager: FrameManager,
     motion_detector: MotionDetector,
     object_detector: RemoteObjectDetector,
     object_tracker: ObjectTracker,
-    detected_objects_queue: mp.Queue,
+    detected_objects_queue: ff.Queue,
     process_info: dict,
     objects_to_track: list[str],
     object_filters,
@@ -747,16 +757,18 @@ def process_frames(
 
     startup_scan_counter = 0
 
-    region_min_size = int(max(model_config.height, model_config.width) / 2)
+    region_min_size = get_min_region_size(model_config)
 
     while not stop_event.is_set():
-        if exit_on_empty and frame_queue.empty():
-            logger.info("Exiting track_objects...")
-            break
-
         try:
-            frame_time = frame_queue.get(True, 1)
+            if exit_on_empty:
+                frame_time = frame_queue.get(False)
+            else:
+                frame_time = frame_queue.get(True, 1)
         except queue.Empty:
+            if exit_on_empty:
+                logger.info("Exiting track_objects...")
+                break
             continue
 
         current_frame_time.value = frame_time
diff --git a/web/src/routes/System.jsx b/web/src/routes/System.jsx
index 82c8d619d..8f4f1436f 100644
--- a/web/src/routes/System.jsx
+++ b/web/src/routes/System.jsx
@@ -334,7 +334,7 @@ export default function System() {
       ) : (
-          {cameraNames.map((camera) => (
+          {cameraNames.map((camera) => ( config.cameras[camera]["enabled"] && (
                  {camera.replaceAll('_', ' ')}
@@ -406,7 +406,7 @@ export default function System() {
-
+
            )
          ))}
      )}
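
The scripts above export `LIBAVFORMAT_VERSION_MAJOR` with `grep -Po`, and the Python side reads it back from the environment (with `59` as the fallback in `ffmpeg_presets.py`). As a rough, illustrative equivalent — not part of this change set, with the function name and default chosen for the sketch — the same probe could be done entirely in Python:

```python
import re
import subprocess


def libavformat_version_major(default: int = 59) -> int:
    """Parse `ffmpeg -version` output for the libavformat major version."""
    try:
        output = subprocess.run(
            ["ffmpeg", "-version"], capture_output=True, text=True, check=True
        ).stdout
    except (OSError, subprocess.CalledProcessError):
        # ffmpeg missing or failing: fall back to the assumed default
        return default

    match = re.search(r"libavformat\s+(\d+)", output)
    return int(match.group(1)) if match else default


# libavformat >= 59 (ffmpeg 5.x) takes -timeout; older builds still use -stimeout
TIMEOUT_PARAM = "-timeout" if libavformat_version_major() >= 59 else "-stimeout"
```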
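
`capture_frames` now relies on a non-blocking `put` that raises `queue.Full` instead of checking `full()` first, and `process_frames` drains with a non-blocking or short-timeout `get`. A minimal, self-contained sketch of that producer/consumer pattern, using the stdlib `queue` and `threading` modules purely for illustration (Frigate itself uses `faster_fifo` queues across processes):

```python
import queue
import threading
import time

frame_queue: "queue.Queue[float]" = queue.Queue(maxsize=2)


def capture(stop: threading.Event) -> None:
    """Producer: never block on a full queue, just drop the frame."""
    while not stop.is_set():
        frame_time = time.time()
        try:
            frame_queue.put(frame_time, block=False)  # non-blocking put
        except queue.Full:
            pass  # queue is full, skip this frame
        time.sleep(0.01)


def process(stop: threading.Event) -> None:
    """Consumer: block briefly so the stop event is still honored."""
    while not stop.is_set():
        try:
            frame_time = frame_queue.get(block=True, timeout=1)
        except queue.Empty:
            continue
        print(f"processing frame captured at {frame_time:.3f}")


if __name__ == "__main__":
    stop = threading.Event()
    workers = [
        threading.Thread(target=capture, args=(stop,)),
        threading.Thread(target=process, args=(stop,)),
    ]
    for worker in workers:
        worker.start()
    time.sleep(0.2)
    stop.set()
    for worker in workers:
        worker.join()
```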