From b7dbcce6e596bbd4abfe04d91ff1d475987b5ce8 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Fri, 16 May 2025 07:33:08 -0600 Subject: [PATCH 001/530] Update ROCm to 6.4.0 (#18264) * Update to rocm 6.4.0 * Update URL * Remove old env var --- docker/rocm/Dockerfile | 5 ++--- docker/rocm/requirements-wheels-rocm.txt | 2 +- docker/rocm/rocm.hcl | 2 +- 3 files changed, 4 insertions(+), 5 deletions(-) diff --git a/docker/rocm/Dockerfile b/docker/rocm/Dockerfile index 7cac69eef..752b2d7cc 100644 --- a/docker/rocm/Dockerfile +++ b/docker/rocm/Dockerfile @@ -2,7 +2,7 @@ # https://askubuntu.com/questions/972516/debian-frontend-environment-variable ARG DEBIAN_FRONTEND=noninteractive -ARG ROCM=6.3.3 +ARG ROCM=6.4.0 ARG AMDGPU=gfx900 ARG HSA_OVERRIDE_GFX_VERSION ARG HSA_OVERRIDE @@ -15,7 +15,7 @@ ARG AMDGPU RUN apt update && \ apt install -y wget gpg && \ - wget -O rocm.deb https://repo.radeon.com/amdgpu-install/$ROCM/ubuntu/jammy/amdgpu-install_6.3.60303-1_all.deb && \ + wget -O rocm.deb https://repo.radeon.com/amdgpu-install/6.4/ubuntu/jammy/amdgpu-install_6.4.60400-1_all.deb && \ apt install -y ./rocm.deb && \ apt update && \ apt install -y rocm @@ -62,7 +62,6 @@ COPY --from=rocm /opt/rocm-dist/ / ####################################################################### FROM deps-prelim AS rocm-prelim-hsa-override0 ENV HSA_ENABLE_SDMA=0 -ENV MIGRAPHX_ENABLE_NHWC=1 ENV TF_ROCM_USE_IMMEDIATE_MODE=1 COPY --from=rocm-dist / / diff --git a/docker/rocm/requirements-wheels-rocm.txt b/docker/rocm/requirements-wheels-rocm.txt index 85450768e..1cdeaed20 100644 --- a/docker/rocm/requirements-wheels-rocm.txt +++ b/docker/rocm/requirements-wheels-rocm.txt @@ -1 +1 @@ -onnxruntime-rocm @ https://github.com/NickM-27/frigate-onnxruntime-rocm/releases/download/v6.3.3/onnxruntime_rocm-1.20.1-cp311-cp311-linux_x86_64.whl \ No newline at end of file +onnxruntime-rocm @ https://github.com/NickM-27/frigate-onnxruntime-rocm/releases/download/v6.4.0/onnxruntime_rocm-1.21.1-cp311-cp311-linux_x86_64.whl \ No newline at end of file diff --git a/docker/rocm/rocm.hcl b/docker/rocm/rocm.hcl index 6a84b350d..82ab58782 100644 --- a/docker/rocm/rocm.hcl +++ b/docker/rocm/rocm.hcl @@ -2,7 +2,7 @@ variable "AMDGPU" { default = "gfx900" } variable "ROCM" { - default = "6.3.3" + default = "6.4.0" } variable "HSA_OVERRIDE_GFX_VERSION" { default = "" From dc187eee1c538e0a467aab5116977b710d4d2ec5 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Thu, 22 May 2025 12:16:51 -0600 Subject: [PATCH 002/530] Dynamic Config Updates (#18353) * Create classes to handle publishing and subscribing config updates * Cleanup * Use config updater * Update handling for enabled config * Cleanup * Recording config updates * Birdseye config updates * Handle notifications * handle review * Update motion --- frigate/app.py | 6 +- frigate/comms/dispatcher.py | 83 +++++++++++++++----- frigate/comms/webpush.py | 22 ++++-- frigate/config/camera/updater.py | 119 +++++++++++++++++++++++++++++ frigate/events/audio.py | 33 +++----- frigate/motion/improved_motion.py | 10 +-- frigate/output/birdseye.py | 17 ----- frigate/output/output.py | 29 ++++--- frigate/output/preview.py | 10 --- frigate/record/maintainer.py | 21 ++--- frigate/review/maintainer.py | 68 +++++------------ frigate/track/object_processing.py | 35 ++++----- frigate/video.py | 99 ++++++++++++------------ 13 files changed, 316 insertions(+), 236 deletions(-) create mode 100644 frigate/config/camera/updater.py diff --git a/frigate/app.py b/frigate/app.py index cc596a98a..26c32c8f8 100644 --- 
a/frigate/app.py +++ b/frigate/app.py @@ -19,7 +19,6 @@ from frigate.api.auth import hash_password from frigate.api.fastapi_app import create_fastapi_app from frigate.camera import CameraMetrics, PTZMetrics from frigate.comms.base_communicator import Communicator -from frigate.comms.config_updater import ConfigPublisher from frigate.comms.dispatcher import Dispatcher from frigate.comms.event_metadata_updater import EventMetadataPublisher from frigate.comms.inter_process import InterProcessCommunicator @@ -27,6 +26,7 @@ from frigate.comms.mqtt import MqttClient from frigate.comms.webpush import WebPushClient from frigate.comms.ws import WebSocketClient from frigate.comms.zmq_proxy import ZmqProxy +from frigate.config.camera.updater import CameraConfigUpdatePublisher from frigate.config.config import FrigateConfig from frigate.const import ( CACHE_DIR, @@ -322,7 +322,7 @@ class FrigateApp: def init_inter_process_communicator(self) -> None: self.inter_process_communicator = InterProcessCommunicator() - self.inter_config_updater = ConfigPublisher() + self.inter_config_updater = CameraConfigUpdatePublisher() self.event_metadata_updater = EventMetadataPublisher() self.inter_zmq_proxy = ZmqProxy() @@ -482,7 +482,7 @@ class FrigateApp: capture_process = util.Process( target=capture_camera, name=f"camera_capture:{name}", - args=(name, config, shm_frame_count, self.camera_metrics[name]), + args=(config, shm_frame_count, self.camera_metrics[name]), ) capture_process.daemon = True self.camera_metrics[name].capture_process = capture_process diff --git a/frigate/comms/dispatcher.py b/frigate/comms/dispatcher.py index 87891ec88..33f3ec158 100644 --- a/frigate/comms/dispatcher.py +++ b/frigate/comms/dispatcher.py @@ -8,9 +8,13 @@ from typing import Any, Callable, Optional from frigate.camera import PTZMetrics from frigate.camera.activity_manager import CameraActivityManager from frigate.comms.base_communicator import Communicator -from frigate.comms.config_updater import ConfigPublisher from frigate.comms.webpush import WebPushClient from frigate.config import BirdseyeModeEnum, FrigateConfig +from frigate.config.camera.updater import ( + CameraConfigUpdateEnum, + CameraConfigUpdatePublisher, + CameraConfigUpdateTopic, +) from frigate.const import ( CLEAR_ONGOING_REVIEW_SEGMENTS, INSERT_MANY_RECORDINGS, @@ -38,7 +42,7 @@ class Dispatcher: def __init__( self, config: FrigateConfig, - config_updater: ConfigPublisher, + config_updater: CameraConfigUpdatePublisher, onvif: OnvifController, ptz_metrics: dict[str, PTZMetrics], communicators: list[Communicator], @@ -273,8 +277,11 @@ class Dispatcher: f"Turning on motion for {camera_name} due to detection being enabled." 
) motion_settings.enabled = True - self.config_updater.publish( - f"config/motion/{camera_name}", motion_settings + self.config_updater.publish_update( + CameraConfigUpdateTopic( + CameraConfigUpdateEnum.motion, camera_name + ), + motion_settings, ) self.publish(f"{camera_name}/motion/state", payload, retain=True) elif payload == "OFF": @@ -303,7 +310,10 @@ class Dispatcher: logger.info(f"Turning off camera {camera_name}") camera_settings.enabled = False - self.config_updater.publish(f"config/enabled/{camera_name}", camera_settings) + self.config_updater.publish_update( + CameraConfigUpdateTopic(CameraConfigUpdateEnum.enabled, camera_name), + camera_settings.enabled, + ) self.publish(f"{camera_name}/enabled/state", payload, retain=True) def _on_motion_command(self, camera_name: str, payload: str) -> None: @@ -326,7 +336,10 @@ class Dispatcher: logger.info(f"Turning off motion for {camera_name}") motion_settings.enabled = False - self.config_updater.publish(f"config/motion/{camera_name}", motion_settings) + self.config_updater.publish_update( + CameraConfigUpdateTopic(CameraConfigUpdateEnum.motion, camera_name), + motion_settings, + ) self.publish(f"{camera_name}/motion/state", payload, retain=True) def _on_motion_improve_contrast_command( @@ -344,7 +357,10 @@ class Dispatcher: logger.info(f"Turning off improve contrast for {camera_name}") motion_settings.improve_contrast = False # type: ignore[union-attr] - self.config_updater.publish(f"config/motion/{camera_name}", motion_settings) + self.config_updater.publish_update( + CameraConfigUpdateTopic(CameraConfigUpdateEnum.motion, camera_name), + motion_settings, + ) self.publish(f"{camera_name}/improve_contrast/state", payload, retain=True) def _on_ptz_autotracker_command(self, camera_name: str, payload: str) -> None: @@ -384,7 +400,10 @@ class Dispatcher: motion_settings = self.config.cameras[camera_name].motion logger.info(f"Setting motion contour area for {camera_name}: {payload}") motion_settings.contour_area = payload # type: ignore[union-attr] - self.config_updater.publish(f"config/motion/{camera_name}", motion_settings) + self.config_updater.publish_update( + CameraConfigUpdateTopic(CameraConfigUpdateEnum.motion, camera_name), + motion_settings, + ) self.publish(f"{camera_name}/motion_contour_area/state", payload, retain=True) def _on_motion_threshold_command(self, camera_name: str, payload: int) -> None: @@ -398,7 +417,10 @@ class Dispatcher: motion_settings = self.config.cameras[camera_name].motion logger.info(f"Setting motion threshold for {camera_name}: {payload}") motion_settings.threshold = payload # type: ignore[union-attr] - self.config_updater.publish(f"config/motion/{camera_name}", motion_settings) + self.config_updater.publish_update( + CameraConfigUpdateTopic(CameraConfigUpdateEnum.motion, camera_name), + motion_settings, + ) self.publish(f"{camera_name}/motion_threshold/state", payload, retain=True) def _on_global_notification_command(self, payload: str) -> None: @@ -410,8 +432,8 @@ class Dispatcher: notification_settings = self.config.notifications logger.info(f"Setting all notifications: {payload}") notification_settings.enabled = payload == "ON" # type: ignore[union-attr] - self.config_updater.publish( - "config/notifications", {"_global_notifications": notification_settings} + self.config_updater.publisher.publish( + "config/notifications", notification_settings ) self.publish("notifications/state", payload, retain=True) @@ -434,7 +456,10 @@ class Dispatcher: logger.info(f"Turning off audio detection for {camera_name}") 
audio_settings.enabled = False - self.config_updater.publish(f"config/audio/{camera_name}", audio_settings) + self.config_updater.publish_update( + CameraConfigUpdateTopic(CameraConfigUpdateEnum.audio, camera_name), + audio_settings, + ) self.publish(f"{camera_name}/audio/state", payload, retain=True) def _on_recordings_command(self, camera_name: str, payload: str) -> None: @@ -456,7 +481,10 @@ class Dispatcher: logger.info(f"Turning off recordings for {camera_name}") record_settings.enabled = False - self.config_updater.publish(f"config/record/{camera_name}", record_settings) + self.config_updater.publish_update( + CameraConfigUpdateTopic(CameraConfigUpdateEnum.record, camera_name), + record_settings, + ) self.publish(f"{camera_name}/recordings/state", payload, retain=True) def _on_snapshots_command(self, camera_name: str, payload: str) -> None: @@ -472,6 +500,10 @@ class Dispatcher: logger.info(f"Turning off snapshots for {camera_name}") snapshots_settings.enabled = False + self.config_updater.publish_update( + CameraConfigUpdateTopic(CameraConfigUpdateEnum.snapshots, camera_name), + snapshots_settings, + ) self.publish(f"{camera_name}/snapshots/state", payload, retain=True) def _on_ptz_command(self, camera_name: str, payload: str) -> None: @@ -506,7 +538,10 @@ class Dispatcher: logger.info(f"Turning off birdseye for {camera_name}") birdseye_settings.enabled = False - self.config_updater.publish(f"config/birdseye/{camera_name}", birdseye_settings) + self.config_updater.publish_update( + CameraConfigUpdateTopic(CameraConfigUpdateEnum.birdseye, camera_name), + birdseye_settings, + ) self.publish(f"{camera_name}/birdseye/state", payload, retain=True) def _on_birdseye_mode_command(self, camera_name: str, payload: str) -> None: @@ -527,7 +562,10 @@ class Dispatcher: f"Setting birdseye mode for {camera_name} to {birdseye_settings.mode}" ) - self.config_updater.publish(f"config/birdseye/{camera_name}", birdseye_settings) + self.config_updater.publish_update( + CameraConfigUpdateTopic(CameraConfigUpdateEnum.birdseye, camera_name), + birdseye_settings, + ) self.publish(f"{camera_name}/birdseye_mode/state", payload, retain=True) def _on_camera_notification_command(self, camera_name: str, payload: str) -> None: @@ -559,8 +597,9 @@ class Dispatcher: ): self.web_push_client.suspended_cameras[camera_name] = 0 - self.config_updater.publish( - "config/notifications", {camera_name: notification_settings} + self.config_updater.publish_update( + CameraConfigUpdateTopic(CameraConfigUpdateEnum.notifications, camera_name), + notification_settings, ) self.publish(f"{camera_name}/notifications/state", payload, retain=True) self.publish(f"{camera_name}/notifications/suspended", "0", retain=True) @@ -617,7 +656,10 @@ class Dispatcher: logger.info(f"Turning off alerts for {camera_name}") review_settings.alerts.enabled = False - self.config_updater.publish(f"config/review/{camera_name}", review_settings) + self.config_updater.publish_update( + CameraConfigUpdateTopic(CameraConfigUpdateEnum.review, camera_name), + review_settings, + ) self.publish(f"{camera_name}/review_alerts/state", payload, retain=True) def _on_detections_command(self, camera_name: str, payload: str) -> None: @@ -639,5 +681,8 @@ class Dispatcher: logger.info(f"Turning off detections for {camera_name}") review_settings.detections.enabled = False - self.config_updater.publish(f"config/review/{camera_name}", review_settings) + self.config_updater.publish_update( + CameraConfigUpdateTopic(CameraConfigUpdateEnum.review, camera_name), + 
review_settings, + ) self.publish(f"{camera_name}/review_detections/state", payload, retain=True) diff --git a/frigate/comms/webpush.py b/frigate/comms/webpush.py index c5986d45c..d93c3169b 100644 --- a/frigate/comms/webpush.py +++ b/frigate/comms/webpush.py @@ -17,6 +17,10 @@ from titlecase import titlecase from frigate.comms.base_communicator import Communicator from frigate.comms.config_updater import ConfigSubscriber from frigate.config import FrigateConfig +from frigate.config.camera.updater import ( + CameraConfigUpdateEnum, + CameraConfigUpdateSubscriber, +) from frigate.const import CONFIG_DIR from frigate.models import User @@ -73,7 +77,12 @@ class WebPushClient(Communicator): # type: ignore[misc] self.web_pushers[user["username"]].append(WebPusher(sub)) # notification config updater - self.config_subscriber = ConfigSubscriber("config/notifications") + self.global_config_subscriber = ConfigSubscriber( + "config/notifications", exact=True + ) + self.config_subscriber = CameraConfigUpdateSubscriber( + self.config.cameras, [CameraConfigUpdateEnum.notifications] + ) def subscribe(self, receiver: Callable) -> None: """Wrapper for allowing dispatcher to subscribe.""" @@ -154,15 +163,14 @@ class WebPushClient(Communicator): # type: ignore[misc] def publish(self, topic: str, payload: Any, retain: bool = False) -> None: """Wrapper for publishing when client is in valid state.""" # check for updated notification config - _, updated_notification_config = self.config_subscriber.check_for_update() + _, updated_notification_config = ( + self.global_config_subscriber.check_for_update() + ) if updated_notification_config: - for key, value in updated_notification_config.items(): - if key == "_global_notifications": - self.config.notifications = value + self.config.notifications = updated_notification_config - elif key in self.config.cameras: - self.config.cameras[key].notifications = value + self.config_subscriber.check_for_updates() if topic == "reviews": decoded = json.loads(payload) diff --git a/frigate/config/camera/updater.py b/frigate/config/camera/updater.py new file mode 100644 index 000000000..5abca57eb --- /dev/null +++ b/frigate/config/camera/updater.py @@ -0,0 +1,119 @@ +"""Convenience classes for updating configurations dynamically.""" + +from dataclasses import dataclass +from enum import Enum +from typing import Any + +from frigate.comms.config_updater import ConfigPublisher, ConfigSubscriber +from frigate.config import CameraConfig + + +class CameraConfigUpdateEnum(str, Enum): + """Supported camera config update types.""" + + audio = "audio" + birdseye = "birdseye" + detect = "detect" + enabled = "enabled" + motion = "motion" # includes motion and motion masks + notifications = "notifications" + record = "record" + review = "review" + snapshots = "snapshots" + zones = "zones" + + +@dataclass +class CameraConfigUpdateTopic: + update_type: CameraConfigUpdateEnum + camera: str + + @property + def topic(self) -> str: + return f"config/cameras/{self.camera}/{self.update_type.name}" + + +class CameraConfigUpdatePublisher: + def __init__(self): + self.publisher = ConfigPublisher() + + def publish_update(self, topic: CameraConfigUpdateTopic, config: Any) -> None: + self.publisher.publish(topic.topic, config) + + def stop(self) -> None: + self.publisher.stop() + + +class CameraConfigUpdateSubscriber: + def __init__( + self, + camera_configs: dict[str, CameraConfig], + topics: list[CameraConfigUpdateEnum], + ): + self.camera_configs = camera_configs + self.topics = topics + + base_topic = 
"config/cameras" + + if len(self.camera_configs) == 1: + base_topic += f"/{list(self.camera_configs.keys())[0]}" + + self.subscriber = ConfigSubscriber( + base_topic, + exact=False, + ) + + def __update_config( + self, camera: str, update_type: CameraConfigUpdateEnum, updated_config: Any + ) -> None: + config = self.camera_configs[camera] + + if not config: + return + + if update_type == CameraConfigUpdateEnum.audio: + config.audio = updated_config + elif update_type == CameraConfigUpdateEnum.birdseye: + config.birdseye = updated_config + elif update_type == CameraConfigUpdateEnum.detect: + config.detect = updated_config + elif update_type == CameraConfigUpdateEnum.enabled: + config.enabled = updated_config + elif update_type == CameraConfigUpdateEnum.motion: + config.motion = updated_config + elif update_type == CameraConfigUpdateEnum.notifications: + config.notifications = updated_config + elif update_type == CameraConfigUpdateEnum.record: + config.record = updated_config + elif update_type == CameraConfigUpdateEnum.review: + config.review = updated_config + elif update_type == CameraConfigUpdateEnum.snapshots: + config.snapshots = updated_config + elif update_type == CameraConfigUpdateEnum.zones: + config.zones = updated_config + + def check_for_updates(self) -> dict[str, list[str]]: + updated_topics: dict[str, list[str]] = {} + + # get all updates available + while True: + update_topic, update_config = self.subscriber.check_for_update() + + if update_topic is None or update_config is None: + break + + _, _, camera, raw_type = update_topic.split("/") + update_type = CameraConfigUpdateEnum[raw_type] + + if update_type in self.topics: + if update_type.name in updated_topics: + updated_topics[update_type.name].append(camera) + else: + updated_topics[update_type.name] = [camera] + + self.__update_config(camera, update_type, update_config) + + return updated_topics + + def stop(self) -> None: + self.subscriber.stop() diff --git a/frigate/events/audio.py b/frigate/events/audio.py index f2a217fd3..8a929c8ff 100644 --- a/frigate/events/audio.py +++ b/frigate/events/audio.py @@ -12,7 +12,6 @@ import numpy as np import frigate.util as util from frigate.camera import CameraMetrics -from frigate.comms.config_updater import ConfigSubscriber from frigate.comms.detections_updater import DetectionPublisher, DetectionTypeEnum from frigate.comms.event_metadata_updater import ( EventMetadataPublisher, @@ -20,6 +19,10 @@ from frigate.comms.event_metadata_updater import ( ) from frigate.comms.inter_process import InterProcessRequestor from frigate.config import CameraConfig, CameraInput, FfmpegConfig +from frigate.config.camera.updater import ( + CameraConfigUpdateEnum, + CameraConfigUpdateSubscriber, +) from frigate.const import ( AUDIO_DURATION, AUDIO_FORMAT, @@ -138,9 +141,9 @@ class AudioEventMaintainer(threading.Thread): # create communication for audio detections self.requestor = InterProcessRequestor() - self.config_subscriber = ConfigSubscriber(f"config/audio/{camera.name}") - self.enabled_subscriber = ConfigSubscriber( - f"config/enabled/{camera.name}", True + self.config_subscriber = CameraConfigUpdateSubscriber( + {self.config.name: self.config}, + [CameraConfigUpdateEnum.audio, CameraConfigUpdateEnum.enabled], ) self.detection_publisher = DetectionPublisher(DetectionTypeEnum.audio) self.event_metadata_publisher = EventMetadataPublisher() @@ -308,21 +311,12 @@ class AudioEventMaintainer(threading.Thread): self.logger.error(f"Error reading audio data from ffmpeg process: {e}") log_and_restart() - 
def _update_enabled_state(self) -> bool: - """Fetch the latest config and update enabled state.""" - _, config_data = self.enabled_subscriber.check_for_update() - if config_data: - self.config.enabled = config_data.enabled - return config_data.enabled - - return self.config.enabled - def run(self) -> None: - if self._update_enabled_state(): + if self.config.enabled: self.start_or_restart_ffmpeg() while not self.stop_event.is_set(): - enabled = self._update_enabled_state() + enabled = self.config.enabled if enabled != self.was_enabled: if enabled: self.logger.debug( @@ -344,13 +338,7 @@ class AudioEventMaintainer(threading.Thread): continue # check if there is an updated config - ( - updated_topic, - updated_audio_config, - ) = self.config_subscriber.check_for_update() - - if updated_topic: - self.config.audio = updated_audio_config + self.config_subscriber.check_for_updates() self.read_audio() @@ -359,7 +347,6 @@ class AudioEventMaintainer(threading.Thread): self.logpipe.close() self.requestor.stop() self.config_subscriber.stop() - self.enabled_subscriber.stop() self.detection_publisher.stop() diff --git a/frigate/motion/improved_motion.py b/frigate/motion/improved_motion.py index 69de6d015..10818ea70 100644 --- a/frigate/motion/improved_motion.py +++ b/frigate/motion/improved_motion.py @@ -5,7 +5,6 @@ import numpy as np from scipy.ndimage import gaussian_filter from frigate.camera import PTZMetrics -from frigate.comms.config_updater import ConfigSubscriber from frigate.config import MotionConfig from frigate.motion import MotionDetector from frigate.util.image import grab_cv2_contours @@ -49,7 +48,6 @@ class ImprovedMotionDetector(MotionDetector): self.contrast_values = np.zeros((contrast_frame_history, 2), np.uint8) self.contrast_values[:, 1:2] = 255 self.contrast_values_index = 0 - self.config_subscriber = ConfigSubscriber(f"config/motion/{name}", True) self.ptz_metrics = ptz_metrics self.last_stop_time = None @@ -59,12 +57,6 @@ class ImprovedMotionDetector(MotionDetector): def detect(self, frame): motion_boxes = [] - # check for updated motion config - _, updated_motion_config = self.config_subscriber.check_for_update() - - if updated_motion_config: - self.config = updated_motion_config - if not self.config.enabled: return motion_boxes @@ -246,4 +238,4 @@ class ImprovedMotionDetector(MotionDetector): def stop(self) -> None: """stop the motion detector.""" - self.config_subscriber.stop() + pass diff --git a/frigate/output/birdseye.py b/frigate/output/birdseye.py index b295af82e..78686fd63 100644 --- a/frigate/output/birdseye.py +++ b/frigate/output/birdseye.py @@ -15,7 +15,6 @@ from typing import Any, Optional import cv2 import numpy as np -from frigate.comms.config_updater import ConfigSubscriber from frigate.config import BirdseyeModeEnum, FfmpegConfig, FrigateConfig from frigate.const import BASE_DIR, BIRDSEYE_PIPE, INSTALL_DIR from frigate.util.image import ( @@ -754,7 +753,6 @@ class Birdseye: "birdseye", self.converter, websocket_server, stop_event ) self.birdseye_manager = BirdsEyeFrameManager(config, stop_event) - self.birdseye_subscriber = ConfigSubscriber("config/birdseye/") self.frame_manager = SharedMemoryFrameManager() self.stop_event = stop_event @@ -791,20 +789,6 @@ class Birdseye: frame_time: float, frame: np.ndarray, ) -> None: - # check if there is an updated config - while True: - ( - updated_birdseye_topic, - updated_birdseye_config, - ) = self.birdseye_subscriber.check_for_update() - - if not updated_birdseye_topic: - break - - if updated_birdseye_config: - 
camera_name = updated_birdseye_topic.rpartition("/")[-1] - self.config.cameras[camera_name].birdseye = updated_birdseye_config - if self.birdseye_manager.update( camera, len([o for o in current_tracked_objects if not o["stationary"]]), @@ -815,6 +799,5 @@ class Birdseye: self.__send_new_frame() def stop(self) -> None: - self.birdseye_subscriber.stop() self.converter.join() self.broadcaster.join() diff --git a/frigate/output/output.py b/frigate/output/output.py index 1723ac73c..6decf0005 100644 --- a/frigate/output/output.py +++ b/frigate/output/output.py @@ -17,10 +17,13 @@ from ws4py.server.wsgirefserver import ( ) from ws4py.server.wsgiutils import WebSocketWSGIApplication -from frigate.comms.config_updater import ConfigSubscriber from frigate.comms.detections_updater import DetectionSubscriber, DetectionTypeEnum from frigate.comms.ws import WebSocket from frigate.config import FrigateConfig +from frigate.config.camera.updater import ( + CameraConfigUpdateEnum, + CameraConfigUpdateSubscriber, +) from frigate.const import CACHE_DIR, CLIPS_DIR from frigate.output.birdseye import Birdseye from frigate.output.camera import JsmpegCamera @@ -99,7 +102,14 @@ def output_frames( websocket_thread = threading.Thread(target=websocket_server.serve_forever) detection_subscriber = DetectionSubscriber(DetectionTypeEnum.video) - config_enabled_subscriber = ConfigSubscriber("config/enabled/") + config_subscriber = CameraConfigUpdateSubscriber( + config.cameras, + [ + CameraConfigUpdateEnum.birdseye, + CameraConfigUpdateEnum.enabled, + CameraConfigUpdateEnum.record, + ], + ) jsmpeg_cameras: dict[str, JsmpegCamera] = {} birdseye: Birdseye | None = None @@ -125,18 +135,7 @@ def output_frames( while not stop_event.is_set(): # check if there is an updated config - while True: - ( - updated_enabled_topic, - updated_enabled_config, - ) = config_enabled_subscriber.check_for_update() - - if not updated_enabled_topic: - break - - if updated_enabled_config: - camera_name = updated_enabled_topic.rpartition("/")[-1] - config.cameras[camera_name].enabled = updated_enabled_config.enabled + config_subscriber.check_for_updates() (topic, data) = detection_subscriber.check_for_update(timeout=1) now = datetime.datetime.now().timestamp() @@ -240,7 +239,7 @@ def output_frames( if birdseye is not None: birdseye.stop() - config_enabled_subscriber.stop() + config_subscriber.stop() websocket_server.manager.close_all() websocket_server.manager.stop() websocket_server.manager.join() diff --git a/frigate/output/preview.py b/frigate/output/preview.py index 08caa6738..6dfd90904 100644 --- a/frigate/output/preview.py +++ b/frigate/output/preview.py @@ -13,7 +13,6 @@ from typing import Any import cv2 import numpy as np -from frigate.comms.config_updater import ConfigSubscriber from frigate.comms.inter_process import InterProcessRequestor from frigate.config import CameraConfig, RecordQualityEnum from frigate.const import CACHE_DIR, CLIPS_DIR, INSERT_PREVIEW, PREVIEW_FRAME_TYPE @@ -174,9 +173,6 @@ class PreviewRecorder: # create communication for finished previews self.requestor = InterProcessRequestor() - self.config_subscriber = ConfigSubscriber( - f"config/record/{self.config.name}", True - ) y, u1, u2, v1, v2 = get_yuv_crop( self.config.frame_shape_yuv, @@ -323,12 +319,6 @@ class PreviewRecorder: ) -> None: self.offline = False - # check for updated record config - _, updated_record_config = self.config_subscriber.check_for_update() - - if updated_record_config: - self.config.record = updated_record_config - # always write the first 
frame if self.start_time == 0: self.start_time = frame_time diff --git a/frigate/record/maintainer.py b/frigate/record/maintainer.py index f1b9a600e..7f13451d6 100644 --- a/frigate/record/maintainer.py +++ b/frigate/record/maintainer.py @@ -16,7 +16,6 @@ from typing import Any, Optional, Tuple import numpy as np import psutil -from frigate.comms.config_updater import ConfigSubscriber from frigate.comms.detections_updater import DetectionSubscriber, DetectionTypeEnum from frigate.comms.inter_process import InterProcessRequestor from frigate.comms.recordings_updater import ( @@ -24,6 +23,10 @@ from frigate.comms.recordings_updater import ( RecordingsDataTypeEnum, ) from frigate.config import FrigateConfig, RetainModeEnum +from frigate.config.camera.updater import ( + CameraConfigUpdateEnum, + CameraConfigUpdateSubscriber, +) from frigate.const import ( CACHE_DIR, CACHE_SEGMENT_FORMAT, @@ -71,7 +74,9 @@ class RecordingMaintainer(threading.Thread): # create communication for retained recordings self.requestor = InterProcessRequestor() - self.config_subscriber = ConfigSubscriber("config/record/") + self.config_subscriber = CameraConfigUpdateSubscriber( + self.config.cameras, [CameraConfigUpdateEnum.record] + ) self.detection_subscriber = DetectionSubscriber(DetectionTypeEnum.all) self.recordings_publisher = RecordingsDataPublisher( RecordingsDataTypeEnum.recordings_available_through @@ -518,17 +523,7 @@ class RecordingMaintainer(threading.Thread): run_start = datetime.datetime.now().timestamp() # check if there is an updated config - while True: - ( - updated_topic, - updated_record_config, - ) = self.config_subscriber.check_for_update() - - if not updated_topic: - break - - camera_name = updated_topic.rpartition("/")[-1] - self.config.cameras[camera_name].record = updated_record_config + self.config_subscriber.check_for_updates() stale_frame_count = 0 stale_frame_count_threshold = 10 diff --git a/frigate/review/maintainer.py b/frigate/review/maintainer.py index b144b6e52..7f60a0209 100644 --- a/frigate/review/maintainer.py +++ b/frigate/review/maintainer.py @@ -15,10 +15,13 @@ from typing import Any, Optional import cv2 import numpy as np -from frigate.comms.config_updater import ConfigSubscriber from frigate.comms.detections_updater import DetectionSubscriber, DetectionTypeEnum from frigate.comms.inter_process import InterProcessRequestor from frigate.config import CameraConfig, FrigateConfig +from frigate.config.camera.updater import ( + CameraConfigUpdateEnum, + CameraConfigUpdateSubscriber, +) from frigate.const import ( CLEAR_ONGOING_REVIEW_SEGMENTS, CLIPS_DIR, @@ -150,9 +153,14 @@ class ReviewSegmentMaintainer(threading.Thread): # create communication for review segments self.requestor = InterProcessRequestor() - self.record_config_subscriber = ConfigSubscriber("config/record/") - self.review_config_subscriber = ConfigSubscriber("config/review/") - self.enabled_config_subscriber = ConfigSubscriber("config/enabled/") + self.config_subscriber = CameraConfigUpdateSubscriber( + config.cameras, + [ + CameraConfigUpdateEnum.enabled, + CameraConfigUpdateEnum.record, + CameraConfigUpdateEnum.review, + ], + ) self.detection_subscriber = DetectionSubscriber(DetectionTypeEnum.all) # manual events @@ -458,50 +466,15 @@ class ReviewSegmentMaintainer(threading.Thread): def run(self) -> None: while not self.stop_event.is_set(): # check if there is an updated config - while True: - ( - updated_record_topic, - updated_record_config, - ) = self.record_config_subscriber.check_for_update() + updated_topics 
= self.config_subscriber.check_for_updates() - ( - updated_review_topic, - updated_review_config, - ) = self.review_config_subscriber.check_for_update() + if "record" in updated_topics: + for camera in updated_topics["record"]: + self.end_segment(camera) - ( - updated_enabled_topic, - updated_enabled_config, - ) = self.enabled_config_subscriber.check_for_update() - - if ( - not updated_record_topic - and not updated_review_topic - and not updated_enabled_topic - ): - break - - if updated_record_topic: - camera_name = updated_record_topic.rpartition("/")[-1] - self.config.cameras[camera_name].record = updated_record_config - - # immediately end segment - if not updated_record_config.enabled: - self.end_segment(camera_name) - - if updated_review_topic: - camera_name = updated_review_topic.rpartition("/")[-1] - self.config.cameras[camera_name].review = updated_review_config - - if updated_enabled_config: - camera_name = updated_enabled_topic.rpartition("/")[-1] - self.config.cameras[ - camera_name - ].enabled = updated_enabled_config.enabled - - # immediately end segment as we may not get another update - if not updated_enabled_config.enabled: - self.end_segment(camera_name) + if "enabled" in updated_topics: + for camera in updated_topics["enabled"]: + self.end_segment(camera) (topic, data) = self.detection_subscriber.check_for_update(timeout=1) @@ -730,8 +703,7 @@ class ReviewSegmentMaintainer(threading.Thread): f"Dedicated LPR camera API has been called for {camera}, but detections are disabled. LPR events will not appear as a detection." ) - self.record_config_subscriber.stop() - self.review_config_subscriber.stop() + self.config_subscriber.stop() self.requestor.stop() self.detection_subscriber.stop() logger.info("Exiting review maintainer...") diff --git a/frigate/track/object_processing.py b/frigate/track/object_processing.py index 773c6da30..1bc2c40fc 100644 --- a/frigate/track/object_processing.py +++ b/frigate/track/object_processing.py @@ -14,7 +14,6 @@ import numpy as np from peewee import SQL, DoesNotExist from frigate.camera.state import CameraState -from frigate.comms.config_updater import ConfigSubscriber from frigate.comms.detections_updater import DetectionPublisher, DetectionTypeEnum from frigate.comms.dispatcher import Dispatcher from frigate.comms.event_metadata_updater import ( @@ -29,6 +28,10 @@ from frigate.config import ( RecordConfig, SnapshotsConfig, ) +from frigate.config.camera.updater import ( + CameraConfigUpdateEnum, + CameraConfigUpdateSubscriber, +) from frigate.const import ( FAST_QUEUE_TIMEOUT, UPDATE_CAMERA_ACTIVITY, @@ -67,7 +70,9 @@ class TrackedObjectProcessor(threading.Thread): self.last_motion_detected: dict[str, float] = {} self.ptz_autotracker_thread = ptz_autotracker_thread - self.config_enabled_subscriber = ConfigSubscriber("config/enabled/") + self.config_subscriber = CameraConfigUpdateSubscriber( + self.config.cameras, [CameraConfigUpdateEnum.enabled] + ) self.requestor = InterProcessRequestor() self.detection_publisher = DetectionPublisher(DetectionTypeEnum.all) @@ -638,24 +643,14 @@ class TrackedObjectProcessor(threading.Thread): def run(self): while not self.stop_event.is_set(): # check for config updates - while True: - ( - updated_enabled_topic, - updated_enabled_config, - ) = self.config_enabled_subscriber.check_for_update() + updated_topics = self.config_subscriber.check_for_updates() - if not updated_enabled_topic: - break - - camera_name = updated_enabled_topic.rpartition("/")[-1] - self.config.cameras[ - camera_name - ].enabled = 
updated_enabled_config.enabled - - if self.camera_states[camera_name].prev_enabled is None: - self.camera_states[ - camera_name - ].prev_enabled = updated_enabled_config.enabled + if "enabled" in updated_topics: + for camera in updated_topics["enabled"]: + if self.camera_states[camera].prev_enabled is None: + self.camera_states[camera].prev_enabled = self.config.cameras[ + camera + ].enabled # manage camera disabled state for camera, config in self.config.cameras.items(): @@ -764,6 +759,6 @@ class TrackedObjectProcessor(threading.Thread): self.event_sender.stop() self.event_end_subscriber.stop() self.sub_label_subscriber.stop() - self.config_enabled_subscriber.stop() + self.config_subscriber.stop() logger.info("Exiting object processor...") diff --git a/frigate/video.py b/frigate/video.py index f2197ed66..95fbf2267 100755 --- a/frigate/video.py +++ b/frigate/video.py @@ -15,10 +15,13 @@ import cv2 from setproctitle import setproctitle from frigate.camera import CameraMetrics, PTZMetrics -from frigate.comms.config_updater import ConfigSubscriber from frigate.comms.inter_process import InterProcessRequestor from frigate.config import CameraConfig, DetectConfig, ModelConfig from frigate.config.camera.camera import CameraTypeEnum +from frigate.config.camera.updater import ( + CameraConfigUpdateEnum, + CameraConfigUpdateSubscriber, +) from frigate.const import ( CACHE_DIR, CACHE_SEGMENT_FORMAT, @@ -112,15 +115,13 @@ def capture_frames( frame_rate.start() skipped_eps = EventsPerSecond() skipped_eps.start() - config_subscriber = ConfigSubscriber(f"config/enabled/{config.name}", True) + config_subscriber = CameraConfigUpdateSubscriber( + {config.name: config}, [CameraConfigUpdateEnum.enabled] + ) def get_enabled_state(): """Fetch the latest enabled state from ZMQ.""" - _, config_data = config_subscriber.check_for_update() - - if config_data: - config.enabled = config_data.enabled - + config_subscriber.check_for_updates() return config.enabled while not stop_event.is_set(): @@ -167,7 +168,6 @@ def capture_frames( class CameraWatchdog(threading.Thread): def __init__( self, - camera_name, config: CameraConfig, shm_frame_count: int, frame_queue: Queue, @@ -177,13 +177,12 @@ class CameraWatchdog(threading.Thread): stop_event, ): threading.Thread.__init__(self) - self.logger = logging.getLogger(f"watchdog.{camera_name}") - self.camera_name = camera_name + self.logger = logging.getLogger(f"watchdog.{config.name}") self.config = config self.shm_frame_count = shm_frame_count self.capture_thread = None self.ffmpeg_detect_process = None - self.logpipe = LogPipe(f"ffmpeg.{self.camera_name}.detect") + self.logpipe = LogPipe(f"ffmpeg.{self.config.name}.detect") self.ffmpeg_other_processes: list[dict[str, Any]] = [] self.camera_fps = camera_fps self.skipped_fps = skipped_fps @@ -196,16 +195,14 @@ class CameraWatchdog(threading.Thread): self.stop_event = stop_event self.sleeptime = self.config.ffmpeg.retry_interval - self.config_subscriber = ConfigSubscriber(f"config/enabled/{camera_name}", True) + self.config_subscriber = CameraConfigUpdateSubscriber( + {config.name: config}, [CameraConfigUpdateEnum.enabled] + ) self.was_enabled = self.config.enabled def _update_enabled_state(self) -> bool: """Fetch the latest config and update enabled state.""" - _, config_data = self.config_subscriber.check_for_update() - if config_data: - self.config.enabled = config_data.enabled - return config_data.enabled - + self.config_subscriber.check_for_updates() return self.config.enabled def reset_capture_thread( @@ -245,10 +242,10 
@@ class CameraWatchdog(threading.Thread): enabled = self._update_enabled_state() if enabled != self.was_enabled: if enabled: - self.logger.debug(f"Enabling camera {self.camera_name}") + self.logger.debug(f"Enabling camera {self.config.name}") self.start_all_ffmpeg() else: - self.logger.debug(f"Disabling camera {self.camera_name}") + self.logger.debug(f"Disabling camera {self.config.name}") self.stop_all_ffmpeg() self.was_enabled = enabled continue @@ -261,7 +258,7 @@ class CameraWatchdog(threading.Thread): if not self.capture_thread.is_alive(): self.camera_fps.value = 0 self.logger.error( - f"Ffmpeg process crashed unexpectedly for {self.camera_name}." + f"Ffmpeg process crashed unexpectedly for {self.config.name}." ) self.reset_capture_thread(terminate=False) elif self.camera_fps.value >= (self.config.detect.fps + 10): @@ -271,13 +268,13 @@ class CameraWatchdog(threading.Thread): self.fps_overflow_count = 0 self.camera_fps.value = 0 self.logger.info( - f"{self.camera_name} exceeded fps limit. Exiting ffmpeg..." + f"{self.config.name} exceeded fps limit. Exiting ffmpeg..." ) self.reset_capture_thread(drain_output=False) elif now - self.capture_thread.current_frame.value > 20: self.camera_fps.value = 0 self.logger.info( - f"No frames received from {self.camera_name} in 20 seconds. Exiting ffmpeg..." + f"No frames received from {self.config.name} in 20 seconds. Exiting ffmpeg..." ) self.reset_capture_thread() else: @@ -299,7 +296,7 @@ class CameraWatchdog(threading.Thread): latest_segment_time + datetime.timedelta(seconds=120) ): self.logger.error( - f"No new recording segments were created for {self.camera_name} in the last 120s. restarting the ffmpeg record process..." + f"No new recording segments were created for {self.config.name} in the last 120s. restarting the ffmpeg record process..." ) p["process"] = start_or_restart_ffmpeg( p["cmd"], @@ -346,13 +343,13 @@ class CameraWatchdog(threading.Thread): def start_all_ffmpeg(self): """Start all ffmpeg processes (detection and others).""" - logger.debug(f"Starting all ffmpeg processes for {self.camera_name}") + logger.debug(f"Starting all ffmpeg processes for {self.config.name}") self.start_ffmpeg_detect() for c in self.config.ffmpeg_cmds: if "detect" in c["roles"]: continue logpipe = LogPipe( - f"ffmpeg.{self.camera_name}.{'_'.join(sorted(c['roles']))}" + f"ffmpeg.{self.config.name}.{'_'.join(sorted(c['roles']))}" ) self.ffmpeg_other_processes.append( { @@ -365,12 +362,12 @@ class CameraWatchdog(threading.Thread): def stop_all_ffmpeg(self): """Stop all ffmpeg processes (detection and others).""" - logger.debug(f"Stopping all ffmpeg processes for {self.camera_name}") + logger.debug(f"Stopping all ffmpeg processes for {self.config.name}") if self.capture_thread is not None and self.capture_thread.is_alive(): self.capture_thread.join(timeout=5) if self.capture_thread.is_alive(): self.logger.warning( - f"Capture thread for {self.camera_name} did not stop gracefully." + f"Capture thread for {self.config.name} did not stop gracefully." 
) if self.ffmpeg_detect_process is not None: stop_ffmpeg(self.ffmpeg_detect_process, self.logger) @@ -397,7 +394,7 @@ class CameraWatchdog(threading.Thread): newest_segment_time = latest_segment for file in cache_files: - if self.camera_name in file: + if self.config.name in file: basename = os.path.splitext(file)[0] _, date = basename.rsplit("@", maxsplit=1) segment_time = datetime.datetime.strptime( @@ -454,7 +451,7 @@ class CameraCapture(threading.Thread): def capture_camera( - name, config: CameraConfig, shm_frame_count: int, camera_metrics: CameraMetrics + config: CameraConfig, shm_frame_count: int, camera_metrics: CameraMetrics ): stop_event = mp.Event() @@ -464,11 +461,10 @@ def capture_camera( signal.signal(signal.SIGTERM, receiveSignal) signal.signal(signal.SIGINT, receiveSignal) - threading.current_thread().name = f"capture:{name}" - setproctitle(f"frigate.capture:{name}") + threading.current_thread().name = f"capture:{config.name}" + setproctitle(f"frigate.capture:{config.name}") camera_watchdog = CameraWatchdog( - name, config, shm_frame_count, camera_metrics.frame_queue, @@ -536,7 +532,6 @@ def track_camera( frame_shape, model_config, config, - config.detect, frame_manager, motion_detector, object_detector, @@ -603,7 +598,6 @@ def process_frames( frame_shape: tuple[int, int], model_config: ModelConfig, camera_config: CameraConfig, - detect_config: DetectConfig, frame_manager: FrameManager, motion_detector: MotionDetector, object_detector: RemoteObjectDetector, @@ -618,8 +612,14 @@ def process_frames( exit_on_empty: bool = False, ): next_region_update = get_tomorrow_at_time(2) - detect_config_subscriber = ConfigSubscriber(f"config/detect/{camera_name}", True) - enabled_config_subscriber = ConfigSubscriber(f"config/enabled/{camera_name}", True) + config_subscriber = CameraConfigUpdateSubscriber( + {camera_name: camera_config}, + [ + CameraConfigUpdateEnum.detect, + CameraConfigUpdateEnum.enabled, + CameraConfigUpdateEnum.motion, + ], + ) fps_tracker = EventsPerSecond() fps_tracker.start() @@ -654,11 +654,11 @@ def process_frames( ] while not stop_event.is_set(): - _, updated_enabled_config = enabled_config_subscriber.check_for_update() + updated_configs = config_subscriber.check_for_updates() - if updated_enabled_config: + if "enabled" in updated_configs: prev_enabled = camera_enabled - camera_enabled = updated_enabled_config.enabled + camera_enabled = camera_config.enabled if ( not camera_enabled @@ -686,12 +686,6 @@ def process_frames( time.sleep(0.1) continue - # check for updated detect config - _, updated_detect_config = detect_config_subscriber.check_for_update() - - if updated_detect_config: - detect_config = updated_detect_config - if ( datetime.datetime.now().astimezone(datetime.timezone.utc) > next_region_update @@ -726,14 +720,14 @@ def process_frames( consolidated_detections = [] # if detection is disabled - if not detect_config.enabled: + if not camera_config.detect.enabled: object_tracker.match_and_update(frame_name, frame_time, []) else: # get stationary object ids # check every Nth frame for stationary objects # disappeared objects are not stationary # also check for overlapping motion boxes - if stationary_frame_counter == detect_config.stationary.interval: + if stationary_frame_counter == camera_config.detect.stationary.interval: stationary_frame_counter = 0 stationary_object_ids = [] else: @@ -742,7 +736,8 @@ def process_frames( obj["id"] for obj in object_tracker.tracked_objects.values() # if it has exceeded the stationary threshold - if 
obj["motionless_count"] >= detect_config.stationary.threshold + if obj["motionless_count"] + >= camera_config.detect.stationary.threshold # and it hasn't disappeared and object_tracker.disappeared[obj["id"]] == 0 # and it doesn't overlap with any current motion boxes when not calibrating @@ -757,7 +752,8 @@ def process_frames( ( # use existing object box for stationary objects obj["estimate"] - if obj["motionless_count"] < detect_config.stationary.threshold + if obj["motionless_count"] + < camera_config.detect.stationary.threshold else obj["box"] ) for obj in object_tracker.tracked_objects.values() @@ -831,7 +827,7 @@ def process_frames( for region in regions: detections.extend( detect( - detect_config, + camera_config.detect, object_detector, frame, model_config, @@ -978,5 +974,4 @@ def process_frames( motion_detector.stop() requestor.stop() - detect_config_subscriber.stop() - enabled_config_subscriber.stop() + config_subscriber.stop() From 4dc526761cfe4b7282adebf2efcc9f4d16b50c46 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Thu, 22 May 2025 20:51:23 -0600 Subject: [PATCH 003/530] Dynamically update masks and zones for cameras (#18359) * Include config publisher in api * Call update topic for passed topics * Update zones dynamically * Update zones internally * Support zone and mask reset * Handle updating objects config * Don't put status for needing to restart Frigate * Cleanup http tests * Fix tests --- frigate/api/app.py | 16 ++- frigate/api/defs/request/app_body.py | 1 + frigate/api/fastapi_app.py | 3 + frigate/app.py | 1 + frigate/config/base.py | 24 +++++ frigate/config/camera/updater.py | 3 + frigate/motion/__init__.py | 12 ++- frigate/motion/improved_motion.py | 15 +-- frigate/test/http_api/base_http_test.py | 1 + frigate/test/test_http.py | 97 +++---------------- frigate/track/object_processing.py | 3 +- frigate/video.py | 14 ++- .../settings/MotionMaskEditPane.tsx | 1 + .../settings/ObjectMaskEditPane.tsx | 1 + web/src/components/settings/ZoneEditPane.tsx | 6 +- web/src/views/settings/MasksAndZonesView.tsx | 20 +--- 16 files changed, 100 insertions(+), 118 deletions(-) diff --git a/frigate/api/app.py b/frigate/api/app.py index f6e9471f2..351518673 100644 --- a/frigate/api/app.py +++ b/frigate/api/app.py @@ -28,6 +28,10 @@ from frigate.api.defs.query.app_query_parameters import AppTimelineHourlyQueryPa from frigate.api.defs.request.app_body import AppConfigSetBody from frigate.api.defs.tags import Tags from frigate.config import FrigateConfig +from frigate.config.camera.updater import ( + CameraConfigUpdateEnum, + CameraConfigUpdateTopic, +) from frigate.models import Event, Timeline from frigate.stats.prometheus import get_metrics, update_metrics from frigate.util.builtin import ( @@ -385,8 +389,18 @@ def config_set(request: Request, body: AppConfigSetBody): status_code=500, ) - if body.requires_restart == 0: + if body.requires_restart == 0 or body.update_topic: request.app.frigate_config = config + + if body.update_topic and body.update_topic.startswith("config/cameras/"): + _, _, camera, field = body.update_topic.split("/") + + settings = config.get_nested_object(body.update_topic) + request.app.config_publisher.publish_update( + CameraConfigUpdateTopic(CameraConfigUpdateEnum[field], camera), + settings, + ) + return JSONResponse( content=( { diff --git a/frigate/api/defs/request/app_body.py b/frigate/api/defs/request/app_body.py index 1fc05db2f..7456a6c77 100644 --- a/frigate/api/defs/request/app_body.py +++ b/frigate/api/defs/request/app_body.py @@ -5,6 +5,7 @@ from 
pydantic import BaseModel class AppConfigSetBody(BaseModel): requires_restart: int = 1 + update_topic: str | None = None class AppPutPasswordBody(BaseModel): diff --git a/frigate/api/fastapi_app.py b/frigate/api/fastapi_app.py index 0657752dc..1265f3af9 100644 --- a/frigate/api/fastapi_app.py +++ b/frigate/api/fastapi_app.py @@ -26,6 +26,7 @@ from frigate.comms.event_metadata_updater import ( EventMetadataPublisher, ) from frigate.config import FrigateConfig +from frigate.config.camera.updater import CameraConfigUpdatePublisher from frigate.embeddings import EmbeddingsContext from frigate.ptz.onvif import OnvifController from frigate.stats.emitter import StatsEmitter @@ -57,6 +58,7 @@ def create_fastapi_app( onvif: OnvifController, stats_emitter: StatsEmitter, event_metadata_updater: EventMetadataPublisher, + config_publisher: CameraConfigUpdatePublisher, ): logger.info("Starting FastAPI app") app = FastAPI( @@ -127,6 +129,7 @@ def create_fastapi_app( app.onvif = onvif app.stats_emitter = stats_emitter app.event_metadata_updater = event_metadata_updater + app.config_publisher = config_publisher app.jwt_token = get_jwt_secret() if frigate_config.auth.enabled else None return app diff --git a/frigate/app.py b/frigate/app.py index 26c32c8f8..57873383b 100644 --- a/frigate/app.py +++ b/frigate/app.py @@ -679,6 +679,7 @@ class FrigateApp: self.onvif_controller, self.stats_emitter, self.event_metadata_updater, + self.inter_config_updater, ), host="127.0.0.1", port=5001, diff --git a/frigate/config/base.py b/frigate/config/base.py index 068a68acd..1e369e293 100644 --- a/frigate/config/base.py +++ b/frigate/config/base.py @@ -1,5 +1,29 @@ +from typing import Any + from pydantic import BaseModel, ConfigDict class FrigateBaseModel(BaseModel): model_config = ConfigDict(extra="forbid", protected_namespaces=()) + + def get_nested_object(self, path: str) -> Any: + parts = path.split("/") + obj = self + for part in parts: + if part == "config": + continue + + if isinstance(obj, BaseModel): + try: + obj = getattr(obj, part) + except AttributeError: + return None + elif isinstance(obj, dict): + try: + obj = obj[part] + except KeyError: + return None + else: + return None + + return obj diff --git a/frigate/config/camera/updater.py b/frigate/config/camera/updater.py index 5abca57eb..140e02207 100644 --- a/frigate/config/camera/updater.py +++ b/frigate/config/camera/updater.py @@ -17,6 +17,7 @@ class CameraConfigUpdateEnum(str, Enum): enabled = "enabled" motion = "motion" # includes motion and motion masks notifications = "notifications" + objects = "objects" record = "record" review = "review" snapshots = "snapshots" @@ -83,6 +84,8 @@ class CameraConfigUpdateSubscriber: config.motion = updated_config elif update_type == CameraConfigUpdateEnum.notifications: config.notifications = updated_config + elif update_type == CameraConfigUpdateEnum.objects: + config.objects = updated_config elif update_type == CameraConfigUpdateEnum.record: config.record = updated_config elif update_type == CameraConfigUpdateEnum.review: diff --git a/frigate/motion/__init__.py b/frigate/motion/__init__.py index db5f25879..1f6785d5d 100644 --- a/frigate/motion/__init__.py +++ b/frigate/motion/__init__.py @@ -1,6 +1,8 @@ from abc import ABC, abstractmethod from typing import Tuple +from numpy import ndarray + from frigate.config import MotionConfig @@ -18,13 +20,21 @@ class MotionDetector(ABC): pass @abstractmethod - def detect(self, frame): + def detect(self, frame: ndarray) -> list: + """Detect motion and return motion boxes.""" pass 
@abstractmethod def is_calibrating(self): + """Return if motion is recalibrating.""" + pass + + @abstractmethod + def update_mask(self) -> None: + """Update the motion mask after a config change.""" pass @abstractmethod def stop(self): + """Stop any ongoing work and processes.""" pass diff --git a/frigate/motion/improved_motion.py b/frigate/motion/improved_motion.py index 10818ea70..77eae26a9 100644 --- a/frigate/motion/improved_motion.py +++ b/frigate/motion/improved_motion.py @@ -35,12 +35,7 @@ class ImprovedMotionDetector(MotionDetector): self.avg_frame = np.zeros(self.motion_frame_size, np.float32) self.motion_frame_count = 0 self.frame_counter = 0 - resized_mask = cv2.resize( - config.mask, - dsize=(self.motion_frame_size[1], self.motion_frame_size[0]), - interpolation=cv2.INTER_AREA, - ) - self.mask = np.where(resized_mask == [0]) + self.update_mask() self.save_images = False self.calibrating = True self.blur_radius = blur_radius @@ -236,6 +231,14 @@ class ImprovedMotionDetector(MotionDetector): return motion_boxes + def update_mask(self) -> None: + resized_mask = cv2.resize( + self.config.mask, + dsize=(self.motion_frame_size[1], self.motion_frame_size[0]), + interpolation=cv2.INTER_AREA, + ) + self.mask = np.where(resized_mask == [0]) + def stop(self) -> None: """stop the motion detector.""" pass diff --git a/frigate/test/http_api/base_http_test.py b/frigate/test/http_api/base_http_test.py index 3c4a7ccdc..e758a14dc 100644 --- a/frigate/test/http_api/base_http_test.py +++ b/frigate/test/http_api/base_http_test.py @@ -119,6 +119,7 @@ class BaseTestHttp(unittest.TestCase): None, stats, None, + None, ) def insert_mock_event( diff --git a/frigate/test/test_http.py b/frigate/test/test_http.py index 4d949c543..5761e83aa 100644 --- a/frigate/test/test_http.py +++ b/frigate/test/test_http.py @@ -2,6 +2,7 @@ import datetime import logging import os import unittest +from typing import Any from unittest.mock import Mock from fastapi.testclient import TestClient @@ -112,8 +113,8 @@ class TestHttp(unittest.TestCase): except OSError: pass - def test_get_good_event(self): - app = create_fastapi_app( + def __init_app(self, updater: Any | None = None) -> Any: + return create_fastapi_app( FrigateConfig(**self.minimal_config), self.db, None, @@ -121,8 +122,12 @@ class TestHttp(unittest.TestCase): None, None, None, + updater, None, ) + + def test_get_good_event(self): + app = self.__init_app() id = "123456.random" with TestClient(app) as client: @@ -134,16 +139,7 @@ class TestHttp(unittest.TestCase): assert event["id"] == model_to_dict(Event.get(Event.id == id))["id"] def test_get_bad_event(self): - app = create_fastapi_app( - FrigateConfig(**self.minimal_config), - self.db, - None, - None, - None, - None, - None, - None, - ) + app = self.__init_app() id = "123456.random" bad_id = "654321.other" @@ -154,16 +150,7 @@ class TestHttp(unittest.TestCase): assert event_response.json() == "Event not found" def test_delete_event(self): - app = create_fastapi_app( - FrigateConfig(**self.minimal_config), - self.db, - None, - None, - None, - None, - None, - None, - ) + app = self.__init_app() id = "123456.random" with TestClient(app) as client: @@ -176,16 +163,7 @@ class TestHttp(unittest.TestCase): assert event == "Event not found" def test_event_retention(self): - app = create_fastapi_app( - FrigateConfig(**self.minimal_config), - self.db, - None, - None, - None, - None, - None, - None, - ) + app = self.__init_app() id = "123456.random" with TestClient(app) as client: @@ -202,16 +180,7 @@ class 
TestHttp(unittest.TestCase): assert event["retain_indefinitely"] is False def test_event_time_filtering(self): - app = create_fastapi_app( - FrigateConfig(**self.minimal_config), - self.db, - None, - None, - None, - None, - None, - None, - ) + app = self.__init_app() morning_id = "123456.random" evening_id = "654321.random" morning = 1656590400 # 06/30/2022 6 am (GMT) @@ -241,16 +210,7 @@ class TestHttp(unittest.TestCase): def test_set_delete_sub_label(self): mock_event_updater = Mock(spec=EventMetadataPublisher) - app = create_fastapi_app( - FrigateConfig(**self.minimal_config), - self.db, - None, - None, - None, - None, - None, - mock_event_updater, - ) + app = app = self.__init_app(updater=mock_event_updater) id = "123456.random" sub_label = "sub" @@ -286,16 +246,7 @@ class TestHttp(unittest.TestCase): def test_sub_label_list(self): mock_event_updater = Mock(spec=EventMetadataPublisher) - app = create_fastapi_app( - FrigateConfig(**self.minimal_config), - self.db, - None, - None, - None, - None, - None, - mock_event_updater, - ) + app = self.__init_app(updater=mock_event_updater) id = "123456.random" sub_label = "sub" @@ -318,16 +269,7 @@ class TestHttp(unittest.TestCase): assert sub_labels == [sub_label] def test_config(self): - app = create_fastapi_app( - FrigateConfig(**self.minimal_config), - self.db, - None, - None, - None, - None, - None, - None, - ) + app = self.__init_app() with TestClient(app) as client: config = client.get("/config").json() @@ -335,16 +277,7 @@ class TestHttp(unittest.TestCase): assert config["cameras"]["front_door"] def test_recordings(self): - app = create_fastapi_app( - FrigateConfig(**self.minimal_config), - self.db, - None, - None, - None, - None, - None, - None, - ) + app = self.__init_app() id = "123456.random" with TestClient(app) as client: diff --git a/frigate/track/object_processing.py b/frigate/track/object_processing.py index 1bc2c40fc..e25c83815 100644 --- a/frigate/track/object_processing.py +++ b/frigate/track/object_processing.py @@ -71,7 +71,8 @@ class TrackedObjectProcessor(threading.Thread): self.ptz_autotracker_thread = ptz_autotracker_thread self.config_subscriber = CameraConfigUpdateSubscriber( - self.config.cameras, [CameraConfigUpdateEnum.enabled] + self.config.cameras, + [CameraConfigUpdateEnum.enabled, CameraConfigUpdateEnum.zones], ) self.requestor = InterProcessRequestor() diff --git a/frigate/video.py b/frigate/video.py index 95fbf2267..5012c31c6 100755 --- a/frigate/video.py +++ b/frigate/video.py @@ -504,8 +504,6 @@ def track_camera( frame_queue = camera_metrics.frame_queue frame_shape = config.frame_shape - objects_to_track = config.objects.track - object_filters = config.objects.filters motion_detector = ImprovedMotionDetector( frame_shape, @@ -538,8 +536,6 @@ def track_camera( object_tracker, detected_objects_queue, camera_metrics, - objects_to_track, - object_filters, stop_event, ptz_metrics, region_grid, @@ -604,8 +600,6 @@ def process_frames( object_tracker: ObjectTracker, detected_objects_queue: Queue, camera_metrics: CameraMetrics, - objects_to_track: list[str], - object_filters, stop_event: MpEvent, ptz_metrics: PTZMetrics, region_grid: list[list[dict[str, Any]]], @@ -618,6 +612,7 @@ def process_frames( CameraConfigUpdateEnum.detect, CameraConfigUpdateEnum.enabled, CameraConfigUpdateEnum.motion, + CameraConfigUpdateEnum.objects, ], ) @@ -660,6 +655,9 @@ def process_frames( prev_enabled = camera_enabled camera_enabled = camera_config.enabled + if "motion" in updated_configs: + motion_detector.update_mask() + if ( not 
camera_enabled and prev_enabled != camera_enabled @@ -832,8 +830,8 @@ def process_frames( frame, model_config, region, - objects_to_track, - object_filters, + camera_config.objects.track, + camera_config.objects.filters, ) ) diff --git a/web/src/components/settings/MotionMaskEditPane.tsx b/web/src/components/settings/MotionMaskEditPane.tsx index bdfc12c3d..b59005d0b 100644 --- a/web/src/components/settings/MotionMaskEditPane.tsx +++ b/web/src/components/settings/MotionMaskEditPane.tsx @@ -163,6 +163,7 @@ export default function MotionMaskEditPane({ axios .put(`config/set?${queryString}`, { requires_restart: 0, + update_topic: `config/cameras/${polygon.camera}/motion`, }) .then((res) => { if (res.status === 200) { diff --git a/web/src/components/settings/ObjectMaskEditPane.tsx b/web/src/components/settings/ObjectMaskEditPane.tsx index 8fc1b6338..c898531c4 100644 --- a/web/src/components/settings/ObjectMaskEditPane.tsx +++ b/web/src/components/settings/ObjectMaskEditPane.tsx @@ -196,6 +196,7 @@ export default function ObjectMaskEditPane({ axios .put(`config/set?${queryString}`, { requires_restart: 0, + update_topic: `config/cameras/${polygon.camera}/objects`, }) .then((res) => { if (res.status === 200) { diff --git a/web/src/components/settings/ZoneEditPane.tsx b/web/src/components/settings/ZoneEditPane.tsx index bc93fa781..17029728b 100644 --- a/web/src/components/settings/ZoneEditPane.tsx +++ b/web/src/components/settings/ZoneEditPane.tsx @@ -329,6 +329,7 @@ export default function ZoneEditPane({ `config/set?cameras.${polygon.camera}.zones.${polygon.name}${renameAlertQueries}${renameDetectionQueries}`, { requires_restart: 0, + update_topic: `config/cameras/${polygon.camera}/zones`, }, ); @@ -412,7 +413,10 @@ export default function ZoneEditPane({ axios .put( `config/set?cameras.${polygon?.camera}.zones.${zoneName}.coordinates=${coordinates}${inertiaQuery}${loiteringTimeQuery}${speedThresholdQuery}${distancesQuery}${objectQueries}${alertQueries}${detectionQueries}`, - { requires_restart: 0 }, + { + requires_restart: 0, + update_topic: `config/cameras/${polygon.camera}/zones`, + }, ) .then((res) => { if (res.status === 200) { diff --git a/web/src/views/settings/MasksAndZonesView.tsx b/web/src/views/settings/MasksAndZonesView.tsx index 5cd083f6c..c9ba9971e 100644 --- a/web/src/views/settings/MasksAndZonesView.tsx +++ b/web/src/views/settings/MasksAndZonesView.tsx @@ -1,14 +1,7 @@ import { FrigateConfig } from "@/types/frigateConfig"; import useSWR from "swr"; import ActivityIndicator from "@/components/indicators/activity-indicator"; -import { - useCallback, - useContext, - useEffect, - useMemo, - useRef, - useState, -} from "react"; +import { useCallback, useEffect, useMemo, useRef, useState } from "react"; import { PolygonCanvas } from "@/components/settings/PolygonCanvas"; import { Polygon, PolygonType } from "@/types/canvas"; import { interpolatePoints, parseCoordinates } from "@/utils/canvasUtil"; @@ -36,7 +29,6 @@ import ObjectMaskEditPane from "@/components/settings/ObjectMaskEditPane"; import PolygonItem from "@/components/settings/PolygonItem"; import { Link } from "react-router-dom"; import { isDesktop } from "react-device-detect"; -import { StatusBarMessagesContext } from "@/context/statusbar-provider"; import { useSearchEffect } from "@/hooks/use-overlay-state"; import { useTranslation } from "react-i18next"; @@ -72,8 +64,6 @@ export default function MasksAndZonesView({ const [activeLine, setActiveLine] = useState(); const [snapPoints, setSnapPoints] = useState(false); - const { 
addMessage } = useContext(StatusBarMessagesContext)!; - const cameraConfig = useMemo(() => { if (config && selectedCamera) { return config.cameras[selectedCamera]; @@ -196,13 +186,7 @@ export default function MasksAndZonesView({ setAllPolygons([...(editingPolygons ?? [])]); setHoveredPolygonIndex(null); setUnsavedChanges(false); - addMessage( - "masks_zones", - t("masksAndZones.restart_required"), - undefined, - "masks_zones", - ); - }, [t, editingPolygons, setUnsavedChanges, addMessage]); + }, [editingPolygons, setUnsavedChanges]); useEffect(() => { if (isLoading) { From e1340443f50287cd137a3e1a49c4024da7c940ed Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Fri, 23 May 2025 08:46:53 -0600 Subject: [PATCH 004/530] Initial custom classification model config support (#18362) * Add basic config for defining a teachable machine model * Add model type * Add basic config for teachable machine models * Adjust config for state and object * Use config to process * Correctly check for objects * Remove debug * Rename to not be teachable machine specific * Cleanup --- frigate/config/classification.py | 27 +++ .../real_time/custom_classification.py | 178 ++++++++++++++++++ frigate/embeddings/maintainer.py | 29 ++- 3 files changed, 229 insertions(+), 5 deletions(-) create mode 100644 frigate/data_processing/real_time/custom_classification.py diff --git a/frigate/config/classification.py b/frigate/config/classification.py index 06e69a774..134316148 100644 --- a/frigate/config/classification.py +++ b/frigate/config/classification.py @@ -34,10 +34,37 @@ class BirdClassificationConfig(FrigateBaseModel): ) +class CustomClassificationStateCameraConfig(FrigateBaseModel): + crop: list[int, int, int, int] = Field( + title="Crop of image frame on this camera to run classification on." + ) + + +class CustomClassificationStateConfig(FrigateBaseModel): + cameras: Dict[str, CustomClassificationStateCameraConfig] = Field( + title="Cameras to run classification on." + ) + + +class CustomClassificationObjectConfig(FrigateBaseModel): + objects: list[str] = Field(title="Object types to classify.") + + +class CustomClassificationConfig(FrigateBaseModel): + enabled: bool = Field(default=True, title="Enable running the model.") + model_path: str = Field(title="Path to custom classification tflite model.") + labelmap_path: str = Field(title="Path to custom classification model labelmap.") + object_config: CustomClassificationObjectConfig | None = Field(default=None) + state_config: CustomClassificationStateConfig | None = Field(default=None) + + class ClassificationConfig(FrigateBaseModel): bird: BirdClassificationConfig = Field( default_factory=BirdClassificationConfig, title="Bird classification config." ) + custom: Dict[str, CustomClassificationConfig] = Field( + default={}, title="Custom Classification Model Configs." 
+ ) class SemanticSearchConfig(FrigateBaseModel): diff --git a/frigate/data_processing/real_time/custom_classification.py b/frigate/data_processing/real_time/custom_classification.py new file mode 100644 index 000000000..1848968bb --- /dev/null +++ b/frigate/data_processing/real_time/custom_classification.py @@ -0,0 +1,178 @@ +"""Real time processor that works with classification tflite models.""" + +import logging +from typing import Any + +import cv2 +import numpy as np + +from frigate.comms.event_metadata_updater import ( + EventMetadataPublisher, + EventMetadataTypeEnum, +) +from frigate.config import FrigateConfig +from frigate.config.classification import CustomClassificationConfig +from frigate.util.builtin import load_labels +from frigate.util.object import calculate_region + +from ..types import DataProcessorMetrics +from .api import RealTimeProcessorApi + +try: + from tflite_runtime.interpreter import Interpreter +except ModuleNotFoundError: + from tensorflow.lite.python.interpreter import Interpreter + +logger = logging.getLogger(__name__) + + +class CustomStateClassificationProcessor(RealTimeProcessorApi): + def __init__( + self, + config: FrigateConfig, + model_config: CustomClassificationConfig, + metrics: DataProcessorMetrics, + ): + super().__init__(config, metrics) + self.model_config = model_config + self.interpreter: Interpreter = None + self.tensor_input_details: dict[str, Any] = None + self.tensor_output_details: dict[str, Any] = None + self.labelmap: dict[int, str] = {} + self.__build_detector() + + def __build_detector(self) -> None: + self.interpreter = Interpreter( + model_path=self.model_config.model_path, + num_threads=2, + ) + self.interpreter.allocate_tensors() + self.tensor_input_details = self.interpreter.get_input_details() + self.tensor_output_details = self.interpreter.get_output_details() + self.labelmap = load_labels(self.model_config.labelmap_path, prefill=0) + + def process_frame(self, frame_data: dict[str, Any], frame: np.ndarray): + camera = frame_data.get("camera") + if camera not in self.model_config.state_config.cameras: + return + + camera_config = self.model_config.state_config.cameras[camera] + x, y, x2, y2 = calculate_region( + frame.shape, + camera_config.crop[0], + camera_config.crop[1], + camera_config.crop[2], + camera_config.crop[3], + 224, + 1.0, + ) + + rgb = cv2.cvtColor(frame, cv2.COLOR_YUV2RGB_I420) + input = rgb[ + y:y2, + x:x2, + ] + + if input.shape != (224, 224): + input = cv2.resize(input, (224, 224)) + + input = np.expand_dims(input, axis=0) + self.interpreter.set_tensor(self.tensor_input_details[0]["index"], input) + self.interpreter.invoke() + res: np.ndarray = self.interpreter.get_tensor( + self.tensor_output_details[0]["index"] + )[0] + print(f"the gate res is {res}") + probs = res / res.sum(axis=0) + best_id = np.argmax(probs) + score = round(probs[best_id], 2) + + print(f"got {self.labelmap[best_id]} with score {score}") + + def handle_request(self, topic, request_data): + return None + + def expire_object(self, object_id, camera): + pass + + +class CustomObjectClassificationProcessor(RealTimeProcessorApi): + def __init__( + self, + config: FrigateConfig, + model_config: CustomClassificationConfig, + sub_label_publisher: EventMetadataPublisher, + metrics: DataProcessorMetrics, + ): + super().__init__(config, metrics) + self.model_config = model_config + self.interpreter: Interpreter = None + self.sub_label_publisher = sub_label_publisher + self.tensor_input_details: dict[str, Any] = None + self.tensor_output_details: 
dict[str, Any] = None + self.detected_objects: dict[str, float] = {} + self.labelmap: dict[int, str] = {} + self.__build_detector() + + def __build_detector(self) -> None: + self.interpreter = Interpreter( + model_path=self.model_config.model_path, + num_threads=2, + ) + self.interpreter.allocate_tensors() + self.tensor_input_details = self.interpreter.get_input_details() + self.tensor_output_details = self.interpreter.get_output_details() + self.labelmap = load_labels(self.model_config.labelmap_path, prefill=0) + + def process_frame(self, obj_data, frame): + if obj_data["label"] not in self.model_config.object_config.objects: + return + + x, y, x2, y2 = calculate_region( + frame.shape, + obj_data["box"][0], + obj_data["box"][1], + obj_data["box"][2], + obj_data["box"][3], + 224, + 1.0, + ) + + rgb = cv2.cvtColor(frame, cv2.COLOR_YUV2RGB_I420) + input = rgb[ + y:y2, + x:x2, + ] + + if input.shape != (224, 224): + input = cv2.resize(input, (224, 224)) + + input = np.expand_dims(input, axis=0) + self.interpreter.set_tensor(self.tensor_input_details[0]["index"], input) + self.interpreter.invoke() + res: np.ndarray = self.interpreter.get_tensor( + self.tensor_output_details[0]["index"] + )[0] + probs = res / res.sum(axis=0) + best_id = np.argmax(probs) + + score = round(probs[best_id], 2) + + previous_score = self.detected_objects.get(obj_data["id"], 0.0) + + if score <= previous_score: + logger.debug(f"Score {score} is worse than previous score {previous_score}") + return + + self.sub_label_publisher.publish( + EventMetadataTypeEnum.sub_label, + (obj_data["id"], self.labelmap[best_id], score), + ) + self.detected_objects[obj_data["id"]] = score + + def handle_request(self, topic, request_data): + return None + + def expire_object(self, object_id, camera): + if object_id in self.detected_objects: + self.detected_objects.pop(object_id) diff --git a/frigate/embeddings/maintainer.py b/frigate/embeddings/maintainer.py index 86bc75737..9838f4a21 100644 --- a/frigate/embeddings/maintainer.py +++ b/frigate/embeddings/maintainer.py @@ -42,6 +42,10 @@ from frigate.data_processing.post.license_plate import ( ) from frigate.data_processing.real_time.api import RealTimeProcessorApi from frigate.data_processing.real_time.bird import BirdRealTimeProcessor +from frigate.data_processing.real_time.custom_classification import ( + CustomObjectClassificationProcessor, + CustomStateClassificationProcessor, +) from frigate.data_processing.real_time.face import FaceRealTimeProcessor from frigate.data_processing.real_time.license_plate import ( LicensePlateRealTimeProcessor, @@ -143,6 +147,18 @@ class EmbeddingMaintainer(threading.Thread): ) ) + for model in self.config.classification.custom.values(): + self.realtime_processors.append( + CustomStateClassificationProcessor(self.config, model, self.metrics) + if model.state_config != None + else CustomObjectClassificationProcessor( + self.config, + model, + self.event_metadata_publisher, + self.metrics, + ) + ) + # post processors self.post_processors: list[PostProcessorApi] = [] @@ -172,7 +188,7 @@ class EmbeddingMaintainer(threading.Thread): self._process_requests() self._process_updates() self._process_recordings_updates() - self._process_dedicated_lpr() + self._process_frame_updates() self._expire_dedicated_lpr() self._process_finalized() self._process_event_metadata() @@ -449,7 +465,7 @@ class EmbeddingMaintainer(threading.Thread): event_id, RegenerateDescriptionEnum(source) ) - def _process_dedicated_lpr(self) -> None: + def _process_frame_updates(self) -> None: 
"""Process event updates""" (topic, data) = self.detection_subscriber.check_for_update() @@ -458,7 +474,7 @@ class EmbeddingMaintainer(threading.Thread): camera, frame_name, _, _, motion_boxes, _ = data - if not camera or not self.config.lpr.enabled or len(motion_boxes) == 0: + if not camera or len(motion_boxes) == 0: return camera_config = self.config.cameras[camera] @@ -466,8 +482,8 @@ class EmbeddingMaintainer(threading.Thread): if ( camera_config.type != CameraTypeEnum.lpr or "license_plate" in camera_config.objects.track - ): - # we're not a dedicated lpr camera or we are one but we're using frigate+ + ) and len(self.config.classification.custom) == 0: + # no active features that use this data return try: @@ -487,6 +503,9 @@ class EmbeddingMaintainer(threading.Thread): if isinstance(processor, LicensePlateRealTimeProcessor): processor.process_frame(camera, yuv_frame, True) + if isinstance(processor, CustomStateClassificationProcessor): + processor.process_frame({"camera": camera}, yuv_frame) + self.frame_manager.close(frame_name) def _create_thumbnail(self, yuv_frame, box, height=500) -> Optional[bytes]: From 4ebc4f6d2122d7301d160dba42ccf099428233ed Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Fri, 23 May 2025 09:55:48 -0500 Subject: [PATCH 005/530] Implement support for no recordings indicator on timeline (#18363) * Indicate no recordings on the history timeline with gray hash marks This commit includes a new backend API endpoint and the frontend changes needed to support this functionality * don't show slashes for now --- .../api/defs/query/media_query_parameters.py | 10 ++- frigate/api/media.py | 64 ++++++++++++++++++- .../timeline/MotionReviewTimeline.tsx | 15 +++++ web/src/components/timeline/MotionSegment.tsx | 8 +++ .../timeline/VirtualizedMotionSegments.tsx | 6 ++ web/src/views/recording/RecordingView.tsx | 17 ++++- web/tailwind.config.cjs | 4 ++ 7 files changed, 121 insertions(+), 3 deletions(-) diff --git a/frigate/api/defs/query/media_query_parameters.py b/frigate/api/defs/query/media_query_parameters.py index 4750d3277..cf06c71e1 100644 --- a/frigate/api/defs/query/media_query_parameters.py +++ b/frigate/api/defs/query/media_query_parameters.py @@ -1,7 +1,8 @@ from enum import Enum -from typing import Optional +from typing import Optional, Union from pydantic import BaseModel +from pydantic.json_schema import SkipJsonSchema class Extension(str, Enum): @@ -46,3 +47,10 @@ class MediaMjpegFeedQueryParams(BaseModel): class MediaRecordingsSummaryQueryParams(BaseModel): timezone: str = "utc" cameras: Optional[str] = "all" + + +class MediaRecordingsAvailabilityQueryParams(BaseModel): + cameras: str = "all" + before: Union[float, SkipJsonSchema[None]] = None + after: Union[float, SkipJsonSchema[None]] = None + scale: int = 30 diff --git a/frigate/api/media.py b/frigate/api/media.py index b4db46d38..77de2aefe 100644 --- a/frigate/api/media.py +++ b/frigate/api/media.py @@ -8,6 +8,7 @@ import os import subprocess as sp import time from datetime import datetime, timedelta, timezone +from functools import reduce from pathlib import Path as FilePath from typing import Any from urllib.parse import unquote @@ -19,7 +20,7 @@ from fastapi import APIRouter, Path, Query, Request, Response from fastapi.params import Depends from fastapi.responses import FileResponse, JSONResponse, StreamingResponse from pathvalidate import sanitize_filename -from peewee import DoesNotExist, fn +from peewee import DoesNotExist, fn, operator from tzlocal 
import get_localzone_name from frigate.api.defs.query.media_query_parameters import ( @@ -27,6 +28,7 @@ from frigate.api.defs.query.media_query_parameters import ( MediaEventsSnapshotQueryParams, MediaLatestFrameQueryParams, MediaMjpegFeedQueryParams, + MediaRecordingsAvailabilityQueryParams, MediaRecordingsSummaryQueryParams, ) from frigate.api.defs.tags import Tags @@ -542,6 +544,66 @@ def recordings( return JSONResponse(content=list(recordings)) +@router.get("/recordings/unavailable", response_model=list[dict]) +def no_recordings(params: MediaRecordingsAvailabilityQueryParams = Depends()): + """Get time ranges with no recordings.""" + cameras = params.cameras + before = params.before or datetime.now().timestamp() + after = ( + params.after + or (datetime.now() - timedelta(hours=1)).timestamp() + ) + scale = params.scale + + clauses = [(Recordings.start_time > after) & (Recordings.end_time < before)] + if cameras != "all": + camera_list = cameras.split(",") + clauses.append((Recordings.camera << camera_list)) + + # Get recording start times + data: list[Recordings] = ( + Recordings.select(Recordings.start_time, Recordings.end_time) + .where(reduce(operator.and_, clauses)) + .order_by(Recordings.start_time.asc()) + .dicts() + .iterator() + ) + + # Convert recordings to list of (start, end) tuples + recordings = [(r["start_time"], r["end_time"]) for r in data] + + # Generate all time segments + current = after + no_recording_segments = [] + current_start = None + + while current < before: + segment_end = current + scale + # Check if segment overlaps with any recording + has_recording = any( + start <= segment_end and end >= current for start, end in recordings + ) + if not has_recording: + if current_start is None: + current_start = current # Start a new gap + else: + if current_start is not None: + # End the current gap and append it + no_recording_segments.append( + {"start_time": int(current_start), "end_time": int(current)} + ) + current_start = None + current = segment_end + + # Append the last gap if it exists + if current_start is not None: + no_recording_segments.append( + {"start_time": int(current_start), "end_time": int(before)} + ) + + return JSONResponse(content=no_recording_segments) + + @router.get( "/{camera_name}/start/{start_ts}/end/{end_ts}/clip.mp4", description="For iOS devices, use the master.m3u8 HLS link instead of clip.mp4. 
Safari does not reliably process progressive mp4 files.", diff --git a/web/src/components/timeline/MotionReviewTimeline.tsx b/web/src/components/timeline/MotionReviewTimeline.tsx index c8ef5ea75..662ccf150 100644 --- a/web/src/components/timeline/MotionReviewTimeline.tsx +++ b/web/src/components/timeline/MotionReviewTimeline.tsx @@ -17,6 +17,7 @@ import { VirtualizedMotionSegments, VirtualizedMotionSegmentsRef, } from "./VirtualizedMotionSegments"; +import { RecordingSegment } from "@/types/record"; export type MotionReviewTimelineProps = { segmentDuration: number; @@ -38,6 +39,7 @@ export type MotionReviewTimelineProps = { setExportEndTime?: React.Dispatch>; events: ReviewSegment[]; motion_events: MotionData[]; + noRecordingRanges?: RecordingSegment[]; contentRef: RefObject; timelineRef?: RefObject; onHandlebarDraggingChange?: (isDragging: boolean) => void; @@ -66,6 +68,7 @@ export function MotionReviewTimeline({ setExportEndTime, events, motion_events, + noRecordingRanges, contentRef, timelineRef, onHandlebarDraggingChange, @@ -97,6 +100,17 @@ export function MotionReviewTimeline({ motion_events, ); + const getRecordingAvailability = useCallback( + (time: number): boolean | undefined => { + if (!noRecordingRanges?.length) return undefined; + + return !noRecordingRanges.some( + (range) => time >= range.start_time && time < range.end_time, + ); + }, + [noRecordingRanges], + ); + const segmentTimes = useMemo(() => { const segments = []; let segmentTime = timelineStartAligned; @@ -206,6 +220,7 @@ export function MotionReviewTimeline({ dense={dense} motionOnly={motionOnly} getMotionSegmentValue={getMotionSegmentValue} + getRecordingAvailability={getRecordingAvailability} /> ); diff --git a/web/src/components/timeline/MotionSegment.tsx b/web/src/components/timeline/MotionSegment.tsx index fa6fdbd80..d87bfdda3 100644 --- a/web/src/components/timeline/MotionSegment.tsx +++ b/web/src/components/timeline/MotionSegment.tsx @@ -15,6 +15,7 @@ type MotionSegmentProps = { timestampSpread: number; firstHalfMotionValue: number; secondHalfMotionValue: number; + hasRecording?: boolean; motionOnly: boolean; showMinimap: boolean; minimapStartTime?: number; @@ -31,6 +32,7 @@ export function MotionSegment({ timestampSpread, firstHalfMotionValue, secondHalfMotionValue, + hasRecording, motionOnly, showMinimap, minimapStartTime, @@ -176,6 +178,12 @@ export function MotionSegment({ segmentClasses, severity[0] && "bg-gradient-to-r", severity[0] && severityColorsBg[severity[0]], + // TODO: will update this for 0.17 + false && + hasRecording == false && + firstHalfMotionValue == 0 && + secondHalfMotionValue == 0 && + "bg-slashes", )} onClick={segmentClick} onTouchEnd={(event) => handleTouchStart(event, segmentClick)} diff --git a/web/src/components/timeline/VirtualizedMotionSegments.tsx b/web/src/components/timeline/VirtualizedMotionSegments.tsx index 3aed75266..fc7a8224f 100644 --- a/web/src/components/timeline/VirtualizedMotionSegments.tsx +++ b/web/src/components/timeline/VirtualizedMotionSegments.tsx @@ -24,6 +24,7 @@ type VirtualizedMotionSegmentsProps = { dense: boolean; motionOnly: boolean; getMotionSegmentValue: (timestamp: number) => number; + getRecordingAvailability: (timestamp: number) => boolean | undefined; }; export interface VirtualizedMotionSegmentsRef { @@ -55,6 +56,7 @@ export const VirtualizedMotionSegments = forwardRef< dense, motionOnly, getMotionSegmentValue, + getRecordingAvailability, }, ref, ) => { @@ -154,6 +156,8 @@ export const VirtualizedMotionSegments = forwardRef< (item.end_time ?? 
segmentTime) >= motionEnd), ); + const hasRecording = getRecordingAvailability(segmentTime); + if ((!segmentMotion || overlappingReviewItems) && motionOnly) { return null; // Skip rendering this segment in motion only mode } @@ -172,6 +176,7 @@ export const VirtualizedMotionSegments = forwardRef< events={events} firstHalfMotionValue={firstHalfMotionValue} secondHalfMotionValue={secondHalfMotionValue} + hasRecording={hasRecording} segmentDuration={segmentDuration} segmentTime={segmentTime} timestampSpread={timestampSpread} @@ -189,6 +194,7 @@ export const VirtualizedMotionSegments = forwardRef< [ events, getMotionSegmentValue, + getRecordingAvailability, motionOnly, segmentDuration, showMinimap, diff --git a/web/src/views/recording/RecordingView.tsx b/web/src/views/recording/RecordingView.tsx index 7cc06da41..f8ebe6121 100644 --- a/web/src/views/recording/RecordingView.tsx +++ b/web/src/views/recording/RecordingView.tsx @@ -43,7 +43,11 @@ import Logo from "@/components/Logo"; import { Skeleton } from "@/components/ui/skeleton"; import { FaVideo } from "react-icons/fa"; import { VideoResolutionType } from "@/types/live"; -import { ASPECT_VERTICAL_LAYOUT, ASPECT_WIDE_LAYOUT } from "@/types/record"; +import { + ASPECT_VERTICAL_LAYOUT, + ASPECT_WIDE_LAYOUT, + RecordingSegment, +} from "@/types/record"; import { useResizeObserver } from "@/hooks/resize-observer"; import { cn } from "@/lib/utils"; import { useFullscreen } from "@/hooks/use-fullscreen"; @@ -808,6 +812,16 @@ function Timeline({ }, ]); + const { data: noRecordings } = useSWR([ + "recordings/unavailable", + { + before: timeRange.before, + after: timeRange.after, + scale: Math.round(zoomSettings.segmentDuration / 2), + cameras: mainCamera, + }, + ]); + const [exportStart, setExportStartTime] = useState(0); const [exportEnd, setExportEndTime] = useState(0); @@ -853,6 +867,7 @@ function Timeline({ setHandlebarTime={setCurrentTime} events={mainCameraReviewItems} motion_events={motionData ?? []} + noRecordingRanges={noRecordings ?? 
[]} contentRef={contentRef} onHandlebarDraggingChange={(scrubbing) => setScrubbing(scrubbing)} isZooming={isZooming} diff --git a/web/tailwind.config.cjs b/web/tailwind.config.cjs index 92d88c589..27ed5ba74 100644 --- a/web/tailwind.config.cjs +++ b/web/tailwind.config.cjs @@ -42,6 +42,10 @@ module.exports = { wide: "32 / 9", tall: "8 / 9", }, + backgroundImage: { + slashes: + "repeating-linear-gradient(45deg, hsl(var(--primary-variant) / 0.2), hsl(var(--primary-variant) / 0.2) 2px, transparent 2px, transparent 8px)", + }, colors: { border: "hsl(var(--border))", input: "hsl(var(--input))", From 53ff33135b761fd5868ca81c7594617a59c3b4cb Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Fri, 23 May 2025 11:05:04 -0600 Subject: [PATCH 006/530] Update ROCm to 6.4.1 (#18364) * Update rocm to 6.4.1 * Quick fix --- docker/rocm/Dockerfile | 8 ++++---- docker/rocm/requirements-wheels-rocm.txt | 2 +- docker/rocm/rocm.hcl | 2 +- frigate/comms/dispatcher.py | 5 ++++- 4 files changed, 10 insertions(+), 7 deletions(-) diff --git a/docker/rocm/Dockerfile b/docker/rocm/Dockerfile index 752b2d7cc..3bc28cae8 100644 --- a/docker/rocm/Dockerfile +++ b/docker/rocm/Dockerfile @@ -2,7 +2,7 @@ # https://askubuntu.com/questions/972516/debian-frontend-environment-variable ARG DEBIAN_FRONTEND=noninteractive -ARG ROCM=6.4.0 +ARG ROCM=6.4.1 ARG AMDGPU=gfx900 ARG HSA_OVERRIDE_GFX_VERSION ARG HSA_OVERRIDE @@ -13,12 +13,12 @@ FROM wget AS rocm ARG ROCM ARG AMDGPU -RUN apt update && \ +RUN apt update -qq && \ apt install -y wget gpg && \ - wget -O rocm.deb https://repo.radeon.com/amdgpu-install/6.4/ubuntu/jammy/amdgpu-install_6.4.60400-1_all.deb && \ + wget -O rocm.deb https://repo.radeon.com/amdgpu-install/6.4.1/ubuntu/jammy/amdgpu-install_6.4.60401-1_all.deb && \ apt install -y ./rocm.deb && \ apt update && \ - apt install -y rocm + apt install -qq -y rocm RUN mkdir -p /opt/rocm-dist/opt/rocm-$ROCM/lib RUN cd /opt/rocm-$ROCM/lib && \ diff --git a/docker/rocm/requirements-wheels-rocm.txt b/docker/rocm/requirements-wheels-rocm.txt index 1cdeaed20..21aebf4bd 100644 --- a/docker/rocm/requirements-wheels-rocm.txt +++ b/docker/rocm/requirements-wheels-rocm.txt @@ -1 +1 @@ -onnxruntime-rocm @ https://github.com/NickM-27/frigate-onnxruntime-rocm/releases/download/v6.4.0/onnxruntime_rocm-1.21.1-cp311-cp311-linux_x86_64.whl \ No newline at end of file +onnxruntime-rocm @ https://github.com/NickM-27/frigate-onnxruntime-rocm/releases/download/v6.4.1/onnxruntime_rocm-1.21.1-cp311-cp311-linux_x86_64.whl \ No newline at end of file diff --git a/docker/rocm/rocm.hcl b/docker/rocm/rocm.hcl index 82ab58782..0745a9f3d 100644 --- a/docker/rocm/rocm.hcl +++ b/docker/rocm/rocm.hcl @@ -2,7 +2,7 @@ variable "AMDGPU" { default = "gfx900" } variable "ROCM" { - default = "6.4.0" + default = "6.4.1" } variable "HSA_OVERRIDE_GFX_VERSION" { default = "" diff --git a/frigate/comms/dispatcher.py b/frigate/comms/dispatcher.py index 33f3ec158..c94ce690f 100644 --- a/frigate/comms/dispatcher.py +++ b/frigate/comms/dispatcher.py @@ -289,7 +289,10 @@ class Dispatcher: logger.info(f"Turning off detection for {camera_name}") detect_settings.enabled = False - self.config_updater.publish(f"config/detect/{camera_name}", detect_settings) + self.config_updater.publish_update( + CameraConfigUpdateTopic(CameraConfigUpdateEnum.detect, camera_name), + detect_settings, + ) self.publish(f"{camera_name}/detect/state", payload, retain=True) def _on_enabled_command(self, camera_name: str, payload: str) -> None: From 723553edb725cec5c7bb0384eac1a7e44e3aeba0 Mon Sep 17 00:00:00 
2001 From: Nicolas Mowen Date: Sat, 24 May 2025 10:18:46 -0600 Subject: [PATCH 007/530] Add ability to configure when custom classification models run (#18380) * Add config to control when classification models are run * Cleanup --- frigate/config/classification.py | 12 +++++ .../real_time/custom_classification.py | 49 +++++++++++++++++-- frigate/embeddings/maintainer.py | 14 ++++-- 3 files changed, 65 insertions(+), 10 deletions(-) diff --git a/frigate/config/classification.py b/frigate/config/classification.py index 134316148..d809ae18a 100644 --- a/frigate/config/classification.py +++ b/frigate/config/classification.py @@ -38,12 +38,24 @@ class CustomClassificationStateCameraConfig(FrigateBaseModel): crop: list[int, int, int, int] = Field( title="Crop of image frame on this camera to run classification on." ) + threshold: float = Field( + default=0.8, title="Classification score threshold to change the state." + ) class CustomClassificationStateConfig(FrigateBaseModel): cameras: Dict[str, CustomClassificationStateCameraConfig] = Field( title="Cameras to run classification on." ) + motion: bool = Field( + default=False, + title="If classification should be run when motion is detected in the crop.", + ) + interval: int | None = Field( + default=None, + title="Interval to run classification on in seconds.", + gt=0, + ) class CustomClassificationObjectConfig(FrigateBaseModel): diff --git a/frigate/data_processing/real_time/custom_classification.py b/frigate/data_processing/real_time/custom_classification.py index 1848968bb..cd99508c9 100644 --- a/frigate/data_processing/real_time/custom_classification.py +++ b/frigate/data_processing/real_time/custom_classification.py @@ -1,5 +1,6 @@ """Real time processor that works with classification tflite models.""" +import datetime import logging from typing import Any @@ -10,10 +11,11 @@ from frigate.comms.event_metadata_updater import ( EventMetadataPublisher, EventMetadataTypeEnum, ) +from frigate.comms.inter_process import InterProcessRequestor from frigate.config import FrigateConfig from frigate.config.classification import CustomClassificationConfig from frigate.util.builtin import load_labels -from frigate.util.object import calculate_region +from frigate.util.object import box_overlaps, calculate_region from ..types import DataProcessorMetrics from .api import RealTimeProcessorApi @@ -31,14 +33,19 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi): self, config: FrigateConfig, model_config: CustomClassificationConfig, + name: str, + requestor: InterProcessRequestor, metrics: DataProcessorMetrics, ): super().__init__(config, metrics) self.model_config = model_config + self.name = name + self.requestor = requestor self.interpreter: Interpreter = None self.tensor_input_details: dict[str, Any] = None self.tensor_output_details: dict[str, Any] = None self.labelmap: dict[int, str] = {} + self.last_run = datetime.datetime.now().timestamp() self.__build_detector() def __build_detector(self) -> None: @@ -53,16 +60,46 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi): def process_frame(self, frame_data: dict[str, Any], frame: np.ndarray): camera = frame_data.get("camera") + if camera not in self.model_config.state_config.cameras: return camera_config = self.model_config.state_config.cameras[camera] - x, y, x2, y2 = calculate_region( - frame.shape, + crop = [ camera_config.crop[0], camera_config.crop[1], camera_config.crop[2], camera_config.crop[3], + ] + should_run = False + + now = datetime.datetime.now().timestamp() + 
if ( + self.model_config.state_config.interval + and now > self.last_run + self.model_config.state_config.interval + ): + self.last_run = now + should_run = True + + if ( + not should_run + and self.model_config.state_config.motion + and any([box_overlaps(crop, mb) for mb in frame_data.get("motion", [])]) + ): + # classification should run at most once per second + if now > self.last_run + 1: + self.last_run = now + should_run = True + + if not should_run: + return + + x, y, x2, y2 = calculate_region( + frame.shape, + crop[0], + crop[1], + crop[2], + crop[3], 224, 1.0, ) @@ -82,12 +119,14 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi): res: np.ndarray = self.interpreter.get_tensor( self.tensor_output_details[0]["index"] )[0] - print(f"the gate res is {res}") probs = res / res.sum(axis=0) best_id = np.argmax(probs) score = round(probs[best_id], 2) - print(f"got {self.labelmap[best_id]} with score {score}") + if score >= camera_config.threshold: + self.requestor.send_data( + f"{camera}/classification/{self.name}", self.labelmap[best_id] + ) def handle_request(self, topic, request_data): return None diff --git a/frigate/embeddings/maintainer.py b/frigate/embeddings/maintainer.py index 9838f4a21..6cce9ba98 100644 --- a/frigate/embeddings/maintainer.py +++ b/frigate/embeddings/maintainer.py @@ -147,13 +147,15 @@ class EmbeddingMaintainer(threading.Thread): ) ) - for model in self.config.classification.custom.values(): + for name, model_config in self.config.classification.custom.items(): self.realtime_processors.append( - CustomStateClassificationProcessor(self.config, model, self.metrics) - if model.state_config != None + CustomStateClassificationProcessor( + self.config, model_config, name, self.requestor, self.metrics + ) + if model_config.state_config != None else CustomObjectClassificationProcessor( self.config, - model, + model_config, self.event_metadata_publisher, self.metrics, ) @@ -504,7 +506,9 @@ class EmbeddingMaintainer(threading.Thread): processor.process_frame(camera, yuv_frame, True) if isinstance(processor, CustomStateClassificationProcessor): - processor.process_frame({"camera": camera}, yuv_frame) + processor.process_frame( + {"camera": camera, "motion": motion_boxes}, yuv_frame + ) self.frame_manager.close(frame_name) From cf1d50be30f6ceceb5df6d3ff85bef1c0485f697 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Sat, 24 May 2025 10:47:15 -0600 Subject: [PATCH 008/530] Add basic config editor when Frigate can't startup (#18383) * Start Frigate in safe mode when config does not validate * Add safe mode page that is just the config editor * Adjust Frigate config editor when in safe mode * Cleanup * Improve log message --- frigate/__main__.py | 9 +- frigate/config/config.py | 15 ++- web/public/locales/en/views/configEditor.json | 2 + web/src/App.tsx | 105 +++++++++++------- web/src/pages/ConfigEditor.tsx | 44 +++++--- web/src/types/frigateConfig.ts | 3 + 6 files changed, 120 insertions(+), 58 deletions(-) diff --git a/frigate/__main__.py b/frigate/__main__.py index 4143f7ae6..4c732be80 100644 --- a/frigate/__main__.py +++ b/frigate/__main__.py @@ -93,7 +93,14 @@ def main() -> None: print("*************************************************************") print("*** End Config Validation Errors ***") print("*************************************************************") - sys.exit(1) + + # attempt to start Frigate in recovery mode + try: + config = FrigateConfig.load(install=True, safe_load=True) + print("Starting Frigate in safe mode.") + except ValidationError: 
+ print("Unable to start Frigate in safe mode.") + sys.exit(1) if args.validate_config: print("*************************************************************") print("*** Your config file is valid. ***") diff --git a/frigate/config/config.py b/frigate/config/config.py index 6ec048acd..58427f5d5 100644 --- a/frigate/config/config.py +++ b/frigate/config/config.py @@ -334,6 +334,9 @@ def verify_lpr_and_face( class FrigateConfig(FrigateBaseModel): version: Optional[str] = Field(default=None, title="Current config version.") + safe_mode: bool = Field( + default=False, title="If Frigate should be started in safe mode." + ) # Fields that install global state should be defined first, so that their validators run first. environment_vars: EnvVars = Field( @@ -716,6 +719,7 @@ class FrigateConfig(FrigateBaseModel): @classmethod def load(cls, **kwargs): + """Loads the Frigate config file, runs migrations, and creates the config object.""" config_path = find_config_file() # No configuration file found, create one. @@ -743,7 +747,7 @@ class FrigateConfig(FrigateBaseModel): return FrigateConfig.parse(f, **kwargs) @classmethod - def parse(cls, config, *, is_json=None, **context): + def parse(cls, config, *, is_json=None, safe_load=False, **context): # If config is a file, read its contents. if hasattr(config, "read"): fname = getattr(config, "name", None) @@ -767,6 +771,15 @@ class FrigateConfig(FrigateBaseModel): else: config = yaml.load(config) + # load minimal Frigate config after the full config did not validate + if safe_load: + safe_config = {"safe_mode": True, "cameras": {}, "mqtt": {"enabled": False}} + + # copy over auth and proxy config in case auth needs to be enforced + safe_config["auth"] = config.get("auth", {}) + safe_config["proxy"] = config.get("proxy", {}) + return cls.parse_object(safe_config, **context) + # Validate and return the config dict. return cls.parse_object(config, **context) diff --git a/web/public/locales/en/views/configEditor.json b/web/public/locales/en/views/configEditor.json index ef3035f38..614143c16 100644 --- a/web/public/locales/en/views/configEditor.json +++ b/web/public/locales/en/views/configEditor.json @@ -1,6 +1,8 @@ { "documentTitle": "Config Editor - Frigate", "configEditor": "Config Editor", + "safeConfigEditor": "Config Editor (Safe Mode)", + "safeModeDescription": "Frigate is in safe mode due to a config validation error.", "copyConfig": "Copy Config", "saveAndRestart": "Save & Restart", "saveOnly": "Save Only", diff --git a/web/src/App.tsx b/web/src/App.tsx index a0062549f..d3edbc3a2 100644 --- a/web/src/App.tsx +++ b/web/src/App.tsx @@ -12,6 +12,8 @@ import { cn } from "./lib/utils"; import { isPWA } from "./utils/isPWA"; import ProtectedRoute from "@/components/auth/ProtectedRoute"; import { AuthProvider } from "@/context/auth-context"; +import useSWR from "swr"; +import { FrigateConfig } from "./types/frigateConfig"; const Live = lazy(() => import("@/pages/Live")); const Events = lazy(() => import("@/pages/Events")); @@ -26,52 +28,16 @@ const Logs = lazy(() => import("@/pages/Logs")); const AccessDenied = lazy(() => import("@/pages/AccessDenied")); function App() { + const { data: config } = useSWR("config", { + revalidateOnFocus: false, + }); + return ( -
- {isDesktop && } - {isDesktop && } - {isMobile && } -
- - - - } - > - } /> - } /> - } /> - } /> - } /> - - } - > - } /> - } /> - } /> - } /> - } /> - - } /> - } /> - - -
-
+ {config?.safe_mode ? : }
@@ -79,4 +45,61 @@ function App() { ); } +function DefaultAppView() { + return ( +
+ {isDesktop && } + {isDesktop && } + {isMobile && } +
+ + + } + > + } /> + } /> + } /> + } /> + } /> + + }> + } /> + } /> + } /> + } /> + } /> + + } /> + } /> + + +
+
+ ); +} + +function SafeAppView() { + return ( +
+
+ + + +
+
+ ); +} + export default App; diff --git a/web/src/pages/ConfigEditor.tsx b/web/src/pages/ConfigEditor.tsx index b8c11d028..1f265428c 100644 --- a/web/src/pages/ConfigEditor.tsx +++ b/web/src/pages/ConfigEditor.tsx @@ -16,7 +16,8 @@ import { MdOutlineRestartAlt } from "react-icons/md"; import RestartDialog from "@/components/overlay/dialog/RestartDialog"; import { useTranslation } from "react-i18next"; import { useRestart } from "@/api/ws"; import { useResizeObserver } from "@/hooks/resize-observer"; +import { FrigateConfig } from "@/types/frigateConfig"; type SaveOptions = "saveonly" | "restart"; @@ -33,7 +34,10 @@ function ConfigEditor() { document.title = t("documentTitle"); }, [t]); - const { data: config } = useSWR("config/raw"); + const { data: config } = useSWR("config", { + revalidateOnFocus: false, + }); + const { data: rawConfig } = useSWR("config/raw"); const { theme, systemTheme } = useTheme(); const [error, setError] = useState(); @@ -103,7 +107,7 @@ function ConfigEditor() { }, [onHandleSaveConfig]); useEffect(() => { - if (!config) { + if (!rawConfig) { return; } @@ -130,9 +134,9 @@ function ConfigEditor() { } if (!modelRef.current) { - modelRef.current = monaco.editor.createModel(config, "yaml", modelUri); + modelRef.current = monaco.editor.createModel(rawConfig, "yaml", modelUri); } else { - modelRef.current.setValue(config); + modelRef.current.setValue(rawConfig); } const container = configRef.current; @@ -165,32 +169,32 @@ function ConfigEditor() { } schemaConfiguredRef.current = false; }; - }, [config, apiHost, systemTheme, theme, onHandleSaveConfig]); + }, [rawConfig, apiHost, systemTheme, theme, onHandleSaveConfig]); // monitoring state const [hasChanges, setHasChanges] = useState(false); useEffect(() => { - if (!config || !modelRef.current) { + if (!rawConfig || !modelRef.current) { return; } modelRef.current.onDidChangeContent(() => { - if (modelRef.current?.getValue() != config) { + if (modelRef.current?.getValue() != rawConfig) { setHasChanges(true); } else { setHasChanges(false); } }); - }, [config]); + }, [rawConfig]); useEffect(() => { - if (config && modelRef.current) { - modelRef.current.setValue(config); + if (rawConfig && modelRef.current) { + modelRef.current.setValue(rawConfig); setHasChanges(false); } - }, [config]); + }, [rawConfig]); useEffect(() => { let listener: ((e: BeforeUnloadEvent) => void) | undefined; @@ -225,7 +229,7 @@ function ConfigEditor() { } }, [error, width, height]); - if (!config) { + if (!rawConfig) { return ; } 
- - {t("configEditor")} - +
+ + {t(config?.safe_mode ? "safeConfigEditor" : "configEditor")} + + {config?.safe_mode && ( +
+ {t("safeModeDescription")} +
+ )} +
)} + {config?.cameras[search?.camera].audio_transcription.enabled && + search?.label == "speech" && + search?.end_time && ( + + )}
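Note on consuming the new payload: `TrackedObjectUpdateReturnType` is a discriminated union keyed on `type`, and all of its type-specific fields are optional, so clients should narrow on the discriminator before reading them. A minimal sketch (the handler name is illustrative; only the import path and payload shape come from the types added in this patch):

import { TrackedObjectUpdateReturnType } from "@/types/ws";

// Dispatch on the update type; each branch reads only the fields
// that the corresponding update actually carries.
function handleTrackedObjectUpdate(update: TrackedObjectUpdateReturnType) {
  if (!update) return; // payload may be null before the first message
  switch (update.type) {
    case "description":
      // revalidate cached search results, as Explore.tsx does below
      break;
    case "transcription":
      console.log(`speech for ${update.id}: ${update.text}`);
      break;
    case "lpr":
      console.log(`plate ${update.plate} (score ${update.score})`);
      break;
    case "face":
      console.log(`face ${update.name} (score ${update.score})`);
      break;
  }
}
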
diff --git a/web/src/pages/Explore.tsx b/web/src/pages/Explore.tsx index 09e576551..b6a4a43e5 100644 --- a/web/src/pages/Explore.tsx +++ b/web/src/pages/Explore.tsx @@ -257,15 +257,13 @@ export default function Explore() { // mutation and revalidation - const trackedObjectUpdate = useTrackedObjectUpdate(); + const { payload: wsUpdate } = useTrackedObjectUpdate(); useEffect(() => { - if (trackedObjectUpdate) { + if (wsUpdate && wsUpdate.type == "description") { mutate(); } - // mutate / revalidate when event description updates come in - // eslint-disable-next-line react-hooks/exhaustive-deps - }, [trackedObjectUpdate]); + }, [wsUpdate, mutate]); // embeddings reindex progress diff --git a/web/src/types/frigateConfig.ts b/web/src/types/frigateConfig.ts index 355d4cb72..cf2bf1476 100644 --- a/web/src/types/frigateConfig.ts +++ b/web/src/types/frigateConfig.ts @@ -41,6 +41,11 @@ export interface CameraConfig { min_volume: number; num_threads: number; }; + audio_transcription: { + enabled: boolean; + enabled_in_config: boolean; + live_enabled: boolean; + }; best_image_timeout: number; birdseye: { enabled: boolean; @@ -296,6 +301,10 @@ export interface FrigateConfig { num_threads: number; }; + audio_transcription: { + enabled: boolean; + }; + birdseye: BirdseyeConfig; cameras: { diff --git a/web/src/types/ws.ts b/web/src/types/ws.ts index 3badd961d..d1e810494 100644 --- a/web/src/types/ws.ts +++ b/web/src/types/ws.ts @@ -58,6 +58,7 @@ export interface FrigateCameraState { snapshots: boolean; record: boolean; audio: boolean; + audio_transcription: boolean; notifications: boolean; notifications_suspended: number; autotracking: boolean; @@ -84,3 +85,21 @@ export type EmbeddingsReindexProgressType = { }; export type ToggleableSetting = "ON" | "OFF"; + +export type TrackedObjectUpdateType = + | "description" + | "lpr" + | "transcription" + | "face"; + +export type TrackedObjectUpdateReturnType = { + type: TrackedObjectUpdateType; + id: string; + camera: string; + description?: string; + name?: string; + plate?: string; + score?: number; + timestamp?: number; + text?: string; +} | null; diff --git a/web/src/views/explore/ExploreView.tsx b/web/src/views/explore/ExploreView.tsx index f680b6566..ca13f2986 100644 --- a/web/src/views/explore/ExploreView.tsx +++ b/web/src/views/explore/ExploreView.tsx @@ -75,13 +75,13 @@ export default function ExploreView({ }, {}); }, [events]); - const trackedObjectUpdate = useTrackedObjectUpdate(); + const { payload: wsUpdate } = useTrackedObjectUpdate(); useEffect(() => { - mutate(); - // mutate / revalidate when event description updates come in - // eslint-disable-next-line react-hooks/exhaustive-deps - }, [trackedObjectUpdate]); + if (wsUpdate && wsUpdate.type == "description") { + mutate(); + } + }, [wsUpdate, mutate]); // update search detail when results change diff --git a/web/src/views/live/LiveCameraView.tsx b/web/src/views/live/LiveCameraView.tsx index b972e1a39..9e9e0e974 100644 --- a/web/src/views/live/LiveCameraView.tsx +++ b/web/src/views/live/LiveCameraView.tsx @@ -1,5 +1,7 @@ import { + useAudioLiveTranscription, useAudioState, + useAudioTranscriptionState, useAutotrackingState, useDetectState, useEnabledState, @@ -90,6 +92,8 @@ import { LuX, } from "react-icons/lu"; import { + MdClosedCaption, + MdClosedCaptionDisabled, MdNoPhotography, MdOutlineRestartAlt, MdPersonOff, @@ -197,6 +201,29 @@ export default function LiveCameraView({ const { payload: enabledState } = useEnabledState(camera.name); const cameraEnabled = enabledState === "ON"; + // for audio 
transcriptions + + const { payload: audioTranscriptionState, send: sendTranscription } = + useAudioTranscriptionState(camera.name); + const { payload: transcription } = useAudioLiveTranscription(camera.name); + const transcriptionRef = useRef(null); + + useEffect(() => { + if (transcription) { + if (transcriptionRef.current) { + transcriptionRef.current.scrollTop = + transcriptionRef.current.scrollHeight; + } + } + }, [transcription]); + + useEffect(() => { + return () => { + // disable transcriptions when unmounting + if (audioTranscriptionState == "ON") sendTranscription("OFF"); + }; + }, [audioTranscriptionState, sendTranscription]); + // click overlay for ptzs const [clickOverlay, setClickOverlay] = useState(false); @@ -567,6 +594,9 @@ export default function LiveCameraView({ autotrackingEnabled={ camera.onvif.autotracking.enabled_in_config } + transcriptionEnabled={ + camera.audio_transcription.enabled_in_config + } fullscreen={fullscreen} streamName={streamName ?? ""} setStreamName={setStreamName} @@ -626,6 +656,16 @@ export default function LiveCameraView({ />
+ {camera?.audio?.enabled_in_config && + audioTranscriptionState == "ON" && + transcription != null && ( +
+ {transcription} +
+ )} {camera.onvif.host != "" && ( @@ -984,6 +1024,7 @@ type FrigateCameraFeaturesProps = { recordingEnabled: boolean; audioDetectEnabled: boolean; autotrackingEnabled: boolean; + transcriptionEnabled: boolean; fullscreen: boolean; streamName: string; setStreamName?: (value: string | undefined) => void; @@ -1003,6 +1044,7 @@ function FrigateCameraFeatures({ recordingEnabled, audioDetectEnabled, autotrackingEnabled, + transcriptionEnabled, fullscreen, streamName, setStreamName, @@ -1035,6 +1077,8 @@ function FrigateCameraFeatures({ const { payload: audioState, send: sendAudio } = useAudioState(camera.name); const { payload: autotrackingState, send: sendAutotracking } = useAutotrackingState(camera.name); + const { payload: transcriptionState, send: sendTranscription } = + useAudioTranscriptionState(camera.name); // roles @@ -1198,6 +1242,27 @@ function FrigateCameraFeatures({ disabled={!cameraEnabled} /> )} + {audioDetectEnabled && transcriptionEnabled && ( + + sendTranscription(transcriptionState == "ON" ? "OFF" : "ON") + } + disabled={!cameraEnabled || audioState == "OFF"} + /> + )} {autotrackingEnabled && ( )} + {audioDetectEnabled && transcriptionEnabled && ( + + sendTranscription(transcriptionState == "ON" ? "OFF" : "ON") + } + /> + )} {autotrackingEnabled && ( Date: Thu, 29 May 2025 17:51:32 -0600 Subject: [PATCH 011/530] Implement API to train classification models (#18475) --- docker/main/Dockerfile | 3 + docker/main/requirements-wheels.txt | 3 + frigate/api/classification.py | 35 +++++- frigate/config/classification.py | 3 +- frigate/config/config.py | 4 + .../real_time/custom_classification.py | 78 ++++++++++--- frigate/embeddings/maintainer.py | 4 +- frigate/util/classification.py | 108 ++++++++++++++++++ 8 files changed, 219 insertions(+), 19 deletions(-) create mode 100644 frigate/util/classification.py diff --git a/docker/main/Dockerfile b/docker/main/Dockerfile index 1cf752ed5..90e174d10 100644 --- a/docker/main/Dockerfile +++ b/docker/main/Dockerfile @@ -227,6 +227,9 @@ ENV OPENCV_FFMPEG_LOGLEVEL=8 # Set HailoRT to disable logging ENV HAILORT_LOGGER_PATH=NONE +# TensorFlow error only +ENV TF_CPP_MIN_LOG_LEVEL=3 + ENV PATH="/usr/local/go2rtc/bin:/usr/local/tempio/bin:/usr/local/nginx/sbin:${PATH}" # Install dependencies diff --git a/docker/main/requirements-wheels.txt b/docker/main/requirements-wheels.txt index 59cc1ab9c..624983eb4 100644 --- a/docker/main/requirements-wheels.txt +++ b/docker/main/requirements-wheels.txt @@ -11,6 +11,9 @@ joserfc == 1.0.* pathvalidate == 3.2.* markupsafe == 3.0.* python-multipart == 0.0.12 +# Classification Model Training +tensorflow == 2.19.* ; platform_machine == 'aarch64' +tensorflow-cpu == 2.19.* ; platform_machine == 'x86_64' # General mypy == 1.6.1 onvif-zeep-async == 3.1.* diff --git a/frigate/api/classification.py b/frigate/api/classification.py index 19afd3a9a..98b716c67 100644 --- a/frigate/api/classification.py +++ b/frigate/api/classification.py @@ -7,7 +7,7 @@ import shutil from typing import Any import cv2 -from fastapi import APIRouter, Depends, Request, UploadFile +from fastapi import APIRouter, BackgroundTasks, Depends, Request, UploadFile from fastapi.responses import JSONResponse from pathvalidate import sanitize_filename from peewee import DoesNotExist @@ -19,10 +19,12 @@ from frigate.api.defs.request.classification_body import ( RenameFaceBody, ) from frigate.api.defs.tags import Tags +from frigate.config import FrigateConfig from frigate.config.camera import DetectConfig -from frigate.const import FACE_DIR +from 
frigate.const import FACE_DIR, MODEL_CACHE_DIR from frigate.embeddings import EmbeddingsContext from frigate.models import Event +from frigate.util.classification import train_classification_model from frigate.util.path import get_event_snapshot logger = logging.getLogger(__name__) @@ -442,3 +444,32 @@ def transcribe_audio(request: Request, body: AudioTranscriptionBody): }, status_code=500, ) + + +# custom classification training + + +@router.post("/classification/{name}/train") +async def train_configured_model( + request: Request, name: str, background_tasks: BackgroundTasks +): + config: FrigateConfig = request.app.frigate_config + + if name not in config.classification.custom: + return JSONResponse( + content=( + { + "success": False, + "message": f"{name} is not a known classification model.", + } + ), + status_code=404, + ) + + background_tasks.add_task( + train_classification_model, os.path.join(MODEL_CACHE_DIR, name) + ) + return JSONResponse( + content={"success": True, "message": "Started classification model training."}, + status_code=200, + ) diff --git a/frigate/config/classification.py b/frigate/config/classification.py index cd20a63ad..40a1183cd 100644 --- a/frigate/config/classification.py +++ b/frigate/config/classification.py @@ -85,8 +85,7 @@ class CustomClassificationObjectConfig(FrigateBaseModel): class CustomClassificationConfig(FrigateBaseModel): enabled: bool = Field(default=True, title="Enable running the model.") - model_path: str = Field(title="Path to custom classification tflite model.") - labelmap_path: str = Field(title="Path to custom classification model labelmap.") + name: str | None = Field(default=None, title="Name of classification model.") object_config: CustomClassificationObjectConfig | None = Field(default=None) state_config: CustomClassificationStateConfig | None = Field(default=None) diff --git a/frigate/config/config.py b/frigate/config/config.py index 5bca436b6..d912a574d 100644 --- a/frigate/config/config.py +++ b/frigate/config/config.py @@ -706,6 +706,10 @@ class FrigateConfig(FrigateBaseModel): verify_objects_track(camera_config, labelmap_objects) verify_lpr_and_face(self, camera_config) + # set names on classification configs + for name, config in self.classification.custom.items(): + config.name = name + self.objects.parse_all_objects(self.cameras) self.model.create_colormap(sorted(self.objects.all_objects)) self.model.check_and_load_plus_model(self.plus_api) diff --git a/frigate/data_processing/real_time/custom_classification.py b/frigate/data_processing/real_time/custom_classification.py index cd99508c9..f94c2b28c 100644 --- a/frigate/data_processing/real_time/custom_classification.py +++ b/frigate/data_processing/real_time/custom_classification.py @@ -2,6 +2,7 @@ import datetime import logging +import os from typing import Any import cv2 @@ -14,6 +15,7 @@ from frigate.comms.event_metadata_updater import ( from frigate.comms.inter_process import InterProcessRequestor from frigate.config import FrigateConfig from frigate.config.classification import CustomClassificationConfig +from frigate.const import CLIPS_DIR, MODEL_CACHE_DIR from frigate.util.builtin import load_labels from frigate.util.object import box_overlaps, calculate_region @@ -33,14 +35,14 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi): self, config: FrigateConfig, model_config: CustomClassificationConfig, - name: str, requestor: InterProcessRequestor, metrics: DataProcessorMetrics, ): super().__init__(config, metrics) self.model_config = model_config - 
self.name = name self.requestor = requestor + self.model_dir = os.path.join(MODEL_CACHE_DIR, self.model_config.name) + self.train_dir = os.path.join(CLIPS_DIR, self.model_config.name) self.interpreter: Interpreter = None self.tensor_input_details: dict[str, Any] = None self.tensor_output_details: dict[str, Any] = None @@ -50,13 +52,16 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi): def __build_detector(self) -> None: self.interpreter = Interpreter( - model_path=self.model_config.model_path, + model_path=os.path.join(self.model_dir, "model.tflite"), num_threads=2, ) self.interpreter.allocate_tensors() self.tensor_input_details = self.interpreter.get_input_details() self.tensor_output_details = self.interpreter.get_output_details() - self.labelmap = load_labels(self.model_config.labelmap_path, prefill=0) + self.labelmap = load_labels( + os.path.join(self.model_dir, "labelmap.txt"), + prefill=0, + ) def process_frame(self, frame_data: dict[str, Any], frame: np.ndarray): camera = frame_data.get("camera") @@ -105,15 +110,15 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi): ) rgb = cv2.cvtColor(frame, cv2.COLOR_YUV2RGB_I420) - input = rgb[ + frame = rgb[ y:y2, x:x2, ] - if input.shape != (224, 224): - input = cv2.resize(input, (224, 224)) + if frame.shape != (224, 224): + frame = cv2.resize(frame, (224, 224)) - input = np.expand_dims(input, axis=0) + input = np.expand_dims(frame, axis=0) self.interpreter.set_tensor(self.tensor_input_details[0]["index"], input) self.interpreter.invoke() res: np.ndarray = self.interpreter.get_tensor( @@ -123,9 +128,18 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi): best_id = np.argmax(probs) score = round(probs[best_id], 2) + write_classification_attempt( + self.train_dir, + cv2.cvtColor(frame, cv2.COLOR_RGB2BGR), + now, + self.labelmap[best_id], + score, + ) + if score >= camera_config.threshold: self.requestor.send_data( - f"{camera}/classification/{self.name}", self.labelmap[best_id] + f"{camera}/classification/{self.model_config.name}", + self.labelmap[best_id], ) def handle_request(self, topic, request_data): @@ -145,6 +159,8 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi): ): super().__init__(config, metrics) self.model_config = model_config + self.model_dir = os.path.join(MODEL_CACHE_DIR, self.model_config.name) + self.train_dir = os.path.join(self.model_dir, "train") self.interpreter: Interpreter = None self.sub_label_publisher = sub_label_publisher self.tensor_input_details: dict[str, Any] = None @@ -155,18 +171,22 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi): def __build_detector(self) -> None: self.interpreter = Interpreter( - model_path=self.model_config.model_path, + model_path=os.path.join(self.model_dir, "model.tflite"), num_threads=2, ) self.interpreter.allocate_tensors() self.tensor_input_details = self.interpreter.get_input_details() self.tensor_output_details = self.interpreter.get_output_details() - self.labelmap = load_labels(self.model_config.labelmap_path, prefill=0) + self.labelmap = load_labels( + os.path.join(self.model_dir, "labelmap.txt"), + prefill=0, + ) def process_frame(self, obj_data, frame): if obj_data["label"] not in self.model_config.object_config.objects: return + now = datetime.datetime.now().timestamp() x, y, x2, y2 = calculate_region( frame.shape, obj_data["box"][0], @@ -194,11 +214,17 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi): )[0] probs = res / res.sum(axis=0) best_id = np.argmax(probs) - score = 
round(probs[best_id], 2) - previous_score = self.detected_objects.get(obj_data["id"], 0.0) + write_classification_attempt( + self.train_dir, + cv2.cvtColor(frame, cv2.COLOR_RGB2BGR), + now, + self.labelmap[best_id], + score, + ) + if score <= previous_score: logger.debug(f"Score {score} is worse than previous score {previous_score}") return @@ -215,3 +241,29 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi): def expire_object(self, object_id, camera): if object_id in self.detected_objects: self.detected_objects.pop(object_id) + + +@staticmethod +def write_classification_attempt( + folder: str, + frame: np.ndarray, + timestamp: float, + label: str, + score: float, +) -> None: + if "-" in label: + label = label.replace("-", "_") + + file = os.path.join(folder, f"{timestamp}-{label}-{score}.webp") + os.makedirs(folder, exist_ok=True) + cv2.imwrite(file, frame) + + files = sorted( + filter(lambda f: (f.endswith(".webp")), os.listdir(folder)), + key=lambda f: os.path.getctime(os.path.join(folder, f)), + reverse=True, + ) + + # delete oldest face image if maximum is reached + if len(files) > 100: + os.unlink(os.path.join(folder, files[-1])) diff --git a/frigate/embeddings/maintainer.py b/frigate/embeddings/maintainer.py index 25601f014..9a2378221 100644 --- a/frigate/embeddings/maintainer.py +++ b/frigate/embeddings/maintainer.py @@ -150,10 +150,10 @@ class EmbeddingMaintainer(threading.Thread): ) ) - for name, model_config in self.config.classification.custom.items(): + for model_config in self.config.classification.custom.values(): self.realtime_processors.append( CustomStateClassificationProcessor( - self.config, model_config, name, self.requestor, self.metrics + self.config, model_config, self.requestor, self.metrics ) if model_config.state_config != None else CustomObjectClassificationProcessor( diff --git a/frigate/util/classification.py b/frigate/util/classification.py new file mode 100644 index 000000000..4ee5e1d54 --- /dev/null +++ b/frigate/util/classification.py @@ -0,0 +1,108 @@ +"""Util for classification models.""" + +import os + +import cv2 +import numpy as np +import tensorflow as tf +from tensorflow.keras import layers, models, optimizers +from tensorflow.keras.applications import MobileNetV2 +from tensorflow.keras.preprocessing.image import ImageDataGenerator + +BATCH_SIZE = 16 +EPOCHS = 50 +LEARNING_RATE = 0.001 + + +@staticmethod +def generate_representative_dataset_factory(dataset_dir: str): + def generate_representative_dataset(): + image_paths = [] + for root, dirs, files in os.walk(dataset_dir): + for file in files: + if file.lower().endswith((".jpg", ".jpeg", ".png")): + image_paths.append(os.path.join(root, file)) + + for path in image_paths[:300]: + img = cv2.imread(path) + img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) + img = cv2.resize(img, (224, 224)) + img_array = np.array(img, dtype=np.float32) / 255.0 + img_array = img_array[None, ...] 
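+            # preprocessing matches the training generator below (RGB,
+            # 224x224, rescaled to [0, 1] float32, plus a leading batch
+            # dimension); the TFLite converter runs these samples through
+            # the model to calibrate its int8 quantization ranges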
+ yield [img_array] + + return generate_representative_dataset + + +@staticmethod +def train_classification_model(model_dir: str) -> bool: + """Train a classification model.""" + dataset_dir = os.path.join(model_dir, "dataset") + num_classes = len( + [ + d + for d in os.listdir(dataset_dir) + if os.path.isdir(os.path.join(dataset_dir, d)) + ] + ) + + # Start with imagenet base model with 35% of channels in each layer + base_model = MobileNetV2( + input_shape=(224, 224, 3), + include_top=False, + weights="imagenet", + alpha=0.35, + ) + base_model.trainable = False # Freeze pre-trained layers + + model = models.Sequential( + [ + base_model, + layers.GlobalAveragePooling2D(), + layers.Dense(128, activation="relu"), + layers.Dropout(0.3), + layers.Dense(num_classes, activation="softmax"), + ] + ) + + model.compile( + optimizer=optimizers.Adam(learning_rate=LEARNING_RATE), + loss="categorical_crossentropy", + metrics=["accuracy"], + ) + + # create training set + datagen = ImageDataGenerator(rescale=1.0 / 255, validation_split=0.2) + train_gen = datagen.flow_from_directory( + dataset_dir, + target_size=(224, 224), + batch_size=BATCH_SIZE, + class_mode="categorical", + subset="training", + ) + + # write labelmap + class_indices = train_gen.class_indices + index_to_class = {v: k for k, v in class_indices.items()} + sorted_classes = [index_to_class[i] for i in range(len(index_to_class))] + with open(os.path.join(model_dir, "labelmap.txt"), "w") as f: + for class_name in sorted_classes: + f.write(f"{class_name}\n") + + # train the model + model.fit(train_gen, epochs=EPOCHS, verbose=0) + + # convert model to tflite + converter = tf.lite.TFLiteConverter.from_keras_model(model) + converter.optimizations = [tf.lite.Optimize.DEFAULT] + converter.representative_dataset = generate_representative_dataset_factory( + dataset_dir + ) + converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8] + converter.inference_input_type = tf.uint8 + converter.inference_output_type = tf.uint8 + tflite_model = converter.convert() + + # write model + with open(os.path.join(model_dir, "model.tflite"), "wb") as f: + f.write(tflite_model) From 0b9997015a94eb67b51b9e8f890f692490796d9d Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Fri, 30 May 2025 15:30:21 -0600 Subject: [PATCH 012/530] Intel updates (#18493) * Update openvino and onnxruntime * Install icd and level-zero-gpu deps from intel directly * Install * Add dep * Fix package install --- docker/main/install_deps.sh | 26 ++++++++++++++++++++++++-- docker/main/requirements-wheels.txt | 6 +++--- docker/tensorrt/requirements-amd64.txt | 2 +- 3 files changed, 28 insertions(+), 6 deletions(-) diff --git a/docker/main/install_deps.sh b/docker/main/install_deps.sh index 9684199f8..aed11dff4 100755 --- a/docker/main/install_deps.sh +++ b/docker/main/install_deps.sh @@ -71,11 +71,33 @@ if [[ "${TARGETARCH}" == "amd64" ]]; then echo "deb [arch=amd64 signed-by=/usr/share/keyrings/intel-graphics.gpg] https://repositories.intel.com/gpu/ubuntu jammy client" | tee /etc/apt/sources.list.d/intel-gpu-jammy.list apt-get -qq update apt-get -qq install --no-install-recommends --no-install-suggests -y \ - intel-opencl-icd=24.35.30872.31-996~22.04 intel-level-zero-gpu=1.3.29735.27-914~22.04 intel-media-va-driver-non-free=24.3.3-996~22.04 \ - libmfx1=23.2.2-880~22.04 libmfxgen1=24.2.4-914~22.04 libvpl2=1:2.13.0.0-996~22.04 + intel-media-va-driver-non-free libmfx1 libmfxgen1 libvpl2 + + apt-get -qq install -y ocl-icd-libopencl1 rm -f /usr/share/keyrings/intel-graphics.gpg rm 
-f /etc/apt/sources.list.d/intel-gpu-jammy.list + + # install legacy and standard intel icd and level-zero-gpu + # see https://github.com/intel/compute-runtime/blob/master/LEGACY_PLATFORMS.md for more info + # needed core package + wget https://github.com/intel/compute-runtime/releases/download/24.52.32224.5/libigdgmm12_22.5.5_amd64.deb + dpkg -i libigdgmm12_22.5.5_amd64.deb + rm libigdgmm12_22.5.5_amd64.deb + + # legacy packages + wget https://github.com/intel/compute-runtime/releases/download/24.35.30872.22/intel-opencl-icd-legacy1_24.35.30872.22_amd64.deb + wget https://github.com/intel/compute-runtime/releases/download/24.35.30872.22/intel-level-zero-gpu-legacy1_1.3.30872.22_amd64.deb + wget https://github.com/intel/intel-graphics-compiler/releases/download/igc-1.0.17537.20/intel-igc-opencl_1.0.17537.20_amd64.deb + wget https://github.com/intel/intel-graphics-compiler/releases/download/igc-1.0.17537.20/intel-igc-core_1.0.17537.20_amd64.deb + # standard packages + wget https://github.com/intel/compute-runtime/releases/download/24.52.32224.5/intel-opencl-icd_24.52.32224.5_amd64.deb + wget https://github.com/intel/compute-runtime/releases/download/24.52.32224.5/intel-level-zero-gpu_1.6.32224.5_amd64.deb + wget https://github.com/intel/intel-graphics-compiler/releases/download/v2.5.6/intel-igc-opencl-2_2.5.6+18417_amd64.deb + wget https://github.com/intel/intel-graphics-compiler/releases/download/v2.5.6/intel-igc-core-2_2.5.6+18417_amd64.deb + + dpkg -i *.deb + rm *.deb fi if [[ "${TARGETARCH}" == "arm64" ]]; then diff --git a/docker/main/requirements-wheels.txt b/docker/main/requirements-wheels.txt index 624983eb4..eabb75bef 100644 --- a/docker/main/requirements-wheels.txt +++ b/docker/main/requirements-wheels.txt @@ -41,9 +41,9 @@ opencv-python-headless == 4.11.0.* opencv-contrib-python == 4.11.0.* scipy == 1.14.* # OpenVino & ONNX -openvino == 2024.4.* -onnxruntime-openvino == 1.20.* ; platform_machine == 'x86_64' -onnxruntime == 1.20.* ; platform_machine == 'aarch64' +openvino == 2025.1.* +onnxruntime-openvino == 1.22.* ; platform_machine == 'x86_64' +onnxruntime == 1.22.* ; platform_machine == 'aarch64' # Embeddings transformers == 4.45.* # Generative AI diff --git a/docker/tensorrt/requirements-amd64.txt b/docker/tensorrt/requirements-amd64.txt index be4aaa066..63c68b583 100644 --- a/docker/tensorrt/requirements-amd64.txt +++ b/docker/tensorrt/requirements-amd64.txt @@ -14,5 +14,5 @@ nvidia_cusparse_cu12==12.5.1.*; platform_machine == 'x86_64' nvidia_nccl_cu12==2.23.4; platform_machine == 'x86_64' nvidia_nvjitlink_cu12==12.5.82; platform_machine == 'x86_64' onnx==1.16.*; platform_machine == 'x86_64' -onnxruntime-gpu==1.20.*; platform_machine == 'x86_64' +onnxruntime-gpu==1.22.*; platform_machine == 'x86_64' protobuf==3.20.3; platform_machine == 'x86_64' From 3f8ec723366c009b15ff729c31f606d3aba56e8b Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Fri, 30 May 2025 17:01:39 -0600 Subject: [PATCH 013/530] Tiered recordings (#18492) * Implement tiered recording * Add migration for record config * Update docs * Update reference docs * Fix preview query * Fix incorrect accesses * Fix * Fix * Fix * Fix --- docs/docs/configuration/record.md | 52 +++++------------------- docs/docs/configuration/reference.md | 16 ++++---- frigate/config/camera/record.py | 28 ++++++++----- frigate/config/config.py | 30 +------------- frigate/record/cleanup.py | 42 +++++++++++++------ frigate/record/maintainer.py | 25 +++++++----- frigate/util/config.py | 60 +++++++++++++++++++++++++++- 7 files changed, 143 
insertions(+), 110 deletions(-) diff --git a/docs/docs/configuration/record.md b/docs/docs/configuration/record.md index 52c0f0c88..2745ef27d 100644 --- a/docs/docs/configuration/record.md +++ b/docs/docs/configuration/record.md @@ -13,14 +13,15 @@ H265 recordings can be viewed in Chrome 108+, Edge and Safari only. All other br ### Most conservative: Ensure all video is saved -For users deploying Frigate in environments where it is important to have contiguous video stored even if there was no detectable motion, the following config will store all video for 3 days. After 3 days, only video containing motion and overlapping with alerts or detections will be retained until 30 days have passed. +For users deploying Frigate in environments where it is important to have contiguous video stored even if there was no detectable motion, the following config will store all video for 3 days. After 3 days, only video containing motion will be saved for 7 days. After 7 days, only video containing motion and overlapping with alerts or detections will be retained until 30 days have passed. ```yaml record: enabled: True - retain: + continuous: days: 3 - mode: all + motion: + days: 7 alerts: retain: days: 30 @@ -38,9 +39,8 @@ In order to reduce storage requirements, you can adjust your config to only reta ```yaml record: enabled: True - retain: + motion: days: 3 - mode: motion alerts: retain: days: 30 @@ -58,7 +58,7 @@ If you only want to retain video that occurs during a tracked object, this confi ```yaml record: enabled: True - retain: + continuous: days: 0 alerts: retain: @@ -80,15 +80,17 @@ Retention configs support decimals meaning they can be configured to retain `0.5 ::: -### Continuous Recording +### Continuous and Motion Recording -The number of days to retain continuous recordings can be set via the following config where X is a number, by default continuous recording is disabled. +The number of days to retain continuous and motion recordings can be set via the following config where X is a number, by default continuous recording is disabled. ```yaml record: enabled: True - retain: + continuous: days: 1 # <- number of days to keep continuous recordings + motion: + days: 2 # <- number of days to keep motion recordings ``` Continuous recording supports different retention modes [which are described below](#what-do-the-different-retain-modes-mean) @@ -112,38 +114,6 @@ This configuration will retain recording segments that overlap with alerts and d **WARNING**: Recordings still must be enabled in the config. If a camera has recordings disabled in the config, enabling via the methods listed above will have no effect. -## What do the different retain modes mean? - -Frigate saves from the stream with the `record` role in 10 second segments. These options determine which recording segments are kept for continuous recording (but can also affect tracked objects). - -Let's say you have Frigate configured so that your doorbell camera would retain the last **2** days of continuous recording. - -- With the `all` option all 48 hours of those two days would be kept and viewable. -- With the `motion` option the only parts of those 48 hours would be segments that Frigate detected motion. This is the middle ground option that won't keep all 48 hours, but will likely keep all segments of interest along with the potential for some extra segments. -- With the `active_objects` option the only segments that would be kept are those where there was a true positive object that was not considered stationary. 
- -The same options are available with alerts and detections, except it will only save the recordings when it overlaps with a review item of that type. - -A configuration example of the above retain modes where all `motion` segments are stored for 7 days and `active objects` are stored for 14 days would be as follows: - -```yaml -record: - enabled: True - retain: - days: 7 - mode: motion - alerts: - retain: - days: 14 - mode: active_objects - detections: - retain: - days: 14 - mode: active_objects -``` - -The above configuration example can be added globally or on a per camera basis. - ## Can I have "continuous" recordings, but only at certain times? Using Frigate UI, Home Assistant, or MQTT, cameras can be automated to only record in certain situations or at certain times. diff --git a/docs/docs/configuration/reference.md b/docs/docs/configuration/reference.md index ab6374452..4be10000d 100644 --- a/docs/docs/configuration/reference.md +++ b/docs/docs/configuration/reference.md @@ -440,18 +440,18 @@ record: expire_interval: 60 # Optional: Two-way sync recordings database with disk on startup and once a day (default: shown below). sync_recordings: False - # Optional: Retention settings for recording - retain: + # Optional: Continuous retention settings + continuous: + # Optional: Number of days to retain recordings regardless of tracked objects or motion (default: shown below) + # NOTE: This should be set to 0 and retention should be defined in alerts and detections section below + # if you only want to retain recordings of alerts and detections. + days: 0 + # Optional: Motion retention settings + motion: # Optional: Number of days to retain recordings regardless of tracked objects (default: shown below) # NOTE: This should be set to 0 and retention should be defined in alerts and detections section below # if you only want to retain recordings of alerts and detections. days: 0 - # Optional: Mode for retention. Available options are: all, motion, and active_objects - # all - save all recording segments regardless of activity - # motion - save all recordings segments with any detected motion - # active_objects - save all recording segments with active/moving objects - # NOTE: this mode only applies when the days setting above is greater than 0 - mode: all # Optional: Recording Export Settings export: # Optional: Timelapse Output Args (default: shown below). 
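For reference, the migration added later in this patch maps the old single `retain` block onto the new tiered fields. With purely illustrative values, a 0.16 config such as:

```yaml
record:
  retain:
    days: 7
    mode: motion
```

becomes the following under 0.17, since only `mode: all` days carry over to `continuous` and any other mode is folded into `motion`:

```yaml
record:
  continuous:
    days: 0
  motion:
    days: 7
```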
diff --git a/frigate/config/camera/record.py b/frigate/config/camera/record.py index 52d11e2a5..09a7a84d5 100644 --- a/frigate/config/camera/record.py +++ b/frigate/config/camera/record.py @@ -22,27 +22,31 @@ __all__ = [ DEFAULT_TIME_LAPSE_FFMPEG_ARGS = "-vf setpts=0.04*PTS -r 30" +class RecordRetainConfig(FrigateBaseModel): + days: float = Field(default=0, ge=0, title="Default retention period.") + + class RetainModeEnum(str, Enum): all = "all" motion = "motion" active_objects = "active_objects" -class RecordRetainConfig(FrigateBaseModel): - days: float = Field(default=0, title="Default retention period.") - mode: RetainModeEnum = Field(default=RetainModeEnum.all, title="Retain mode.") - - class ReviewRetainConfig(FrigateBaseModel): - days: float = Field(default=10, title="Default retention period.") + days: float = Field(default=10, ge=0, title="Default retention period.") mode: RetainModeEnum = Field(default=RetainModeEnum.motion, title="Retain mode.") class EventsConfig(FrigateBaseModel): pre_capture: int = Field( - default=5, title="Seconds to retain before event starts.", le=MAX_PRE_CAPTURE + default=5, + title="Seconds to retain before event starts.", + le=MAX_PRE_CAPTURE, + ge=0, + ) + post_capture: int = Field( + default=5, ge=0, title="Seconds to retain after event ends." ) - post_capture: int = Field(default=5, title="Seconds to retain after event ends.") retain: ReviewRetainConfig = Field( default_factory=ReviewRetainConfig, title="Event retention settings." ) @@ -77,8 +81,12 @@ class RecordConfig(FrigateBaseModel): default=60, title="Number of minutes to wait between cleanup runs.", ) - retain: RecordRetainConfig = Field( - default_factory=RecordRetainConfig, title="Record retention settings." + continuous: RecordRetainConfig = Field( + default_factory=RecordRetainConfig, + title="Continuous recording retention settings.", + ) + motion: RecordRetainConfig = Field( + default_factory=RecordRetainConfig, title="Motion recording retention settings." ) detections: EventsConfig = Field( default_factory=EventsConfig, title="Detection specific retention settings." diff --git a/frigate/config/config.py b/frigate/config/config.py index d912a574d..49e57f3cf 100644 --- a/frigate/config/config.py +++ b/frigate/config/config.py @@ -48,7 +48,7 @@ from .camera.genai import GenAIConfig from .camera.motion import MotionConfig from .camera.notification import NotificationConfig from .camera.objects import FilterConfig, ObjectConfig -from .camera.record import RecordConfig, RetainModeEnum +from .camera.record import RecordConfig from .camera.review import ReviewConfig from .camera.snapshots import SnapshotsConfig from .camera.timestamp import TimestampStyleConfig @@ -204,33 +204,6 @@ def verify_valid_live_stream_names( ) -def verify_recording_retention(camera_config: CameraConfig) -> None: - """Verify that recording retention modes are ranked correctly.""" - rank_map = { - RetainModeEnum.all: 0, - RetainModeEnum.motion: 1, - RetainModeEnum.active_objects: 2, - } - - if ( - camera_config.record.retain.days != 0 - and rank_map[camera_config.record.retain.mode] - > rank_map[camera_config.record.alerts.retain.mode] - ): - logger.warning( - f"{camera_config.name}: Recording retention is configured for {camera_config.record.retain.mode} and alert retention is configured for {camera_config.record.alerts.retain.mode}. The more restrictive retention policy will be applied." 
- ) - - if ( - camera_config.record.retain.days != 0 - and rank_map[camera_config.record.retain.mode] - > rank_map[camera_config.record.detections.retain.mode] - ): - logger.warning( - f"{camera_config.name}: Recording retention is configured for {camera_config.record.retain.mode} and detection retention is configured for {camera_config.record.detections.retain.mode}. The more restrictive retention policy will be applied." - ) - - def verify_recording_segments_setup_with_reasonable_time( camera_config: CameraConfig, ) -> None: @@ -697,7 +670,6 @@ class FrigateConfig(FrigateBaseModel): verify_config_roles(camera_config) verify_valid_live_stream_names(self, camera_config) - verify_recording_retention(camera_config) verify_recording_segments_setup_with_reasonable_time(camera_config) verify_zone_objects_are_tracked(camera_config) verify_required_zones_exist(camera_config) diff --git a/frigate/record/cleanup.py b/frigate/record/cleanup.py index 1de08a899..9d1e28306 100644 --- a/frigate/record/cleanup.py +++ b/frigate/record/cleanup.py @@ -100,7 +100,11 @@ class RecordingCleanup(threading.Thread): ).execute() def expire_existing_camera_recordings( - self, expire_date: float, config: CameraConfig, reviews: ReviewSegment + self, + continuous_expire_date: float, + motion_expire_date: float, + config: CameraConfig, + reviews: ReviewSegment, ) -> None: """Delete recordings for existing camera based on retention config.""" # Get the timestamp for cutoff of retained days @@ -116,8 +120,14 @@ class RecordingCleanup(threading.Thread): Recordings.motion, ) .where( - Recordings.camera == config.name, - Recordings.end_time < expire_date, + (Recordings.camera == config.name) + & ( + ( + (Recordings.end_time < continuous_expire_date) + & (Recordings.motion == 0) + ) + | (Recordings.end_time < motion_expire_date) + ) ) .order_by(Recordings.start_time) .namedtuples() @@ -188,7 +198,7 @@ class RecordingCleanup(threading.Thread): Recordings.id << deleted_recordings_list[i : i + max_deletes] ).execute() - previews: Previews = ( + previews: list[Previews] = ( Previews.select( Previews.id, Previews.start_time, @@ -196,8 +206,9 @@ class RecordingCleanup(threading.Thread): Previews.path, ) .where( - Previews.camera == config.name, - Previews.end_time < expire_date, + (Previews.camera == config.name) + & (Previews.end_time < continuous_expire_date) + & (Previews.end_time < motion_expire_date) ) .order_by(Previews.start_time) .namedtuples() @@ -253,7 +264,9 @@ class RecordingCleanup(threading.Thread): logger.debug("Start deleted cameras.") # Handle deleted cameras - expire_days = self.config.record.retain.days + expire_days = max( + self.config.record.continuous.days, self.config.record.motion.days + ) expire_before = ( datetime.datetime.now() - datetime.timedelta(days=expire_days) ).timestamp() @@ -291,9 +304,12 @@ class RecordingCleanup(threading.Thread): now = datetime.datetime.now() self.expire_review_segments(config, now) - - expire_days = config.record.retain.days - expire_date = (now - datetime.timedelta(days=expire_days)).timestamp() + continuous_expire_date = ( + now - datetime.timedelta(days=config.record.continuous.days) + ).timestamp() + motion_expire_date = ( + now - datetime.timedelta(days=config.record.motion.days) + ).timestamp() # Get all the reviews to check against reviews: ReviewSegment = ( @@ -306,13 +322,15 @@ class RecordingCleanup(threading.Thread): ReviewSegment.camera == camera, # need to ensure segments for all reviews starting # before the expire date are included - ReviewSegment.start_time < 
expire_date, + ReviewSegment.start_time < motion_expire_date, ) .order_by(ReviewSegment.start_time) .namedtuples() ) - self.expire_existing_camera_recordings(expire_date, config, reviews) + self.expire_existing_camera_recordings( + continuous_expire_date, motion_expire_date, config, reviews + ) logger.debug(f"End camera: {camera}.") logger.debug("End all cameras.") diff --git a/frigate/record/maintainer.py b/frigate/record/maintainer.py index 7f13451d6..ace9a5d24 100644 --- a/frigate/record/maintainer.py +++ b/frigate/record/maintainer.py @@ -285,12 +285,16 @@ class RecordingMaintainer(threading.Thread): Path(cache_path).unlink(missing_ok=True) return - # if cached file's start_time is earlier than the retain days for the camera - # meaning continuous recording is not enabled - if start_time <= ( - datetime.datetime.now().astimezone(datetime.timezone.utc) - - datetime.timedelta(days=self.config.cameras[camera].record.retain.days) - ): + record_config = self.config.cameras[camera].record + highest = None + + if record_config.continuous.days > 0: + highest = "continuous" + elif record_config.motion.days > 0: + highest = "motion" + + # continuous / motion recording is not enabled + if highest is None: # if the cached segment overlaps with the review items: overlaps = False for review in reviews: @@ -344,8 +348,7 @@ class RecordingMaintainer(threading.Thread): ).astimezone(datetime.timezone.utc) if end_time < retain_cutoff: self.drop_segment(cache_path) - # else retain days includes this segment - # meaning continuous recording is enabled + # continuous / motion is enabled else: # assume that empty means the relevant recording info has not been received yet camera_info = self.object_recordings_info[camera] @@ -360,7 +363,11 @@ class RecordingMaintainer(threading.Thread): ).astimezone(datetime.timezone.utc) >= end_time ): - record_mode = self.config.cameras[camera].record.retain.mode + record_mode = ( + RetainModeEnum.all + if highest == "continuous" + else RetainModeEnum.motion + ) return await self.move_segment( camera, start_time, end_time, duration, cache_path, record_mode ) diff --git a/frigate/util/config.py b/frigate/util/config.py index 70492adbc..98267b9ea 100644 --- a/frigate/util/config.py +++ b/frigate/util/config.py @@ -13,7 +13,7 @@ from frigate.util.services import get_video_properties logger = logging.getLogger(__name__) -CURRENT_CONFIG_VERSION = "0.16-0" +CURRENT_CONFIG_VERSION = "0.17-0" DEFAULT_CONFIG_FILE = os.path.join(CONFIG_DIR, "config.yml") @@ -91,6 +91,13 @@ def migrate_frigate_config(config_file: str): yaml.dump(new_config, f) previous_version = "0.16-0" + if previous_version < "0.17-0": + logger.info(f"Migrating frigate config from {previous_version} to 0.17-0...") + new_config = migrate_017_0(config) + with open(config_file, "w") as f: + yaml.dump(new_config, f) + previous_version = "0.17-0" + logger.info("Finished frigate config migration...") @@ -340,6 +347,57 @@ def migrate_016_0(config: dict[str, dict[str, Any]]) -> dict[str, dict[str, Any] return new_config +def migrate_017_0(config: dict[str, dict[str, Any]]) -> dict[str, dict[str, Any]]: + """Handle migrating frigate config to 0.16-0""" + new_config = config.copy() + + # migrate global to new recording configuration + global_record_retain = config.get("record", {}).get("retain") + + if global_record_retain: + continuous = {"days": 0} + motion = {"days": 0} + days = global_record_retain.get("days") + mode = global_record_retain.get("mode", "all") + + if days: + if mode == "all": + continuous["days"] = days + 
else: + motion["days"] = days + + new_config["record"]["continuous"] = continuous + new_config["record"]["motion"] = motion + + del new_config["record"]["retain"] + + for name, camera in config.get("cameras", {}).items(): + camera_config: dict[str, dict[str, Any]] = camera.copy() + camera_record_retain = camera_config.get("record", {}).get("retain") + + if camera_record_retain: + continuous = {"days": 0} + motion = {"days": 0} + days = camera_record_retain.get("days") + mode = camera_record_retain.get("mode", "all") + + if days: + if mode == "all": + continuous["days"] = days + else: + motion["days"] = days + + camera_config["record"]["continuous"] = continuous + camera_config["record"]["motion"] = motion + + del camera_config["record"]["retain"] + + new_config["cameras"][name] = camera_config + + new_config["version"] = "0.17-0" + return new_config + + def get_relative_coordinates( mask: Optional[Union[str, list]], frame_shape: tuple[int, int] ) -> Union[str, list]: From b77e6f5ebcae74cb16dc99160b5e72d60e4955ae Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Sun, 1 Jun 2025 15:21:12 -0500 Subject: [PATCH 014/530] Upgrade PaddleOCR models to v4 (rec) and v5 (det) (#18505) The PP_OCRv5 text detection models have greatly improved over v3. The v5 recognition model makes improvements to challenging handwriting and uncommon characters, which are not necessary for LPR, so using v4 seemed like a better choice to continue to keep inference time as low as possible. Also included is the full dictionary for Chinese character support. --- .../common/license_plate/mixin.py | 226 +++++++++--------- frigate/embeddings/onnx/lpr_embedding.py | 11 +- 2 files changed, 126 insertions(+), 111 deletions(-) diff --git a/frigate/data_processing/common/license_plate/mixin.py b/frigate/data_processing/common/license_plate/mixin.py index 2c68ce374..2d63c1c69 100644 --- a/frigate/data_processing/common/license_plate/mixin.py +++ b/frigate/data_processing/common/license_plate/mixin.py @@ -22,7 +22,7 @@ from frigate.comms.event_metadata_updater import ( EventMetadataPublisher, EventMetadataTypeEnum, ) -from frigate.const import CLIPS_DIR +from frigate.const import CLIPS_DIR, MODEL_CACHE_DIR from frigate.embeddings.onnx.lpr_embedding import LPR_EMBEDDING_SIZE from frigate.types import TrackedObjectUpdateTypesEnum from frigate.util.builtin import EventsPerSecond, InferenceSpeed @@ -43,7 +43,11 @@ class LicensePlateProcessingMixin: self.plates_det_second = EventsPerSecond() self.plates_det_second.start() self.event_metadata_publisher = EventMetadataPublisher() - self.ctc_decoder = CTCDecoder() + self.ctc_decoder = CTCDecoder( + character_dict_path=os.path.join( + MODEL_CACHE_DIR, "paddleocr-onnx", "ppocr_keys_v1.txt" + ) + ) self.batch_size = 6 # Detection specific parameters @@ -1595,113 +1599,121 @@ class CTCDecoder: for each decoded character sequence. """ - def __init__(self): + def __init__(self, character_dict_path=None): """ - Initialize the CTCDecoder with a list of characters and a character map. + Initializes the CTCDecoder. + :param character_dict_path: Path to the character dictionary file. + If None, a default (English-focused) list is used. + For Chinese models, this should point to the correct + character dictionary file provided with the model. 
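+
+        A sketch of typical usage, mirroring how the LPR mixin in this
+        patch constructs the decoder:
+
+            decoder = CTCDecoder(
+                character_dict_path=os.path.join(
+                    MODEL_CACHE_DIR, "paddleocr-onnx", "ppocr_keys_v1.txt"
+                )
+            )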
+ """ + self.characters = [] + if character_dict_path and os.path.exists(character_dict_path): + with open(character_dict_path, "r", encoding="utf-8") as f: + self.characters = ["blank"] + [ + line.strip() for line in f if line.strip() + ] + else: + self.characters = [ + "blank", + "0", + "1", + "2", + "3", + "4", + "5", + "6", + "7", + "8", + "9", + ":", + ";", + "<", + "=", + ">", + "?", + "@", + "A", + "B", + "C", + "D", + "E", + "F", + "G", + "H", + "I", + "J", + "K", + "L", + "M", + "N", + "O", + "P", + "Q", + "R", + "S", + "T", + "U", + "V", + "W", + "X", + "Y", + "Z", + "[", + "\\", + "]", + "^", + "_", + "`", + "a", + "b", + "c", + "d", + "e", + "f", + "g", + "h", + "i", + "j", + "k", + "l", + "m", + "n", + "o", + "p", + "q", + "r", + "s", + "t", + "u", + "v", + "w", + "x", + "y", + "z", + "{", + "|", + "}", + "~", + "!", + '"', + "#", + "$", + "%", + "&", + "'", + "(", + ")", + "*", + "+", + ",", + "-", + ".", + "/", + " ", + " ", + ] - The character set includes digits, letters, special characters, and a "blank" token - (used by the CTC model for decoding purposes). A character map is created to map - indices to characters. - """ - self.characters = [ - "blank", - "0", - "1", - "2", - "3", - "4", - "5", - "6", - "7", - "8", - "9", - ":", - ";", - "<", - "=", - ">", - "?", - "@", - "A", - "B", - "C", - "D", - "E", - "F", - "G", - "H", - "I", - "J", - "K", - "L", - "M", - "N", - "O", - "P", - "Q", - "R", - "S", - "T", - "U", - "V", - "W", - "X", - "Y", - "Z", - "[", - "\\", - "]", - "^", - "_", - "`", - "a", - "b", - "c", - "d", - "e", - "f", - "g", - "h", - "i", - "j", - "k", - "l", - "m", - "n", - "o", - "p", - "q", - "r", - "s", - "t", - "u", - "v", - "w", - "x", - "y", - "z", - "{", - "|", - "}", - "~", - "!", - '"', - "#", - "$", - "%", - "&", - "'", - "(", - ")", - "*", - "+", - ",", - "-", - ".", - "/", - " ", - " ", - ] self.char_map = {i: char for i, char in enumerate(self.characters)} def __call__( diff --git a/frigate/embeddings/onnx/lpr_embedding.py b/frigate/embeddings/onnx/lpr_embedding.py index ac981da8d..1b5b9acd0 100644 --- a/frigate/embeddings/onnx/lpr_embedding.py +++ b/frigate/embeddings/onnx/lpr_embedding.py @@ -32,13 +32,15 @@ class PaddleOCRDetection(BaseEmbedding): device: str = "AUTO", ): model_file = ( - "detection-large.onnx" if model_size == "large" else "detection-small.onnx" + "detection_v5-large.onnx" + if model_size == "large" + else "detection_v5-small.onnx" ) super().__init__( model_name="paddleocr-onnx", model_file=model_file, download_urls={ - model_file: f"https://github.com/hawkeye217/paddleocr-onnx/raw/refs/heads/master/models/{model_file}" + model_file: f"https://github.com/hawkeye217/paddleocr-onnx/raw/refs/heads/master/models/v5/{model_file}" }, ) self.requestor = requestor @@ -156,9 +158,10 @@ class PaddleOCRRecognition(BaseEmbedding): ): super().__init__( model_name="paddleocr-onnx", - model_file="recognition.onnx", + model_file="recognition_v4.onnx", download_urls={ - "recognition.onnx": "https://github.com/hawkeye217/paddleocr-onnx/raw/refs/heads/master/models/recognition.onnx" + "recognition_v4.onnx": "https://github.com/hawkeye217/paddleocr-onnx/raw/refs/heads/master/models/v4/recognition_v4.onnx", + "ppocr_keys_v1.txt": "https://github.com/hawkeye217/paddleocr-onnx/raw/refs/heads/master/models/v4/ppocr_keys_v1.txt", }, ) self.requestor = requestor From ac7fb29b326fea16dcdc031f41c9d460a9f38757 Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Tue, 3 Jun 2025 06:53:48 -0500 Subject: [PATCH 
015/530] Audio transcription tweaks (#18540) * use model runner * unload whisper model when live transcription is complete --- .../common/audio_transcription/model.py | 81 +++++++ .../real_time/audio_transcription.py | 205 +++++++++--------- .../real_time/whisper_online.py | 5 +- frigate/data_processing/types.py | 7 + frigate/events/audio.py | 42 ++-- 5 files changed, 220 insertions(+), 120 deletions(-) create mode 100644 frigate/data_processing/common/audio_transcription/model.py diff --git a/frigate/data_processing/common/audio_transcription/model.py b/frigate/data_processing/common/audio_transcription/model.py new file mode 100644 index 000000000..0fe5ddb5c --- /dev/null +++ b/frigate/data_processing/common/audio_transcription/model.py @@ -0,0 +1,81 @@ +"""Set up audio transcription models based on model size.""" + +import logging +import os + +import sherpa_onnx +from faster_whisper.utils import download_model + +from frigate.comms.inter_process import InterProcessRequestor +from frigate.const import MODEL_CACHE_DIR +from frigate.data_processing.types import AudioTranscriptionModel +from frigate.util.downloader import ModelDownloader + +logger = logging.getLogger(__name__) + + +class AudioTranscriptionModelRunner: + def __init__( + self, + device: str = "CPU", + model_size: str = "small", + ): + self.model: AudioTranscriptionModel = None + self.requestor = InterProcessRequestor() + + if model_size == "large": + # use the Whisper download function instead of our own + logger.debug("Downloading Whisper audio transcription model") + download_model( + size_or_id="small" if device == "cuda" else "tiny", + local_files_only=False, + cache_dir=os.path.join(MODEL_CACHE_DIR, "whisper"), + ) + logger.debug("Whisper audio transcription model downloaded") + + else: + # small model as default + download_path = os.path.join(MODEL_CACHE_DIR, "sherpa-onnx") + HF_ENDPOINT = os.environ.get("HF_ENDPOINT", "https://huggingface.co") + self.model_files = { + "encoder.onnx": f"{HF_ENDPOINT}/csukuangfj/sherpa-onnx-streaming-zipformer-en-2023-06-26/resolve/main/encoder-epoch-99-avg-1-chunk-16-left-128.onnx", + "decoder.onnx": f"{HF_ENDPOINT}/csukuangfj/sherpa-onnx-streaming-zipformer-en-2023-06-26/resolve/main/decoder-epoch-99-avg-1-chunk-16-left-128.onnx", + "joiner.onnx": f"{HF_ENDPOINT}/csukuangfj/sherpa-onnx-streaming-zipformer-en-2023-06-26/resolve/main/joiner-epoch-99-avg-1-chunk-16-left-128.onnx", + "tokens.txt": f"{HF_ENDPOINT}/csukuangfj/sherpa-onnx-streaming-zipformer-en-2023-06-26/resolve/main/tokens.txt", + } + + if not all( + os.path.exists(os.path.join(download_path, n)) + for n in self.model_files.keys() + ): + self.downloader = ModelDownloader( + model_name="sherpa-onnx", + download_path=download_path, + file_names=self.model_files.keys(), + download_func=self.__download_models, + ) + self.downloader.ensure_model_files() + self.downloader.wait_for_download() + + self.model = sherpa_onnx.OnlineRecognizer.from_transducer( + tokens=os.path.join(MODEL_CACHE_DIR, "sherpa-onnx/tokens.txt"), + encoder=os.path.join(MODEL_CACHE_DIR, "sherpa-onnx/encoder.onnx"), + decoder=os.path.join(MODEL_CACHE_DIR, "sherpa-onnx/decoder.onnx"), + joiner=os.path.join(MODEL_CACHE_DIR, "sherpa-onnx/joiner.onnx"), + num_threads=2, + sample_rate=16000, + feature_dim=80, + enable_endpoint_detection=True, + rule1_min_trailing_silence=2.4, + rule2_min_trailing_silence=1.2, + rule3_min_utterance_length=300, + decoding_method="greedy_search", + provider="cpu", + ) + + def __download_models(self, path: str) -> None: + try: + 
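+            # "path" is the local destination file; the matching source URL
+            # is looked up by file name from self.model_files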
file_name = os.path.basename(path) + ModelDownloader.download_from_url(self.model_files[file_name], path) + except Exception as e: + logger.error(f"Failed to download {path}: {e}") diff --git a/frigate/data_processing/real_time/audio_transcription.py b/frigate/data_processing/real_time/audio_transcription.py index 7ed644498..2e6d599eb 100644 --- a/frigate/data_processing/real_time/audio_transcription.py +++ b/frigate/data_processing/real_time/audio_transcription.py @@ -7,16 +7,20 @@ import threading from typing import Optional import numpy as np -import sherpa_onnx from frigate.comms.inter_process import InterProcessRequestor from frigate.config import CameraConfig, FrigateConfig from frigate.const import MODEL_CACHE_DIR -from frigate.util.downloader import ModelDownloader +from frigate.data_processing.common.audio_transcription.model import ( + AudioTranscriptionModelRunner, +) +from frigate.data_processing.real_time.whisper_online import ( + FasterWhisperASR, + OnlineASRProcessor, +) from ..types import DataProcessorMetrics from .api import RealTimeProcessorApi -from .whisper_online import FasterWhisperASR, OnlineASRProcessor logger = logging.getLogger(__name__) @@ -27,6 +31,7 @@ class AudioTranscriptionRealTimeProcessor(RealTimeProcessorApi): config: FrigateConfig, camera_config: CameraConfig, requestor: InterProcessRequestor, + model_runner: AudioTranscriptionModelRunner, metrics: DataProcessorMetrics, stop_event: threading.Event, ): @@ -34,95 +39,55 @@ class AudioTranscriptionRealTimeProcessor(RealTimeProcessorApi): self.config = config self.camera_config = camera_config self.requestor = requestor - self.recognizer = None self.stream = None + self.whisper_model = None + self.model_runner = model_runner self.transcription_segments = [] self.audio_queue = queue.Queue() self.stop_event = stop_event - if self.config.audio_transcription.model_size == "large": - self.asr = FasterWhisperASR( - modelsize="tiny", - device="cuda" - if self.config.audio_transcription.device == "GPU" - else "cpu", - lan=config.audio_transcription.language, - model_dir=os.path.join(MODEL_CACHE_DIR, "whisper"), - ) - self.asr.use_vad() # Enable Silero VAD for low-RMS audio - - else: - # small model as default - download_path = os.path.join(MODEL_CACHE_DIR, "sherpa-onnx") - HF_ENDPOINT = os.environ.get("HF_ENDPOINT", "https://huggingface.co") - self.model_files = { - "encoder.onnx": f"{HF_ENDPOINT}/csukuangfj/sherpa-onnx-streaming-zipformer-en-2023-06-26/resolve/main/encoder-epoch-99-avg-1-chunk-16-left-128.onnx", - "decoder.onnx": f"{HF_ENDPOINT}/csukuangfj/sherpa-onnx-streaming-zipformer-en-2023-06-26/resolve/main/decoder-epoch-99-avg-1-chunk-16-left-128.onnx", - "joiner.onnx": f"{HF_ENDPOINT}/csukuangfj/sherpa-onnx-streaming-zipformer-en-2023-06-26/resolve/main/joiner-epoch-99-avg-1-chunk-16-left-128.onnx", - "tokens.txt": f"{HF_ENDPOINT}/csukuangfj/sherpa-onnx-streaming-zipformer-en-2023-06-26/resolve/main/tokens.txt", - } - - if not all( - os.path.exists(os.path.join(download_path, n)) - for n in self.model_files.keys() - ): - self.downloader = ModelDownloader( - model_name="sherpa-onnx", - download_path=download_path, - file_names=self.model_files.keys(), - download_func=self.__download_models, - complete_func=self.__build_recognizer, - ) - self.downloader.ensure_model_files() - - self.__build_recognizer() - - def __download_models(self, path: str) -> None: - try: - file_name = os.path.basename(path) - ModelDownloader.download_from_url(self.model_files[file_name], path) - except Exception as e: - 
logger.error(f"Failed to download {path}: {e}") - def __build_recognizer(self) -> None: try: if self.config.audio_transcription.model_size == "large": - self.online = OnlineASRProcessor( - asr=self.asr, + # Whisper models need to be per-process and can only run one stream at a time + # TODO: try parallel: https://github.com/SYSTRAN/faster-whisper/issues/100 + logger.debug(f"Loading Whisper model for {self.camera_config.name}") + self.whisper_model = FasterWhisperASR( + modelsize="tiny", + device="cuda" + if self.config.audio_transcription.device == "GPU" + else "cpu", + lan=self.config.audio_transcription.language, + model_dir=os.path.join(MODEL_CACHE_DIR, "whisper"), + ) + self.whisper_model.use_vad() + self.stream = OnlineASRProcessor( + asr=self.whisper_model, ) else: - self.recognizer = sherpa_onnx.OnlineRecognizer.from_transducer( - tokens=os.path.join(MODEL_CACHE_DIR, "sherpa-onnx/tokens.txt"), - encoder=os.path.join(MODEL_CACHE_DIR, "sherpa-onnx/encoder.onnx"), - decoder=os.path.join(MODEL_CACHE_DIR, "sherpa-onnx/decoder.onnx"), - joiner=os.path.join(MODEL_CACHE_DIR, "sherpa-onnx/joiner.onnx"), - num_threads=2, - sample_rate=16000, - feature_dim=80, - enable_endpoint_detection=True, - rule1_min_trailing_silence=2.4, - rule2_min_trailing_silence=1.2, - rule3_min_utterance_length=300, - decoding_method="greedy_search", - provider="cpu", - ) - self.stream = self.recognizer.create_stream() - logger.debug("Audio transcription (live) initialized") + logger.debug(f"Loading sherpa stream for {self.camera_config.name}") + self.stream = self.model_runner.model.create_stream() + logger.debug( + f"Audio transcription (live) initialized for {self.camera_config.name}" + ) except Exception as e: logger.error( f"Failed to initialize live streaming audio transcription: {e}" ) - self.recognizer = None def __process_audio_stream( self, audio_data: np.ndarray ) -> Optional[tuple[str, bool]]: - if (not self.recognizer or not self.stream) and not self.online: - logger.debug( - "Audio transcription (streaming) recognizer or stream not initialized" - ) + if ( + self.model_runner.model is None + and self.config.audio_transcription.model_size == "small" + ): + logger.debug("Audio transcription (live) model not initialized") return None + if not self.stream: + self.__build_recognizer() + try: if audio_data.dtype != np.float32: audio_data = audio_data.astype(np.float32) @@ -135,10 +100,14 @@ class AudioTranscriptionRealTimeProcessor(RealTimeProcessorApi): if self.config.audio_transcription.model_size == "large": # large model - self.online.insert_audio_chunk(audio_data) - output = self.online.process_iter() + self.stream.insert_audio_chunk(audio_data) + output = self.stream.process_iter() text = output[2].strip() - is_endpoint = text.endswith((".", "!", "?")) + is_endpoint = ( + text.endswith((".", "!", "?")) + and sum(len(str(lines)) for lines in self.transcription_segments) + > 300 + ) if text: self.transcription_segments.append(text) @@ -150,11 +119,11 @@ class AudioTranscriptionRealTimeProcessor(RealTimeProcessorApi): # small model self.stream.accept_waveform(16000, audio_data) - while self.recognizer.is_ready(self.stream): - self.recognizer.decode_stream(self.stream) + while self.model_runner.model.is_ready(self.stream): + self.model_runner.model.decode_stream(self.stream) - text = self.recognizer.get_result(self.stream).strip() - is_endpoint = self.recognizer.is_endpoint(self.stream) + text = self.model_runner.model.get_result(self.stream).strip() + is_endpoint = 
self.model_runner.model.is_endpoint(self.stream) logger.debug(f"Transcription result: '{text}'") @@ -166,7 +135,7 @@ class AudioTranscriptionRealTimeProcessor(RealTimeProcessorApi): if is_endpoint and self.config.audio_transcription.model_size == "small": # reset sherpa if we've reached an endpoint - self.recognizer.reset(self.stream) + self.model_runner.model.reset(self.stream) return text, is_endpoint except Exception as e: @@ -190,10 +159,17 @@ class AudioTranscriptionRealTimeProcessor(RealTimeProcessorApi): logger.debug( f"Starting audio transcription thread for {self.camera_config.name}" ) + + # start with an empty transcription + self.requestor.send_data( + f"{self.camera_config.name}/audio/transcription", + "", + ) + while not self.stop_event.is_set(): try: # Get audio data from queue with a timeout to check stop_event - obj_data, audio = self.audio_queue.get(timeout=0.1) + _, audio = self.audio_queue.get(timeout=0.1) result = self.__process_audio_stream(audio) if not result: @@ -209,7 +185,7 @@ class AudioTranscriptionRealTimeProcessor(RealTimeProcessorApi): self.audio_queue.task_done() if is_endpoint: - self.reset(obj_data["camera"]) + self.reset() except queue.Empty: continue @@ -221,23 +197,7 @@ class AudioTranscriptionRealTimeProcessor(RealTimeProcessorApi): f"Stopping audio transcription thread for {self.camera_config.name}" ) - def reset(self, camera: str) -> None: - if self.config.audio_transcription.model_size == "large": - # get final output from whisper - output = self.online.finish() - self.transcription_segments = [] - - self.requestor.send_data( - f"{self.camera_config.name}/audio/transcription", - (output[2].strip() + " "), - ) - - # reset whisper - self.online.init() - else: - # reset sherpa - self.recognizer.reset(self.stream) - + def clear_audio_queue(self) -> None: # Clear the audio queue while not self.audio_queue.empty(): try: @@ -246,8 +206,54 @@ class AudioTranscriptionRealTimeProcessor(RealTimeProcessorApi): except queue.Empty: break + def reset(self) -> None: + if self.config.audio_transcription.model_size == "large": + # get final output from whisper + output = self.stream.finish() + self.transcription_segments = [] + + self.requestor.send_data( + f"{self.camera_config.name}/audio/transcription", + (output[2].strip() + " "), + ) + + # reset whisper + self.stream.init() + self.transcription_segments = [] + else: + # reset sherpa + self.model_runner.model.reset(self.stream) + logger.debug("Stream reset") + def check_unload_model(self) -> None: + # regularly called in the loop in audio maintainer + if ( + self.config.audio_transcription.model_size == "large" + and self.whisper_model is not None + ): + logger.debug(f"Unloading Whisper model for {self.camera_config.name}") + self.clear_audio_queue() + self.transcription_segments = [] + self.stream = None + self.whisper_model = None + + self.requestor.send_data( + f"{self.camera_config.name}/audio/transcription", + "", + ) + if ( + self.config.audio_transcription.model_size == "small" + and self.stream is not None + ): + logger.debug(f"Clearing sherpa stream for {self.camera_config.name}") + self.stream = None + + self.requestor.send_data( + f"{self.camera_config.name}/audio/transcription", + "", + ) + def stop(self) -> None: """Stop the transcription thread and clean up.""" self.stop_event.set() @@ -266,7 +272,6 @@ class AudioTranscriptionRealTimeProcessor(RealTimeProcessorApi): self, topic: str, request_data: dict[str, any] ) -> dict[str, any] | None: if topic == "clear_audio_recognizer": - self.recognizer = 
None self.stream = None self.__build_recognizer() return {"message": "Audio recognizer cleared and rebuilt", "success": True} diff --git a/frigate/data_processing/real_time/whisper_online.py b/frigate/data_processing/real_time/whisper_online.py index 96c1ce0cf..9b81d7fbe 100644 --- a/frigate/data_processing/real_time/whisper_online.py +++ b/frigate/data_processing/real_time/whisper_online.py @@ -139,8 +139,11 @@ class FasterWhisperASR(ASRBase): return model def transcribe(self, audio, init_prompt=""): + from faster_whisper import BatchedInferencePipeline + # tested: beam_size=5 is faster and better than 1 (on one 200 second document from En ESIC, min chunk 0.01) - segments, info = self.model.transcribe( + batched_model = BatchedInferencePipeline(model=self.model) + segments, info = batched_model.transcribe( audio, language=self.original_language, initial_prompt=init_prompt, diff --git a/frigate/data_processing/types.py b/frigate/data_processing/types.py index a19a856bf..5d083b32e 100644 --- a/frigate/data_processing/types.py +++ b/frigate/data_processing/types.py @@ -4,6 +4,10 @@ import multiprocessing as mp from enum import Enum from multiprocessing.sharedctypes import Synchronized +import sherpa_onnx + +from frigate.data_processing.real_time.whisper_online import FasterWhisperASR + class DataProcessorMetrics: image_embeddings_speed: Synchronized @@ -41,3 +45,6 @@ class PostProcessDataEnum(str, Enum): recording = "recording" review = "review" tracked_object = "tracked_object" + + +AudioTranscriptionModel = FasterWhisperASR | sherpa_onnx.OnlineRecognizer | None diff --git a/frigate/events/audio.py b/frigate/events/audio.py index dc6ee7128..aeeaf3b4f 100644 --- a/frigate/events/audio.py +++ b/frigate/events/audio.py @@ -30,6 +30,9 @@ from frigate.const import ( AUDIO_MIN_CONFIDENCE, AUDIO_SAMPLE_RATE, ) +from frigate.data_processing.common.audio_transcription.model import ( + AudioTranscriptionModelRunner, +) from frigate.data_processing.real_time.audio_transcription import ( AudioTranscriptionRealTimeProcessor, ) @@ -87,6 +90,10 @@ class AudioProcessor(util.Process): self.camera_metrics = camera_metrics self.cameras = cameras self.config = config + self.transcription_model_runner = AudioTranscriptionModelRunner( + self.config.audio_transcription.device, + self.config.audio_transcription.model_size, + ) def run(self) -> None: audio_threads: list[AudioEventMaintainer] = [] @@ -101,6 +108,7 @@ class AudioProcessor(util.Process): camera, self.config, self.camera_metrics, + self.transcription_model_runner, self.stop_event, ) audio_threads.append(audio_thread) @@ -130,6 +138,7 @@ class AudioEventMaintainer(threading.Thread): camera: CameraConfig, config: FrigateConfig, camera_metrics: dict[str, CameraMetrics], + audio_transcription_model_runner: AudioTranscriptionModelRunner, stop_event: threading.Event, ) -> None: super().__init__(name=f"{camera.name}_audio_event_processor") @@ -146,6 +155,7 @@ class AudioEventMaintainer(threading.Thread): self.ffmpeg_cmd = get_ffmpeg_command(self.camera_config.ffmpeg) self.logpipe = LogPipe(f"ffmpeg.{self.camera_config.name}.audio") self.audio_listener = None + self.audio_transcription_model_runner = audio_transcription_model_runner self.transcription_processor = None self.transcription_thread = None @@ -168,6 +178,7 @@ class AudioEventMaintainer(threading.Thread): config=self.config, camera_config=self.camera_config, requestor=self.requestor, + model_runner=self.audio_transcription_model_runner, metrics=self.camera_metrics[self.camera_config.name], 
stop_event=self.stop_event, ) @@ -223,18 +234,18 @@ class AudioEventMaintainer(threading.Thread): ) # run audio transcription - if self.transcription_processor is not None and ( - self.camera_config.audio_transcription.live_enabled - ): - self.transcribing = True - # process audio until we've reached the endpoint - self.transcription_processor.process_audio( - { - "id": f"{self.camera_config.name}_audio", - "camera": self.camera_config.name, - }, - audio, - ) + if self.transcription_processor is not None: + if self.camera_config.audio_transcription.live_enabled: + # process audio until we've reached the endpoint + self.transcription_processor.process_audio( + { + "id": f"{self.camera_config.name}_audio", + "camera": self.camera_config.name, + }, + audio, + ) + else: + self.transcription_processor.check_unload_model() self.expire_detections() @@ -309,13 +320,6 @@ class AudioEventMaintainer(threading.Thread): ) self.detections[detection["label"]] = None - # clear real-time transcription - if self.transcription_processor is not None: - self.transcription_processor.reset(self.camera_config.name) - self.requestor.send_data( - f"{self.camera_config.name}/audio/transcription", "" - ) - def expire_all_detections(self) -> None: """Immediately end all current detections""" now = datetime.datetime.now().timestamp() From 1c75ff59f1730cfe4ad9499467019eed8ee87d55 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Wed, 4 Jun 2025 17:09:55 -0600 Subject: [PATCH 016/530] Classification Model UI (#18571) * Setup basic training structure * Build out route * Handle model configs * Add image fetch APIs * Implement model training screen with dataset selection * Implement viewing of training images * Adjust directories * Implement viewing of images * Add support for deleting images * Implement full deletion * Implement classification model training * Improve naming * More renaming * Improve layout * Reduce logging * Cleanup --- frigate/api/classification.py | 173 ++++- .../real_time/custom_classification.py | 2 +- frigate/util/classification.py | 10 +- .../locales/en/views/classificationModel.json | 49 ++ web/src/App.tsx | 2 + .../overlay/ClassificationSelectionDialog.tsx | 155 ++++ web/src/hooks/use-navigation.ts | 11 +- web/src/pages/ClassificationModel.tsx | 18 + web/src/types/frigateConfig.ts | 20 + .../classification/ModelSelectionView.tsx | 63 ++ .../classification/ModelTrainingView.tsx | 661 ++++++++++++++++++ 11 files changed, 1156 insertions(+), 8 deletions(-) create mode 100644 web/public/locales/en/views/classificationModel.json create mode 100644 web/src/components/overlay/ClassificationSelectionDialog.tsx create mode 100644 web/src/pages/ClassificationModel.tsx create mode 100644 web/src/views/classification/ModelSelectionView.tsx create mode 100644 web/src/views/classification/ModelTrainingView.tsx diff --git a/frigate/api/classification.py b/frigate/api/classification.py index 98b716c67..da5d11d88 100644 --- a/frigate/api/classification.py +++ b/frigate/api/classification.py @@ -21,7 +21,7 @@ from frigate.api.defs.request.classification_body import ( from frigate.api.defs.tags import Tags from frigate.config import FrigateConfig from frigate.config.camera import DetectConfig -from frigate.const import FACE_DIR, MODEL_CACHE_DIR +from frigate.const import CLIPS_DIR, FACE_DIR from frigate.embeddings import EmbeddingsContext from frigate.models import Event from frigate.util.classification import train_classification_model @@ -449,6 +449,50 @@ def transcribe_audio(request: Request, body: 
AudioTranscriptionBody): # custom classification training +@router.get("/classification/{name}/dataset") +def get_classification_dataset(name: str): + dataset_dict: dict[str, list[str]] = {} + + dataset_dir = os.path.join(CLIPS_DIR, sanitize_filename(name), "dataset") + + if not os.path.exists(dataset_dir): + return JSONResponse(status_code=200, content={}) + + for name in os.listdir(dataset_dir): + category_dir = os.path.join(dataset_dir, name) + + if not os.path.isdir(category_dir): + continue + + dataset_dict[name] = [] + + for file in filter( + lambda f: (f.lower().endswith((".webp", ".png", ".jpg", ".jpeg"))), + os.listdir(category_dir), + ): + dataset_dict[name].append(file) + + return JSONResponse(status_code=200, content=dataset_dict) + + +@router.get("/classification/{name}/train") +def get_classification_images(name: str): + train_dir = os.path.join(CLIPS_DIR, sanitize_filename(name), "train") + + if not os.path.exists(train_dir): + return JSONResponse(status_code=200, content=[]) + + return JSONResponse( + status_code=200, + content=list( + filter( + lambda f: (f.lower().endswith((".webp", ".png", ".jpg", ".jpeg"))), + os.listdir(train_dir), + ) + ), + ) + + @router.post("/classification/{name}/train") async def train_configured_model( request: Request, name: str, background_tasks: BackgroundTasks @@ -466,10 +510,131 @@ async def train_configured_model( status_code=404, ) - background_tasks.add_task( - train_classification_model, os.path.join(MODEL_CACHE_DIR, name) - ) + background_tasks.add_task(train_classification_model, name) return JSONResponse( content={"success": True, "message": "Started classification model training."}, status_code=200, ) + + +@router.post( + "/classification/{name}/dataset/{category}/delete", + dependencies=[Depends(require_role(["admin"]))], +) +def delete_classification_dataset_images( + request: Request, name: str, category: str, body: dict = None +): + config: FrigateConfig = request.app.frigate_config + + if name not in config.classification.custom: + return JSONResponse( + content=( + { + "success": False, + "message": f"{name} is not a known classification model.", + } + ), + status_code=404, + ) + + json: dict[str, Any] = body or {} + list_of_ids = json.get("ids", "") + folder = os.path.join( + CLIPS_DIR, sanitize_filename(name), "dataset", sanitize_filename(category) + ) + + for id in list_of_ids: + file_path = os.path.join(folder, id) + + if os.path.isfile(file_path): + os.unlink(file_path) + + return JSONResponse( + content=({"success": True, "message": "Successfully deleted faces."}), + status_code=200, + ) + + +@router.post( + "/classification/{name}/dataset/categorize", + dependencies=[Depends(require_role(["admin"]))], +) +def categorize_classification_image(request: Request, name: str, body: dict = None): + config: FrigateConfig = request.app.frigate_config + + if name not in config.classification.custom: + return JSONResponse( + content=( + { + "success": False, + "message": f"{name} is not a known classification model.", + } + ), + status_code=404, + ) + + json: dict[str, Any] = body or {} + category = sanitize_filename(json.get("category", "")) + training_file_name = sanitize_filename(json.get("training_file", "")) + training_file = os.path.join(CLIPS_DIR, name, "train", training_file_name) + + if training_file_name and not os.path.isfile(training_file): + return JSONResponse( + content=( + { + "success": False, + "message": f"Invalid filename or no file exists: {training_file_name}", + } + ), + status_code=404, + ) + + new_name = 
f"{category}-{datetime.datetime.now().timestamp()}.png" + new_file_folder = os.path.join(CLIPS_DIR, name, "dataset", category) + + if not os.path.exists(new_file_folder): + os.mkdir(new_file_folder) + + # use opencv because webp images can not be used to train + img = cv2.imread(training_file) + cv2.imwrite(os.path.join(new_file_folder, new_name), img) + os.unlink(training_file) + + return JSONResponse( + content=({"success": True, "message": "Successfully deleted faces."}), + status_code=200, + ) + + +@router.post( + "/classification/{name}/train/delete", + dependencies=[Depends(require_role(["admin"]))], +) +def delete_classification_train_images(request: Request, name: str, body: dict = None): + config: FrigateConfig = request.app.frigate_config + + if name not in config.classification.custom: + return JSONResponse( + content=( + { + "success": False, + "message": f"{name} is not a known classification model.", + } + ), + status_code=404, + ) + + json: dict[str, Any] = body or {} + list_of_ids = json.get("ids", "") + folder = os.path.join(CLIPS_DIR, sanitize_filename(name), "train") + + for id in list_of_ids: + file_path = os.path.join(folder, id) + + if os.path.isfile(file_path): + os.unlink(file_path) + + return JSONResponse( + content=({"success": True, "message": "Successfully deleted faces."}), + status_code=200, + ) diff --git a/frigate/data_processing/real_time/custom_classification.py b/frigate/data_processing/real_time/custom_classification.py index f94c2b28c..0e254ab0d 100644 --- a/frigate/data_processing/real_time/custom_classification.py +++ b/frigate/data_processing/real_time/custom_classification.py @@ -42,7 +42,7 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi): self.model_config = model_config self.requestor = requestor self.model_dir = os.path.join(MODEL_CACHE_DIR, self.model_config.name) - self.train_dir = os.path.join(CLIPS_DIR, self.model_config.name) + self.train_dir = os.path.join(CLIPS_DIR, self.model_config.name, "train") self.interpreter: Interpreter = None self.tensor_input_details: dict[str, Any] = None self.tensor_output_details: dict[str, Any] = None diff --git a/frigate/util/classification.py b/frigate/util/classification.py index 4ee5e1d54..a8624870b 100644 --- a/frigate/util/classification.py +++ b/frigate/util/classification.py @@ -1,5 +1,6 @@ """Util for classification models.""" +import logging import os import cv2 @@ -9,6 +10,8 @@ from tensorflow.keras import layers, models, optimizers from tensorflow.keras.applications import MobileNetV2 from tensorflow.keras.preprocessing.image import ImageDataGenerator +from frigate.const import CLIPS_DIR, MODEL_CACHE_DIR + BATCH_SIZE = 16 EPOCHS = 50 LEARNING_RATE = 0.001 @@ -35,9 +38,10 @@ def generate_representative_dataset_factory(dataset_dir: str): @staticmethod -def train_classification_model(model_dir: str) -> bool: +def train_classification_model(model_name: str) -> bool: """Train a classification model.""" - dataset_dir = os.path.join(model_dir, "dataset") + dataset_dir = os.path.join(CLIPS_DIR, model_name, "dataset") + model_dir = os.path.join(MODEL_CACHE_DIR, model_name) num_classes = len( [ d @@ -46,6 +50,8 @@ def train_classification_model(model_dir: str) -> bool: ] ) + tf.get_logger().setLevel(logging.ERROR) + # Start with imagenet base model with 35% of channels in each layer base_model = MobileNetV2( input_shape=(224, 224, 3), diff --git a/web/public/locales/en/views/classificationModel.json b/web/public/locales/en/views/classificationModel.json new file mode 100644 index 
000000000..eb09ecaa0
--- /dev/null
+++ b/web/public/locales/en/views/classificationModel.json
@@ -0,0 +1,49 @@
+{
+  "button": {
+    "deleteClassificationAttempts": "Delete Classification Images",
+    "renameCategory": "Rename Class",
+    "deleteCategory": "Delete Class",
+    "deleteImages": "Delete Images"
+  },
+  "toast": {
+    "success": {
+      "deletedCategory": "Deleted Class",
+      "deletedImage": "Deleted Images",
+      "categorizedImage": "Successfully Classified Image"
+    },
+    "error": {
+      "deleteImageFailed": "Failed to delete: {{errorMessage}}",
+      "deleteCategoryFailed": "Failed to delete class: {{errorMessage}}",
+      "categorizeFailed": "Failed to categorize image: {{errorMessage}}"
+    }
+  },
+  "deleteCategory": {
+    "title": "Delete Class",
+    "desc": "Are you sure you want to delete the class {{name}}? This will permanently delete all associated images and require re-training the model."
+  },
+  "deleteDatasetImages": {
+    "title": "Delete Dataset Images",
+    "desc": "Are you sure you want to delete {{count}} images from {{dataset}}? This action cannot be undone and will require re-training the model."
+  },
+  "deleteTrainImages": {
+    "title": "Delete Train Images",
+    "desc": "Are you sure you want to delete {{count}} images? This action cannot be undone."
+  },
+  "renameCategory": {
+    "title": "Rename Class",
+    "desc": "Enter a new name for {{name}}. You will be required to retrain the model for the name change to take effect."
+  },
+  "description": {
+    "invalidName": "Invalid name. Names can only include letters, numbers, spaces, apostrophes, underscores, and hyphens."
+  },
+  "train": {
+    "title": "Train",
+    "aria": "Select Train"
+  },
+  "categories": "Classes",
+  "createCategory": {
+    "new": "Create New Class"
+  },
+  "categorizeImageAs": "Classify Image As:",
+  "categorizeImage": "Classify Image"
+}
diff --git a/web/src/App.tsx b/web/src/App.tsx
index d3edbc3a2..cd7906e97 100644
--- a/web/src/App.tsx
+++ b/web/src/App.tsx
@@ -24,6 +24,7 @@ const System = lazy(() => import("@/pages/System"));
 const Settings = lazy(() => import("@/pages/Settings"));
 const UIPlayground = lazy(() => import("@/pages/UIPlayground"));
 const FaceLibrary = lazy(() => import("@/pages/FaceLibrary"));
+const Classification = lazy(() => import("@/pages/ClassificationModel"));
 const Logs = lazy(() => import("@/pages/Logs"));
 const AccessDenied = lazy(() => import("@/pages/AccessDenied"));

@@ -76,6 +77,7 @@ function DefaultAppView() {
               } />
               } />
               } />
+              <Route path="/classification" element={<Classification />} />
               } />
               } />
diff --git a/web/src/components/overlay/ClassificationSelectionDialog.tsx b/web/src/components/overlay/ClassificationSelectionDialog.tsx
new file mode 100644
index 000000000..7cb8ca156
--- /dev/null
+++ b/web/src/components/overlay/ClassificationSelectionDialog.tsx
@@ -0,0 +1,155 @@
+import {
+  Drawer,
+  DrawerClose,
+  DrawerContent,
+  DrawerDescription,
+  DrawerHeader,
+  DrawerTitle,
+  DrawerTrigger,
+} from "@/components/ui/drawer";
+import {
+  DropdownMenu,
+  DropdownMenuContent,
+  DropdownMenuItem,
+  DropdownMenuLabel,
+  DropdownMenuTrigger,
+} from "@/components/ui/dropdown-menu";
+import {
+  Tooltip,
+  TooltipContent,
+  TooltipTrigger,
+} from "@/components/ui/tooltip";
+import { isDesktop, isMobile } from "react-device-detect";
+import { LuPlus } from "react-icons/lu";
+import { useTranslation } from "react-i18next";
+import { cn } from "@/lib/utils";
+import React, { ReactNode, useCallback, useMemo, useState } from "react";
+import TextEntryDialog from "./dialog/TextEntryDialog";
+import { Button } from "../ui/button";
+import { MdCategory } from "react-icons/md";
+import axios from "axios";
+import { toast } from "sonner";
+
+type ClassificationSelectionDialogProps = {
+  className?: string;
+  classes: string[];
+  modelName: string;
+  image: string;
+  onRefresh: () => void;
+  children: ReactNode;
+};
+export default function ClassificationSelectionDialog({
+  className,
+  classes,
+  modelName,
+  image,
+  onRefresh,
+  children,
+}: ClassificationSelectionDialogProps) {
+  const { t } = useTranslation(["views/classificationModel"]);
+
+  const onCategorizeImage = useCallback(
+    (category: string) => {
+      axios
+        .post(`/classification/${modelName}/dataset/categorize`, {
+          category,
+          training_file: image,
+        })
+        .then((resp) => {
+          if (resp.status == 200) {
+            toast.success(t("toast.success.categorizedImage"), {
+              position: "top-center",
+            });
+            onRefresh();
+          }
+        })
+        .catch((error) => {
+          const errorMessage =
+            error.response?.data?.message ||
+            error.response?.data?.detail ||
+            "Unknown error";
+          toast.error(t("toast.error.categorizeFailed", { errorMessage }), {
+            position: "top-center",
+          });
+        });
+    },
+    [modelName, image, onRefresh, t],
+  );
+
+  const isChildButton = useMemo(
+    () => React.isValidElement(children) && children.type === Button,
+    [children],
+  );
+
+  // control
+  const [newFace, setNewFace] = useState(false);
+
+  // components
+  const Selector = isDesktop ? DropdownMenu : Drawer;
+  const SelectorTrigger = isDesktop ? DropdownMenuTrigger : DrawerTrigger;
+  const SelectorContent = isDesktop ? DropdownMenuContent : DrawerContent;
+  const SelectorItem = isDesktop
+    ? DropdownMenuItem
+    : (props: React.HTMLAttributes<HTMLDivElement>) => (
+ + ); + + return ( +
+ {newFace && ( + onCategorizeImage(newCat)} + /> + )} + + + + + {children} + + + {isMobile && ( + + Details + Details + + )} + {t("categorizeImageAs")} +
+ setNewFace(true)} + > + + {t("createCategory.new")} + + {classes.sort().map((category) => ( + onCategorizeImage(category)} + > + + {category} + + ))} +
+
+
+ {t("categorizeImage")} +
+
+ ); +} diff --git a/web/src/hooks/use-navigation.ts b/web/src/hooks/use-navigation.ts index 41ec7227f..d9bd6f6a4 100644 --- a/web/src/hooks/use-navigation.ts +++ b/web/src/hooks/use-navigation.ts @@ -6,7 +6,7 @@ import { isDesktop } from "react-device-detect"; import { FaCompactDisc, FaVideo } from "react-icons/fa"; import { IoSearch } from "react-icons/io5"; import { LuConstruction } from "react-icons/lu"; -import { MdVideoLibrary } from "react-icons/md"; +import { MdCategory, MdVideoLibrary } from "react-icons/md"; import { TbFaceId } from "react-icons/tb"; import useSWR from "swr"; @@ -16,6 +16,7 @@ export const ID_EXPLORE = 3; export const ID_EXPORT = 4; export const ID_PLAYGROUND = 5; export const ID_FACE_LIBRARY = 6; +export const ID_CLASSIFICATION = 7; export default function useNavigation( variant: "primary" | "secondary" = "primary", @@ -71,6 +72,14 @@ export default function useNavigation( url: "/faces", enabled: isDesktop && config?.face_recognition.enabled, }, + { + id: ID_CLASSIFICATION, + variant, + icon: MdCategory, + title: "menu.classification", + url: "/classification", + enabled: isDesktop, + }, ] as NavData[], [config?.face_recognition?.enabled, variant], ); diff --git a/web/src/pages/ClassificationModel.tsx b/web/src/pages/ClassificationModel.tsx new file mode 100644 index 000000000..c37d0b454 --- /dev/null +++ b/web/src/pages/ClassificationModel.tsx @@ -0,0 +1,18 @@ +import { useOverlayState } from "@/hooks/use-overlay-state"; +import { CustomClassificationModelConfig } from "@/types/frigateConfig"; +import ModelSelectionView from "@/views/classification/ModelSelectionView"; +import ModelTrainingView from "@/views/classification/ModelTrainingView"; + +export default function ClassificationModelPage() { + // training + + const [model, setModel] = useOverlayState( + "classificationModel", + ); + + if (model == undefined) { + return ; + } + + return ; +} diff --git a/web/src/types/frigateConfig.ts b/web/src/types/frigateConfig.ts index cf2bf1476..3ccc5b06d 100644 --- a/web/src/types/frigateConfig.ts +++ b/web/src/types/frigateConfig.ts @@ -279,6 +279,23 @@ export type CameraStreamingSettings = { volume: number; }; +export type CustomClassificationModelConfig = { + enabled: boolean; + name: string; + object_config: null | { + objects: string[]; + }; + state_config: null | { + cameras: { + [cameraName: string]: { + crop: [number, number, number, number]; + threshold: number; + }; + }; + motion: boolean; + }; +}; + export type GroupStreamingSettings = { [cameraName: string]: CameraStreamingSettings; }; @@ -316,6 +333,9 @@ export interface FrigateConfig { enabled: boolean; threshold: number; }; + custom: { + [modelKey: string]: CustomClassificationModelConfig; + }; }; database: { diff --git a/web/src/views/classification/ModelSelectionView.tsx b/web/src/views/classification/ModelSelectionView.tsx new file mode 100644 index 000000000..63133842a --- /dev/null +++ b/web/src/views/classification/ModelSelectionView.tsx @@ -0,0 +1,63 @@ +import ActivityIndicator from "@/components/indicators/activity-indicator"; +import { cn } from "@/lib/utils"; +import { + CustomClassificationModelConfig, + FrigateConfig, +} from "@/types/frigateConfig"; +import { useMemo } from "react"; +import { isMobile } from "react-device-detect"; +import useSWR from "swr"; + +type ModelSelectionViewProps = { + onClick: (model: CustomClassificationModelConfig) => void; +}; +export default function ModelSelectionView({ + onClick, +}: ModelSelectionViewProps) { + const { data: config } = useSWR("config", { + 
revalidateOnFocus: false, + }); + + const classificationConfigs = useMemo(() => { + if (!config) { + return []; + } + + return Object.values(config.classification.custom); + }, [config]); + + if (!config) { + return ; + } + + if (classificationConfigs.length == 0) { + return
You need to setup a custom model configuration.
; + } + + return ( +
+ {classificationConfigs.map((config) => ( +
onClick(config)} + onContextMenu={() => { + // e.stopPropagation(); + // e.preventDefault(); + // handleClickEvent(true); + }} + > +
+
+ {config.name} ({config.state_config != null ? "State" : "Object"}{" "} + Classification) +
+
+ ))} +
+ ); +} diff --git a/web/src/views/classification/ModelTrainingView.tsx b/web/src/views/classification/ModelTrainingView.tsx new file mode 100644 index 000000000..53ef7fa66 --- /dev/null +++ b/web/src/views/classification/ModelTrainingView.tsx @@ -0,0 +1,661 @@ +import { baseUrl } from "@/api/baseUrl"; +import TextEntryDialog from "@/components/overlay/dialog/TextEntryDialog"; +import { Button, buttonVariants } from "@/components/ui/button"; +import { + AlertDialog, + AlertDialogAction, + AlertDialogCancel, + AlertDialogContent, + AlertDialogDescription, + AlertDialogFooter, + AlertDialogHeader, + AlertDialogTitle, +} from "@/components/ui/alert-dialog"; +import { + Dialog, + DialogContent, + DialogDescription, + DialogHeader, + DialogTitle, +} from "@/components/ui/dialog"; +import { + DropdownMenu, + DropdownMenuContent, + DropdownMenuItem, + DropdownMenuSeparator, + DropdownMenuTrigger, +} from "@/components/ui/dropdown-menu"; +import { Toaster } from "@/components/ui/sonner"; +import { + Tooltip, + TooltipContent, + TooltipTrigger, +} from "@/components/ui/tooltip"; +import useKeyboardListener from "@/hooks/use-keyboard-listener"; +import useOptimisticState from "@/hooks/use-optimistic-state"; +import { cn } from "@/lib/utils"; +import { CustomClassificationModelConfig } from "@/types/frigateConfig"; +import { TooltipPortal } from "@radix-ui/react-tooltip"; +import axios from "axios"; +import { useCallback, useEffect, useMemo, useState } from "react"; +import { isDesktop, isMobile } from "react-device-detect"; +import { Trans, useTranslation } from "react-i18next"; +import { LuPencil, LuTrash2 } from "react-icons/lu"; +import { toast } from "sonner"; +import useSWR from "swr"; +import ClassificationSelectionDialog from "@/components/overlay/ClassificationSelectionDialog"; +import { TbCategoryPlus } from "react-icons/tb"; + +type ModelTrainingViewProps = { + model: CustomClassificationModelConfig; +}; +export default function ModelTrainingView({ model }: ModelTrainingViewProps) { + const { t } = useTranslation(["views/classificationModel"]); + const [page, setPage] = useState("train"); + const [pageToggle, setPageToggle] = useOptimisticState(page, setPage, 100); + + // dataset + + const { data: trainImages, mutate: refreshTrain } = useSWR( + `classification/${model.name}/train`, + ); + const { data: dataset, mutate: refreshDataset } = useSWR<{ + [id: string]: string[]; + }>(`classification/${model.name}/dataset`); + + // image multiselect + + const [selectedImages, setSelectedImages] = useState([]); + + const onClickImages = useCallback( + (images: string[], ctrl: boolean) => { + if (selectedImages.length == 0 && !ctrl) { + return; + } + + let newSelectedImages = [...selectedImages]; + + images.forEach((imageId) => { + const index = newSelectedImages.indexOf(imageId); + + if (index != -1) { + if (selectedImages.length == 1) { + newSelectedImages = []; + } else { + const copy = [ + ...newSelectedImages.slice(0, index), + ...newSelectedImages.slice(index + 1), + ]; + newSelectedImages = copy; + } + } else { + newSelectedImages.push(imageId); + } + }); + + setSelectedImages(newSelectedImages); + }, + [selectedImages, setSelectedImages], + ); + + // actions + + const trainModel = useCallback(() => { + axios.post(`classification/${model.name}/train`); + }, [model]); + + const [deleteDialogOpen, setDeleteDialogOpen] = useState( + null, + ); + + const onDelete = useCallback( + (ids: string[], isName: boolean = false) => { + const api = + pageToggle == "train" + ? 
`/classification/${model.name}/train/delete` + : `/classification/${model.name}/dataset/${pageToggle}/delete`; + + axios + .post(api, { ids }) + .then((resp) => { + setSelectedImages([]); + + if (resp.status == 200) { + if (isName) { + toast.success( + t("toast.success.deletedCategory", { count: ids.length }), + { + position: "top-center", + }, + ); + } else { + toast.success( + t("toast.success.deletedImage", { count: ids.length }), + { + position: "top-center", + }, + ); + } + + if (pageToggle == "train") { + refreshTrain(); + } else { + refreshDataset(); + } + } + }) + .catch((error) => { + const errorMessage = + error.response?.data?.message || + error.response?.data?.detail || + "Unknown error"; + if (isName) { + toast.error( + t("toast.error.deleteCategoryFailed", { errorMessage }), + { + position: "top-center", + }, + ); + } else { + toast.error(t("toast.error.deleteImageFailed", { errorMessage }), { + position: "top-center", + }); + } + }); + }, + [pageToggle, model, refreshTrain, refreshDataset, t], + ); + + // keyboard + + useKeyboardListener(["a", "Escape"], (key, modifiers) => { + if (modifiers.repeat || !modifiers.down) { + return; + } + + switch (key) { + case "a": + if (modifiers.ctrl) { + if (selectedImages.length) { + setSelectedImages([]); + } else { + setSelectedImages([ + ...(pageToggle === "train" + ? trainImages || [] + : dataset?.[pageToggle] || []), + ]); + } + } + break; + case "Escape": + setSelectedImages([]); + break; + } + }); + + useEffect(() => { + setSelectedImages([]); + }, [pageToggle]); + + return ( +
+ + + setDeleteDialogOpen(null)} + > + + + + {t( + pageToggle == "train" + ? "deleteTrainImages.title" + : "deleteDatasetImages.title", + )} + + + + + {pageToggle == "train" + ? "deleteTrainImages.desc" + : "deleteDatasetImages.desc"} + + + + + {t("button.cancel", { ns: "common" })} + + { + if (deleteDialogOpen) { + onDelete(deleteDialogOpen); + setDeleteDialogOpen(null); + } + }} + > + {t("button.delete", { ns: "common" })} + + + + + +
+ {}} + /> + {selectedImages?.length > 0 ? ( +
+
+
{`${selectedImages.length} selected`}
+
{"|"}
+
setSelectedImages([])} + > + {t("button.unselect", { ns: "common" })} +
+
+ +
+ ) : ( + + )} +
+ {pageToggle == "train" ? ( + + ) : ( + + )} +
+ ); +} + +type LibrarySelectorProps = { + pageToggle: string | undefined; + dataset: { [id: string]: string[] }; + trainImages: string[]; + setPageToggle: (toggle: string) => void; + onDelete: (ids: string[], isName: boolean) => void; + onRename: (old_name: string, new_name: string) => void; +}; +function LibrarySelector({ + pageToggle, + dataset, + trainImages, + setPageToggle, + onDelete, + onRename, +}: LibrarySelectorProps) { + const { t } = useTranslation(["views/classificationModel"]); + const [confirmDelete, setConfirmDelete] = useState(null); + const [renameFace, setRenameFace] = useState(null); + + const handleDeleteFace = useCallback( + (name: string) => { + // Get all image IDs for this face + const imageIds = dataset?.[name] || []; + + onDelete(imageIds, true); + setPageToggle("train"); + }, + [dataset, onDelete, setPageToggle], + ); + + const handleSetOpen = useCallback( + (open: boolean) => { + setRenameFace(open ? renameFace : null); + }, + [renameFace], + ); + + return ( + <> + !open && setConfirmDelete(null)} + > + + + {t("deleteCategory.title")} + + {t("deleteCategory.desc", { name: confirmDelete })} + + +
+ + +
+
+
+ + { + onRename(renameFace!, newName); + setRenameFace(null); + }} + defaultValue={renameFace || ""} + regexPattern={/^[\p{L}\p{N}\s'_-]{1,50}$/u} + regexErrorMessage={t("description.invalidName")} + /> + + + + + + + setPageToggle("train")} + > +
{t("train.title")}
+
+ ({trainImages.length}) +
+
+ {trainImages.length > 0 && Object.keys(dataset).length > 0 && ( + <> + +
+ {t("categories")} +
+ + )} + {Object.keys(dataset).map((id) => ( + +
setPageToggle(id)} + > + {id} + + ({dataset?.[id].length}) + +
+
+ + + + + + + {t("button.renameCategory")} + + + + + + + + + + {t("button.deleteCategory")} + + + +
+
+ ))} +
+
+ + ); +} + +type DatasetGridProps = { + modelName: string; + categoryName: string; + images: string[]; + selectedImages: string[]; + onClickImages: (images: string[], ctrl: boolean) => void; + onDelete: (ids: string[]) => void; +}; +function DatasetGrid({ + modelName, + categoryName, + images, + selectedImages, + onClickImages, + onDelete, +}: DatasetGridProps) { + const { t } = useTranslation(["views/classificationModel"]); + + return ( +
+ {images.map((image) => ( +
{ + e.stopPropagation(); + + if (e.ctrlKey || e.metaKey) { + onClickImages([image], true); + } + }} + > +
+ +
+
+
+
+ + + { + e.stopPropagation(); + onDelete([image]); + }} + /> + + + {t("button.deleteClassificationAttempts")} + + +
+
+
+
+ ))} +
+ ); +} + +type TrainGridProps = { + model: CustomClassificationModelConfig; + classes: string[]; + trainImages: string[]; + selectedImages: string[]; + onClickImages: (images: string[], ctrl: boolean) => void; + onRefresh: () => void; + onDelete: (ids: string[]) => void; +}; +function TrainGrid({ + model, + classes, + trainImages, + selectedImages, + onClickImages, + onRefresh, + onDelete, +}: TrainGridProps) { + const { t } = useTranslation(["views/classificationModel"]); + + const trainData = useMemo( + () => + trainImages + .map((raw) => { + const parts = raw.replaceAll(".webp", "").split("-"); + return { + raw, + timestamp: parts[0], + label: parts[1], + score: Number.parseFloat(parts[2]) * 100, + }; + }) + .sort((a, b) => b.timestamp.localeCompare(a.timestamp)), + [trainImages], + ); + + return ( +
+ {trainData?.map((data) => ( +
{ + e.stopPropagation(); + onClickImages([data.raw], e.ctrlKey || e.metaKey); + }} + > +
+ +
+
+
+
+
{data.label}
+
{data.score}%
+
+
+ + + + + + { + e.stopPropagation(); + onDelete([data.raw]); + }} + /> + + + {t("button.deleteClassificationAttempts")} + + +
+
+
+
+ ))} +
+ ); +} From 765a28d8121b292fab94f55319d53d9e16005470 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Thu, 5 Jun 2025 09:13:12 -0600 Subject: [PATCH 017/530] Live classification model training (#18583) * Implement model training via ZMQ and add model states to represent training * Get model updates working * Improve toasts and model state * Clean up logging * Add back in --- frigate/api/classification.py | 10 ++- frigate/comms/embeddings_updater.py | 16 +++-- frigate/config/logger.py | 2 + .../real_time/custom_classification.py | 68 ++++++++++++++++++- frigate/embeddings/__init__.py | 5 ++ frigate/types.py | 2 + frigate/util/classification.py | 14 +++- .../locales/en/views/classificationModel.json | 7 +- web/src/types/ws.ts | 4 +- .../classification/ModelTrainingView.tsx | 62 ++++++++++++++++- 10 files changed, 168 insertions(+), 22 deletions(-) diff --git a/frigate/api/classification.py b/frigate/api/classification.py index da5d11d88..f234e5cae 100644 --- a/frigate/api/classification.py +++ b/frigate/api/classification.py @@ -7,7 +7,7 @@ import shutil from typing import Any import cv2 -from fastapi import APIRouter, BackgroundTasks, Depends, Request, UploadFile +from fastapi import APIRouter, Depends, Request, UploadFile from fastapi.responses import JSONResponse from pathvalidate import sanitize_filename from peewee import DoesNotExist @@ -24,7 +24,6 @@ from frigate.config.camera import DetectConfig from frigate.const import CLIPS_DIR, FACE_DIR from frigate.embeddings import EmbeddingsContext from frigate.models import Event -from frigate.util.classification import train_classification_model from frigate.util.path import get_event_snapshot logger = logging.getLogger(__name__) @@ -494,9 +493,7 @@ def get_classification_images(name: str): @router.post("/classification/{name}/train") -async def train_configured_model( - request: Request, name: str, background_tasks: BackgroundTasks -): +async def train_configured_model(request: Request, name: str): config: FrigateConfig = request.app.frigate_config if name not in config.classification.custom: @@ -510,7 +507,8 @@ async def train_configured_model( status_code=404, ) - background_tasks.add_task(train_classification_model, name) + context: EmbeddingsContext = request.app.embeddings + context.start_classification_training(name) return JSONResponse( content={"success": True, "message": "Started classification model training."}, status_code=200, diff --git a/frigate/comms/embeddings_updater.py b/frigate/comms/embeddings_updater.py index 00bc88b3d..5edb9e77d 100644 --- a/frigate/comms/embeddings_updater.py +++ b/frigate/comms/embeddings_updater.py @@ -9,16 +9,22 @@ SOCKET_REP_REQ = "ipc:///tmp/cache/embeddings" class EmbeddingsRequestEnum(Enum): + # audio + transcribe_audio = "transcribe_audio" + # custom classification + train_classification = "train_classification" + # face clear_face_classifier = "clear_face_classifier" - embed_description = "embed_description" - embed_thumbnail = "embed_thumbnail" - generate_search = "generate_search" recognize_face = "recognize_face" register_face = "register_face" reprocess_face = "reprocess_face" - reprocess_plate = "reprocess_plate" + # semantic search + embed_description = "embed_description" + embed_thumbnail = "embed_thumbnail" + generate_search = "generate_search" reindex = "reindex" - transcribe_audio = "transcribe_audio" + # LPR + reprocess_plate = "reprocess_plate" class EmbeddingsResponder: diff --git a/frigate/config/logger.py b/frigate/config/logger.py index e6e1c06d3..a3eed23d0 100644 --- 
a/frigate/config/logger.py +++ b/frigate/config/logger.py @@ -29,7 +29,9 @@ class LoggerConfig(FrigateBaseModel): logging.getLogger().setLevel(self.default.value.upper()) log_levels = { + "absl": LogLevel.error, "httpx": LogLevel.error, + "tensorflow": LogLevel.error, "werkzeug": LogLevel.error, "ws4py": LogLevel.error, **self.logs, diff --git a/frigate/data_processing/real_time/custom_classification.py b/frigate/data_processing/real_time/custom_classification.py index 0e254ab0d..df4baf70b 100644 --- a/frigate/data_processing/real_time/custom_classification.py +++ b/frigate/data_processing/real_time/custom_classification.py @@ -3,11 +3,13 @@ import datetime import logging import os +import threading from typing import Any import cv2 import numpy as np +from frigate.comms.embeddings_updater import EmbeddingsRequestEnum from frigate.comms.event_metadata_updater import ( EventMetadataPublisher, EventMetadataTypeEnum, @@ -15,8 +17,10 @@ from frigate.comms.event_metadata_updater import ( from frigate.comms.inter_process import InterProcessRequestor from frigate.config import FrigateConfig from frigate.config.classification import CustomClassificationConfig -from frigate.const import CLIPS_DIR, MODEL_CACHE_DIR +from frigate.const import CLIPS_DIR, MODEL_CACHE_DIR, UPDATE_MODEL_STATE +from frigate.types import ModelStatusTypesEnum from frigate.util.builtin import load_labels +from frigate.util.classification import train_classification_model from frigate.util.object import box_overlaps, calculate_region from ..types import DataProcessorMetrics @@ -63,6 +67,18 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi): prefill=0, ) + def __retrain_model(self) -> None: + train_classification_model(self.model_config.name) + self.__build_detector() + self.requestor.send_data( + UPDATE_MODEL_STATE, + { + "model": self.model_config.name, + "state": ModelStatusTypesEnum.complete, + }, + ) + logger.info(f"Successfully loaded updated model for {self.model_config.name}") + def process_frame(self, frame_data: dict[str, Any], frame: np.ndarray): camera = frame_data.get("camera") @@ -143,7 +159,24 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi): ) def handle_request(self, topic, request_data): - return None + if topic == EmbeddingsRequestEnum.train_classification.value: + if request_data.get("model_name") == self.model_config.name: + self.requestor.send_data( + UPDATE_MODEL_STATE, + { + "model": self.model_config.name, + "state": ModelStatusTypesEnum.training, + }, + ) + threading.Thread(target=self.__retrain_model).start() + return { + "success": True, + "message": f"Began training {self.model_config.name} model.", + } + else: + return None + else: + return None def expire_object(self, object_id, camera): pass @@ -182,6 +215,18 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi): prefill=0, ) + def __retrain_model(self) -> None: + train_classification_model(self.model_config.name) + self.__build_detector() + self.requestor.send_data( + UPDATE_MODEL_STATE, + { + "model": self.model_config.name, + "state": ModelStatusTypesEnum.complete, + }, + ) + logger.info(f"Successfully loaded updated model for {self.model_config.name}") + def process_frame(self, obj_data, frame): if obj_data["label"] not in self.model_config.object_config.objects: return @@ -236,7 +281,24 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi): self.detected_objects[obj_data["id"]] = score def handle_request(self, topic, request_data): - return None + if topic == 
EmbeddingsRequestEnum.train_classification.value: + if request_data.get("model_name") == self.model_config.name: + self.requestor.send_data( + UPDATE_MODEL_STATE, + { + "model": self.model_config.name, + "state": ModelStatusTypesEnum.training, + }, + ) + threading.Thread(target=self.__retrain_model).start() + return { + "success": True, + "message": f"Began training {self.model_config.name} model.", + } + else: + return None + else: + return None def expire_object(self, object_id, camera): if object_id in self.detected_objects: diff --git a/frigate/embeddings/__init__.py b/frigate/embeddings/__init__.py index a86edf76c..5c2a9005f 100644 --- a/frigate/embeddings/__init__.py +++ b/frigate/embeddings/__init__.py @@ -301,6 +301,11 @@ class EmbeddingsContext: def reindex_embeddings(self) -> dict[str, Any]: return self.requestor.send_data(EmbeddingsRequestEnum.reindex.value, {}) + def start_classification_training(self, model_name: str) -> dict[str, Any]: + return self.requestor.send_data( + EmbeddingsRequestEnum.train_classification.value, {"model_name": model_name} + ) + def transcribe_audio(self, event: dict[str, any]) -> dict[str, any]: return self.requestor.send_data( EmbeddingsRequestEnum.transcribe_audio.value, {"event": event} diff --git a/frigate/types.py b/frigate/types.py index ee48cc02b..a9e27ba90 100644 --- a/frigate/types.py +++ b/frigate/types.py @@ -21,6 +21,8 @@ class ModelStatusTypesEnum(str, Enum): downloading = "downloading" downloaded = "downloaded" error = "error" + training = "training" + complete = "complete" class TrackedObjectUpdateTypesEnum(str, Enum): diff --git a/frigate/util/classification.py b/frigate/util/classification.py index a8624870b..92da7c93e 100644 --- a/frigate/util/classification.py +++ b/frigate/util/classification.py @@ -1,7 +1,7 @@ """Util for classification models.""" -import logging import os +import sys import cv2 import numpy as np @@ -50,7 +50,13 @@ def train_classification_model(model_name: str) -> bool: ] ) - tf.get_logger().setLevel(logging.ERROR) + # TF and Keras are very loud with logging + # we want to avoid these logs so we + # temporarily redirect stdout / stderr + original_stdout = sys.stdout + original_stderr = sys.stderr + sys.stdout = open(os.devnull, "w") + sys.stderr = open(os.devnull, "w") # Start with imagenet base model with 35% of channels in each layer base_model = MobileNetV2( @@ -112,3 +118,7 @@ def train_classification_model(model_name: str) -> bool: # write model with open(os.path.join(model_dir, "model.tflite"), "wb") as f: f.write(tflite_model) + + # restore original stdout / stderr + sys.stdout = original_stdout + sys.stderr = original_stderr diff --git a/web/public/locales/en/views/classificationModel.json b/web/public/locales/en/views/classificationModel.json index eb09ecaa0..0af0179b9 100644 --- a/web/public/locales/en/views/classificationModel.json +++ b/web/public/locales/en/views/classificationModel.json @@ -9,12 +9,15 @@ "success": { "deletedCategory": "Deleted Class", "deletedImage": "Deleted Images", - "categorizedImage": "Successfully Classified Image" + "categorizedImage": "Successfully Classified Image", + "trainedModel": "Successfully trained model.", + "trainingModel": "Successfully started model training." 
}, "error": { "deleteImageFailed": "Failed to delete: {{errorMessage}}", "deleteCategoryFailed": "Failed to delete class: {{errorMessage}}", - "categorizeFailed": "Failed to categorize image: {{errorMessage}}" + "categorizeFailed": "Failed to categorize image: {{errorMessage}}", + "trainingFailed": "Failed to start model training: {{errorMessage}}" } }, "deleteCategory": { diff --git a/web/src/types/ws.ts b/web/src/types/ws.ts index d1e810494..06ec9ae1d 100644 --- a/web/src/types/ws.ts +++ b/web/src/types/ws.ts @@ -73,7 +73,9 @@ export type ModelState = | "not_downloaded" | "downloading" | "downloaded" - | "error"; + | "error" + | "training" + | "complete"; export type EmbeddingsReindexProgressType = { thumbnails: number; diff --git a/web/src/views/classification/ModelTrainingView.tsx b/web/src/views/classification/ModelTrainingView.tsx index 53ef7fa66..1f62a4f53 100644 --- a/web/src/views/classification/ModelTrainingView.tsx +++ b/web/src/views/classification/ModelTrainingView.tsx @@ -45,6 +45,9 @@ import { toast } from "sonner"; import useSWR from "swr"; import ClassificationSelectionDialog from "@/components/overlay/ClassificationSelectionDialog"; import { TbCategoryPlus } from "react-icons/tb"; +import { useModelState } from "@/api/ws"; +import { ModelState } from "@/types/ws"; +import ActivityIndicator from "@/components/indicators/activity-indicator"; type ModelTrainingViewProps = { model: CustomClassificationModelConfig; @@ -54,6 +57,33 @@ export default function ModelTrainingView({ model }: ModelTrainingViewProps) { const [page, setPage] = useState("train"); const [pageToggle, setPageToggle] = useOptimisticState(page, setPage, 100); + // model state + + const [wasTraining, setWasTraining] = useState(false); + const { payload: lastModelState } = useModelState(model.name, true); + const modelState = useMemo(() => { + if (!lastModelState || lastModelState == "downloaded") { + return "complete"; + } + + return lastModelState; + }, [lastModelState]); + + useEffect(() => { + if (!wasTraining) { + return; + } + + if (modelState == "complete") { + toast.success(t("toast.success.trainedModel"), { + position: "top-center", + }); + setWasTraining(false); + } + // only refresh when modelState changes + // eslint-disable-next-line react-hooks/exhaustive-deps + }, [modelState]); + // dataset const { data: trainImages, mutate: refreshTrain } = useSWR( @@ -101,8 +131,27 @@ export default function ModelTrainingView({ model }: ModelTrainingViewProps) { // actions const trainModel = useCallback(() => { - axios.post(`classification/${model.name}/train`); - }, [model]); + axios + .post(`classification/${model.name}/train`) + .then((resp) => { + if (resp.status == 200) { + setWasTraining(true); + toast.success(t("toast.success.trainingModel"), { + position: "top-center", + }); + } + }) + .catch((error) => { + const errorMessage = + error.response?.data?.message || + error.response?.data?.detail || + "Unknown error"; + + toast.error(t("toast.error.trainingFailed", { errorMessage }), { + position: "top-center", + }); + }); + }, [model, t]); const [deleteDialogOpen, setDeleteDialogOpen] = useState( null, @@ -274,7 +323,14 @@ export default function ModelTrainingView({ model }: ModelTrainingViewProps) {
) : ( - + )} {pageToggle == "train" ? ( From b1a65c88e8b37fdc8a7fc9ba42505d63bb2e8600 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Fri, 6 Jun 2025 10:29:44 -0600 Subject: [PATCH 018/530] Classification Model Metrics (#18595) * Add speed and rate metrics for custom classification models * Use metrics for classification models * Use keys * Cast to list --- frigate/app.py | 3 +- .../real_time/custom_classification.py | 28 ++++++++++++++++++- frigate/data_processing/types.py | 11 +++++++- frigate/stats/util.py | 8 ++++++ 4 files changed, 47 insertions(+), 3 deletions(-) diff --git a/frigate/app.py b/frigate/app.py index b6dd6c7b9..f534de6e0 100644 --- a/frigate/app.py +++ b/frigate/app.py @@ -92,11 +92,12 @@ class FrigateApp: self.log_queue: Queue = mp.Queue() self.camera_metrics: dict[str, CameraMetrics] = {} self.embeddings_metrics: DataProcessorMetrics | None = ( - DataProcessorMetrics() + DataProcessorMetrics(list(config.classification.custom.keys())) if ( config.semantic_search.enabled or config.lpr.enabled or config.face_recognition.enabled + or len(config.classification.custom) > 0 ) else None ) diff --git a/frigate/data_processing/real_time/custom_classification.py b/frigate/data_processing/real_time/custom_classification.py index df4baf70b..a718956e2 100644 --- a/frigate/data_processing/real_time/custom_classification.py +++ b/frigate/data_processing/real_time/custom_classification.py @@ -19,7 +19,7 @@ from frigate.config import FrigateConfig from frigate.config.classification import CustomClassificationConfig from frigate.const import CLIPS_DIR, MODEL_CACHE_DIR, UPDATE_MODEL_STATE from frigate.types import ModelStatusTypesEnum -from frigate.util.builtin import load_labels +from frigate.util.builtin import EventsPerSecond, InferenceSpeed, load_labels from frigate.util.classification import train_classification_model from frigate.util.object import box_overlaps, calculate_region @@ -51,6 +51,10 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi): self.tensor_input_details: dict[str, Any] = None self.tensor_output_details: dict[str, Any] = None self.labelmap: dict[int, str] = {} + self.classifications_per_second = EventsPerSecond() + self.inference_speed = InferenceSpeed( + self.metrics.classification_speeds[self.model_config.name] + ) self.last_run = datetime.datetime.now().timestamp() self.__build_detector() @@ -66,6 +70,7 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi): os.path.join(self.model_dir, "labelmap.txt"), prefill=0, ) + self.classifications_per_second.start() def __retrain_model(self) -> None: train_classification_model(self.model_config.name) @@ -79,7 +84,14 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi): ) logger.info(f"Successfully loaded updated model for {self.model_config.name}") + def __update_metrics(self, duration: float) -> None: + self.classifications_per_second.update() + self.inference_speed.update(duration) + def process_frame(self, frame_data: dict[str, Any], frame: np.ndarray): + self.metrics.classification_cps[ + self.model_config.name + ].value = self.classifications_per_second.eps() camera = frame_data.get("camera") if camera not in self.model_config.state_config.cameras: @@ -143,6 +155,7 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi): probs = res / res.sum(axis=0) best_id = np.argmax(probs) score = round(probs[best_id], 2) + self.__update_metrics(datetime.datetime.now().timestamp() - now) write_classification_attempt( self.train_dir, @@ -200,6 +213,10 @@ class 
CustomObjectClassificationProcessor(RealTimeProcessorApi): self.tensor_output_details: dict[str, Any] = None self.detected_objects: dict[str, float] = {} self.labelmap: dict[int, str] = {} + self.classifications_per_second = EventsPerSecond() + self.inference_speed = InferenceSpeed( + self.metrics.classification_speeds[self.model_config.name] + ) self.__build_detector() def __build_detector(self) -> None: @@ -227,7 +244,15 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi): ) logger.info(f"Successfully loaded updated model for {self.model_config.name}") + def __update_metrics(self, duration: float) -> None: + self.classifications_per_second.update() + self.inference_speed.update(duration) + def process_frame(self, obj_data, frame): + self.metrics.classification_cps[ + self.model_config.name + ].value = self.classifications_per_second.eps() + if obj_data["label"] not in self.model_config.object_config.objects: return @@ -261,6 +286,7 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi): best_id = np.argmax(probs) score = round(probs[best_id], 2) previous_score = self.detected_objects.get(obj_data["id"], 0.0) + self.__update_metrics(datetime.datetime.now().timestamp() - now) write_classification_attempt( self.train_dir, diff --git a/frigate/data_processing/types.py b/frigate/data_processing/types.py index 5d083b32e..783b0798e 100644 --- a/frigate/data_processing/types.py +++ b/frigate/data_processing/types.py @@ -20,8 +20,10 @@ class DataProcessorMetrics: alpr_pps: Synchronized yolov9_lpr_speed: Synchronized yolov9_lpr_pps: Synchronized + classification_speeds: dict[str, Synchronized] + classification_cps: dict[str, Synchronized] - def __init__(self): + def __init__(self, custom_classification_models: list[str]): self.image_embeddings_speed = mp.Value("d", 0.0) self.image_embeddings_eps = mp.Value("d", 0.0) self.text_embeddings_speed = mp.Value("d", 0.0) @@ -33,6 +35,13 @@ class DataProcessorMetrics: self.yolov9_lpr_speed = mp.Value("d", 0.0) self.yolov9_lpr_pps = mp.Value("d", 0.0) + if custom_classification_models: + self.classification_speeds = {} + self.classification_cps = {} + for key in custom_classification_models: + self.classification_speeds[key] = mp.Value("d", 0.0) + self.classification_cps[key] = mp.Value("d", 0.0) + class DataProcessorModelRunner: def __init__(self, requestor, device: str = "CPU", model_size: str = "large"): diff --git a/frigate/stats/util.py b/frigate/stats/util.py index e098bc541..f5807e1e6 100644 --- a/frigate/stats/util.py +++ b/frigate/stats/util.py @@ -354,6 +354,14 @@ def stats_snapshot( embeddings_metrics.yolov9_lpr_pps.value, 2 ) + for key in embeddings_metrics.classification_speeds.keys(): + stats["embeddings"][f"{key}_classification_speed"] = round( + embeddings_metrics.classification_speeds[key].value * 1000, 2 + ) + stats["embeddings"][f"{key}_classification"] = round( + embeddings_metrics.classification_cps[key].value, 2 + ) + get_processing_stats(config, stats, hwaccel_errors) stats["service"] = { From 7ce26087f7452ceb8d152413083f5f0d2c3ea043 Mon Sep 17 00:00:00 2001 From: Jimmy Date: Fri, 6 Jun 2025 14:41:04 -0500 Subject: [PATCH 019/530] Add Mesa Teflon as a TFLite detector (#18310) * Refactor common functions for tflite detector implementations * Add detector using mesa teflon delegate Non-EdgeTPU TFLite can use the standard .tflite format * Add mesa-teflon-delegate from bookworm-backports to arm64 images --- docker/main/install_deps.sh | 8 +++ frigate/config/config.py | 4 +- frigate/detectors/detector_utils.py | 
74 +++++++++++++++++++++++++ frigate/detectors/plugins/cpu_tfl.py | 36 ++---------- frigate/detectors/plugins/teflon_tfl.py | 38 +++++++++++++ 5 files changed, 128 insertions(+), 32 deletions(-) create mode 100644 frigate/detectors/detector_utils.py create mode 100644 frigate/detectors/plugins/teflon_tfl.py diff --git a/docker/main/install_deps.sh b/docker/main/install_deps.sh index aed11dff4..bd9f363e9 100755 --- a/docker/main/install_deps.sh +++ b/docker/main/install_deps.sh @@ -31,6 +31,14 @@ unset DEBIAN_FRONTEND yes | dpkg -i /tmp/libedgetpu1-max.deb && export DEBIAN_FRONTEND=noninteractive rm /tmp/libedgetpu1-max.deb +# install mesa-teflon-delegate from bookworm-backports +# Only available for arm64 at the moment +if [[ "${TARGETARCH}" == "arm64" ]]; then + echo "deb http://deb.debian.org/debian bookworm-backports main" | tee /etc/apt/sources.list.d/bookworm-backports.list + apt-get -qq update + apt-get -qq install --no-install-recommends --no-install-suggests -y mesa-teflon-delegate/bookworm-backports +fi + # ffmpeg -> amd64 if [[ "${TARGETARCH}" == "amd64" ]]; then mkdir -p /usr/lib/ffmpeg/5.0 diff --git a/frigate/config/config.py b/frigate/config/config.py index 49e57f3cf..62c931c96 100644 --- a/frigate/config/config.py +++ b/frigate/config/config.py @@ -487,7 +487,9 @@ class FrigateConfig(FrigateBaseModel): model_config["path"] = detector_config.model_path if "path" not in model_config: - if detector_config.type == "cpu": + if detector_config.type == "cpu" or detector_config.type.endswith( + "_tfl" + ): model_config["path"] = "/cpu_model.tflite" elif detector_config.type == "edgetpu": model_config["path"] = "/edgetpu_model.tflite" diff --git a/frigate/detectors/detector_utils.py b/frigate/detectors/detector_utils.py new file mode 100644 index 000000000..d732de871 --- /dev/null +++ b/frigate/detectors/detector_utils.py @@ -0,0 +1,74 @@ +import logging +import os + +import numpy as np + +try: + from tflite_runtime.interpreter import Interpreter, load_delegate +except ModuleNotFoundError: + from tensorflow.lite.python.interpreter import Interpreter, load_delegate + + +logger = logging.getLogger(__name__) + + +def tflite_init(self, interpreter): + self.interpreter = interpreter + + self.interpreter.allocate_tensors() + + self.tensor_input_details = self.interpreter.get_input_details() + self.tensor_output_details = self.interpreter.get_output_details() + + +def tflite_detect_raw(self, tensor_input): + self.interpreter.set_tensor(self.tensor_input_details[0]["index"], tensor_input) + self.interpreter.invoke() + + boxes = self.interpreter.tensor(self.tensor_output_details[0]["index"])()[0] + class_ids = self.interpreter.tensor(self.tensor_output_details[1]["index"])()[0] + scores = self.interpreter.tensor(self.tensor_output_details[2]["index"])()[0] + count = int(self.interpreter.tensor(self.tensor_output_details[3]["index"])()[0]) + + detections = np.zeros((20, 6), np.float32) + + for i in range(count): + if scores[i] < 0.4 or i == 20: + break + detections[i] = [ + class_ids[i], + float(scores[i]), + boxes[i][0], + boxes[i][1], + boxes[i][2], + boxes[i][3], + ] + + return detections + + +def tflite_load_delegate_interpreter( + delegate_library: str, detector_config, device_config +): + try: + logger.info("Attempting to load NPU") + tf_delegate = load_delegate(delegate_library, device_config) + logger.info("NPU found") + interpreter = Interpreter( + model_path=detector_config.model.path, + experimental_delegates=[tf_delegate], + ) + return interpreter + except ValueError: + _, ext = 
os.path.splitext(detector_config.model.path) + + if ext and ext != ".tflite": + logger.error( + "Incorrect model used with NPU. Only .tflite models can be used with a TFLite delegate." + ) + else: + logger.error( + "No NPU was detected. If you do not have a TFLite device yet, you must configure CPU detectors." + ) + + raise diff --git a/frigate/detectors/plugins/cpu_tfl.py b/frigate/detectors/plugins/cpu_tfl.py index 8a54363e1..fc8db0f4b 100644 --- a/frigate/detectors/plugins/cpu_tfl.py +++ b/frigate/detectors/plugins/cpu_tfl.py @@ -1,12 +1,13 @@ import logging -import numpy as np from pydantic import Field from typing_extensions import Literal from frigate.detectors.detection_api import DetectionApi from frigate.detectors.detector_config import BaseDetectorConfig +from ..detector_utils import tflite_detect_raw, tflite_init + try: from tflite_runtime.interpreter import Interpreter except ModuleNotFoundError: @@ -27,39 +28,12 @@ class CpuTfl(DetectionApi): type_key = DETECTOR_KEY def __init__(self, detector_config: CpuDetectorConfig): - self.interpreter = Interpreter( + interpreter = Interpreter( model_path=detector_config.model.path, num_threads=detector_config.num_threads or 3, ) - self.interpreter.allocate_tensors() - - self.tensor_input_details = self.interpreter.get_input_details() - self.tensor_output_details = self.interpreter.get_output_details() + tflite_init(self, interpreter) def detect_raw(self, tensor_input): - self.interpreter.set_tensor(self.tensor_input_details[0]["index"], tensor_input) - self.interpreter.invoke() - - boxes = self.interpreter.tensor(self.tensor_output_details[0]["index"])()[0] - class_ids = self.interpreter.tensor(self.tensor_output_details[1]["index"])()[0] - scores = self.interpreter.tensor(self.tensor_output_details[2]["index"])()[0] - count = int( - self.interpreter.tensor(self.tensor_output_details[3]["index"])()[0] - ) - - detections = np.zeros((20, 6), np.float32) - - for i in range(count): - if scores[i] < 0.4 or i == 20: - break - detections[i] = [ - class_ids[i], - float(scores[i]), - boxes[i][0], - boxes[i][1], - boxes[i][2], - boxes[i][3], - ] - - return detections + return tflite_detect_raw(self, tensor_input) diff --git a/frigate/detectors/plugins/teflon_tfl.py b/frigate/detectors/plugins/teflon_tfl.py new file mode 100644 index 000000000..7e29d6630 --- /dev/null +++ b/frigate/detectors/plugins/teflon_tfl.py @@ -0,0 +1,38 @@ +import logging + +from typing_extensions import Literal + +from frigate.detectors.detection_api import DetectionApi +from frigate.detectors.detector_config import BaseDetectorConfig + +from ..detector_utils import ( + tflite_detect_raw, + tflite_init, + tflite_load_delegate_interpreter, +) + +logger = logging.getLogger(__name__) + +# Use _tfl suffix to default tflite model +DETECTOR_KEY = "teflon_tfl" + + +class TeflonDetectorConfig(BaseDetectorConfig): + type: Literal[DETECTOR_KEY] + + +class TeflonTfl(DetectionApi): + type_key = DETECTOR_KEY + + def __init__(self, detector_config: TeflonDetectorConfig): + # Location in Debian's mesa-teflon-delegate + delegate_library = "/usr/lib/teflon/libteflon.so" + device_config = {} + + interpreter = tflite_load_delegate_interpreter( + delegate_library, detector_config, device_config + ) + tflite_init(self, interpreter) + + def detect_raw(self, tensor_input): + return tflite_detect_raw(self, tensor_input) From 13b760346a20c77c1bc0162f3f46cfec0bc2cb37 Mon Sep 17 00:00:00 2001 From: FL42 <46161216+fl42@users.noreply.github.com> Date: Sat, 7 Jun 2025 20:43:29 +0200 Subject: [PATCH 
020/530] feat: enable using GenAI for cameras with GenAI disabled from the API (#18616) --- docs/docs/configuration/genai.md | 2 +- .../api/defs/query/regenerate_query_parameters.py | 6 +++++- frigate/api/event.py | 5 +++-- frigate/embeddings/maintainer.py | 14 ++++++++++---- 4 files changed, 19 insertions(+), 8 deletions(-) diff --git a/docs/docs/configuration/genai.md b/docs/docs/configuration/genai.md index f76c075b7..51c0fee10 100644 --- a/docs/docs/configuration/genai.md +++ b/docs/docs/configuration/genai.md @@ -9,7 +9,7 @@ Requests for a description are sent off automatically to your AI provider at the ## Configuration -Generative AI can be enabled for all cameras or only for specific cameras. There are currently 3 native providers available to integrate with Frigate. Other providers that support the OpenAI standard API can also be used. See the OpenAI section below. +Generative AI can be enabled for all cameras or only for specific cameras. If GenAI is disabled for a camera, you can still manually generate descriptions for events using the HTTP API. There are currently 3 native providers available to integrate with Frigate. Other providers that support the OpenAI standard API can also be used. See the OpenAI section below. To use Generative AI, you must define a single provider at the global level of your Frigate configuration. If the provider you choose requires an API key, you may either directly paste it in your configuration, or store it in an environment variable prefixed with `FRIGATE_`. diff --git a/frigate/api/defs/query/regenerate_query_parameters.py b/frigate/api/defs/query/regenerate_query_parameters.py index bcce47b1b..af50ada2c 100644 --- a/frigate/api/defs/query/regenerate_query_parameters.py +++ b/frigate/api/defs/query/regenerate_query_parameters.py @@ -1,9 +1,13 @@ from typing import Optional -from pydantic import BaseModel +from pydantic import BaseModel, Field from frigate.events.types import RegenerateDescriptionEnum class RegenerateQueryParameters(BaseModel): source: Optional[RegenerateDescriptionEnum] = RegenerateDescriptionEnum.thumbnails + force: Optional[bool] = Field( + default=False, + description="Force (re)generating the description even if GenAI is disabled for this camera.", + ) diff --git a/frigate/api/event.py b/frigate/api/event.py index 27353e4b5..24a6c6f4a 100644 --- a/frigate/api/event.py +++ b/frigate/api/event.py @@ -1234,9 +1234,10 @@ def regenerate_description( camera_config = request.app.frigate_config.cameras[event.camera] - if camera_config.genai.enabled: + if camera_config.genai.enabled or params.force: request.app.event_metadata_updater.publish( - EventMetadataTypeEnum.regenerate_description, (event.id, params.source) + EventMetadataTypeEnum.regenerate_description, + (event.id, params.source, params.force), ) return JSONResponse( diff --git a/frigate/embeddings/maintainer.py b/frigate/embeddings/maintainer.py index 9a2378221..ce81c2bc4 100644 --- a/frigate/embeddings/maintainer.py +++ b/frigate/embeddings/maintainer.py @@ -473,11 +473,11 @@ class EmbeddingMaintainer(threading.Thread): if topic is None: return - event_id, source = payload + event_id, source, force = payload if event_id: self.handle_regenerate_description( - event_id, RegenerateDescriptionEnum(source) + event_id, RegenerateDescriptionEnum(source), force ) def _process_frame_updates(self) -> None: @@ -678,15 +678,21 @@ class EmbeddingMaintainer(threading.Thread): except Exception: return None - def handle_regenerate_description(self, event_id: str, source: str) -> None: + 
def handle_regenerate_description( + self, event_id: str, source: str, force: bool + ) -> None: try: event: Event = Event.get(Event.id == event_id) except DoesNotExist: logger.error(f"Event {event_id} not found for description regeneration") return + if self.genai_client is None: + logger.error("GenAI not enabled") + return + camera_config = self.config.cameras[event.camera] - if not camera_config.genai.enabled or self.genai_client is None: + if not camera_config.genai.enabled and not force: logger.error(f"GenAI not enabled for camera {event.camera}") return From 937459be479ef0cf963b42a126ced341999a0afe Mon Sep 17 00:00:00 2001 From: FL42 <46161216+fl42@users.noreply.github.com> Date: Sun, 8 Jun 2025 14:55:29 +0200 Subject: [PATCH 021/530] fix: Initialize GenAI client if GenAI is enabled globally (#18623) --- frigate/genai/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frigate/genai/__init__.py b/frigate/genai/__init__.py index 2c0aadbd9..28ea4af6e 100644 --- a/frigate/genai/__init__.py +++ b/frigate/genai/__init__.py @@ -63,7 +63,7 @@ def get_genai_client(config: FrigateConfig) -> Optional[GenAIClient]: c for c in config.cameras.values() if c.enabled and c.genai.enabled ] - if genai_cameras: + if genai_cameras or genai_config.enabled: load_providers() provider = PROVIDERS.get(genai_config.provider) if provider: From 40ab7d6c3807819085aab862828e404ac25a08c2 Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Sun, 8 Jun 2025 13:06:17 -0500 Subject: [PATCH 022/530] Make Birdseye clickable (#18628) * keep track of layout changes and publish on change * websocket hook * clickable overlay div to navigate to full camera view --- frigate/comms/dispatcher.py | 13 ++++ frigate/const.py | 1 + frigate/output/birdseye.py | 66 +++++++++++----- web/src/api/ws.tsx | 34 ++++++++ .../components/player/BirdseyeLivePlayer.tsx | 6 +- web/src/views/live/LiveBirdseyeView.tsx | 77 ++++++++++++++++++- 6 files changed, 175 insertions(+), 22 deletions(-) diff --git a/frigate/comms/dispatcher.py b/frigate/comms/dispatcher.py index 6fee166b7..0a9c439f4 100644 --- a/frigate/comms/dispatcher.py +++ b/frigate/comms/dispatcher.py @@ -21,6 +21,7 @@ from frigate.const import ( INSERT_PREVIEW, NOTIFICATION_TEST, REQUEST_REGION_GRID, + UPDATE_BIRDSEYE_LAYOUT, UPDATE_CAMERA_ACTIVITY, UPDATE_EMBEDDINGS_REINDEX_PROGRESS, UPDATE_EVENT_DESCRIPTION, @@ -55,6 +56,7 @@ class Dispatcher: self.camera_activity = CameraActivityManager(config, self.publish) self.model_state = {} self.embeddings_reindex = {} + self.birdseye_layout = {} self._camera_settings_handlers: dict[str, Callable] = { "audio": self._on_audio_command, @@ -168,6 +170,14 @@ class Dispatcher: json.dumps(self.embeddings_reindex.copy()), ) + def handle_update_birdseye_layout(): + if payload: + self.birdseye_layout = payload + self.publish("birdseye_layout", json.dumps(self.birdseye_layout)) + + def handle_birdseye_layout(): + self.publish("birdseye_layout", json.dumps(self.birdseye_layout.copy())) + def handle_on_connect(): camera_status = self.camera_activity.last_camera_activity.copy() cameras_with_status = camera_status.keys() @@ -205,6 +215,7 @@ class Dispatcher: "embeddings_reindex_progress", json.dumps(self.embeddings_reindex.copy()), ) + self.publish("birdseye_layout", json.dumps(self.birdseye_layout.copy())) def handle_notification_test(): self.publish("notification_test", "Test notification") @@ -220,10 +231,12 @@ class Dispatcher: UPDATE_EVENT_DESCRIPTION: 
handle_update_event_description, UPDATE_MODEL_STATE: handle_update_model_state, UPDATE_EMBEDDINGS_REINDEX_PROGRESS: handle_update_embeddings_reindex_progress, + UPDATE_BIRDSEYE_LAYOUT: handle_update_birdseye_layout, NOTIFICATION_TEST: handle_notification_test, "restart": handle_restart, "embeddingsReindexProgress": handle_embeddings_reindex_progress, "modelState": handle_model_state, + "birdseyeLayout": handle_birdseye_layout, "onConnect": handle_on_connect, } diff --git a/frigate/const.py b/frigate/const.py index 699a194ac..893e6eb52 100644 --- a/frigate/const.py +++ b/frigate/const.py @@ -112,6 +112,7 @@ UPDATE_CAMERA_ACTIVITY = "update_camera_activity" UPDATE_EVENT_DESCRIPTION = "update_event_description" UPDATE_MODEL_STATE = "update_model_state" UPDATE_EMBEDDINGS_REINDEX_PROGRESS = "handle_embeddings_reindex_progress" +UPDATE_BIRDSEYE_LAYOUT = "update_birdseye_layout" NOTIFICATION_TEST = "notification_test" # Stats Values diff --git a/frigate/output/birdseye.py b/frigate/output/birdseye.py index 78686fd63..a19436d5e 100644 --- a/frigate/output/birdseye.py +++ b/frigate/output/birdseye.py @@ -15,8 +15,9 @@ from typing import Any, Optional import cv2 import numpy as np +from frigate.comms.inter_process import InterProcessRequestor from frigate.config import BirdseyeModeEnum, FfmpegConfig, FrigateConfig -from frigate.const import BASE_DIR, BIRDSEYE_PIPE, INSTALL_DIR +from frigate.const import BASE_DIR, BIRDSEYE_PIPE, INSTALL_DIR, UPDATE_BIRDSEYE_LAYOUT from frigate.util.image import ( SharedMemoryFrameManager, copy_yuv_to_position, @@ -380,10 +381,24 @@ class BirdsEyeFrameManager: if mode == BirdseyeModeEnum.objects and object_box_count > 0: return True - def update_frame(self, frame: Optional[np.ndarray] = None) -> bool: + def get_camera_coordinates(self) -> dict[str, dict[str, int]]: + """Return the coordinates of each camera in the current layout.""" + coordinates = {} + for row in self.camera_layout: + for position in row: + camera_name, (x, y, width, height) = position + coordinates[camera_name] = { + "x": x, + "y": y, + "width": width, + "height": height, + } + return coordinates + + def update_frame(self, frame: Optional[np.ndarray] = None) -> tuple[bool, bool]: """ Update birdseye, optionally with a new frame. - When no frame is passed, check the layout and update for any disabled cameras. + Returns (frame_changed, layout_changed) to indicate if the frame or layout changed. 
""" # determine how many cameras are tracking objects within the last inactivity_threshold seconds @@ -421,19 +436,21 @@ class BirdsEyeFrameManager: max_camera_refresh = True self.last_refresh_time = now - # Track if the frame changes + # Track if the frame or layout changes frame_changed = False + layout_changed = False # If no active cameras and layout is already empty, no update needed if len(active_cameras) == 0: # if the layout is already cleared if len(self.camera_layout) == 0: - return False + return False, False # if the layout needs to be cleared self.camera_layout = [] self.active_cameras = set() self.clear_frame() frame_changed = True + layout_changed = True else: # Determine if layout needs resetting if len(self.active_cameras) - len(active_cameras) == 0: @@ -453,7 +470,7 @@ class BirdsEyeFrameManager: logger.debug("Resetting Birdseye layout...") self.clear_frame() self.active_cameras = active_cameras - + layout_changed = True # Layout is changing due to reset # this also converts added_cameras from a set to a list since we need # to pop elements in order active_cameras_to_add = sorted( @@ -503,7 +520,7 @@ class BirdsEyeFrameManager: # decrease scaling coefficient until height of all cameras can fit into the birdseye canvas while calculating: if self.stop_event.is_set(): - return + return frame_changed, layout_changed layout_candidate = self.calculate_layout( active_cameras_to_add, coefficient @@ -517,7 +534,7 @@ class BirdsEyeFrameManager: logger.error( "Error finding appropriate birdseye layout" ) - return + return frame_changed, layout_changed calculating = False self.canvas.set_coefficient(len(active_cameras), coefficient) @@ -535,7 +552,7 @@ class BirdsEyeFrameManager: if frame is not None: # Frame presence indicates a potential change frame_changed = True - return frame_changed + return frame_changed, layout_changed def calculate_layout( self, @@ -687,7 +704,11 @@ class BirdsEyeFrameManager: motion_count: int, frame_time: float, frame: np.ndarray, - ) -> bool: + ) -> tuple[bool, bool]: + """ + Update birdseye for a specific camera with new frame data. + Returns (frame_changed, layout_changed) to indicate if the frame or layout changed. 
+ """ # don't process if birdseye is disabled for this camera camera_config = self.config.cameras[camera] force_update = False @@ -700,7 +721,7 @@ class BirdsEyeFrameManager: self.cameras[camera]["last_active_frame"] = 0 force_update = True else: - return False + return False, False # update the last active frame for the camera self.cameras[camera]["current_frame"] = frame.copy() @@ -712,21 +733,22 @@ class BirdsEyeFrameManager: # limit output to 10 fps if not force_update and (now - self.last_output_time) < 1 / 10: - return False + return False, False try: - updated_frame = self.update_frame(frame) + frame_changed, layout_changed = self.update_frame(frame) except Exception: - updated_frame = False + frame_changed, layout_changed = False, False self.active_cameras = [] self.camera_layout = [] print(traceback.format_exc()) # if the frame was updated or the fps is too low, send frame - if force_update or updated_frame or (now - self.last_output_time) > 1: + if force_update or frame_changed or (now - self.last_output_time) > 1: self.last_output_time = now - return True - return False + return True, layout_changed + + return False, layout_changed class Birdseye: @@ -755,6 +777,7 @@ class Birdseye: self.birdseye_manager = BirdsEyeFrameManager(config, stop_event) self.frame_manager = SharedMemoryFrameManager() self.stop_event = stop_event + self.requestor = InterProcessRequestor() if config.birdseye.restream: self.birdseye_buffer = self.frame_manager.create( @@ -789,15 +812,20 @@ class Birdseye: frame_time: float, frame: np.ndarray, ) -> None: - if self.birdseye_manager.update( + frame_changed, frame_layout_changed = self.birdseye_manager.update( camera, len([o for o in current_tracked_objects if not o["stationary"]]), len(motion_boxes), frame_time, frame, - ): + ) + if frame_changed: self.__send_new_frame() + if frame_layout_changed: + coordinates = self.birdseye_manager.get_camera_coordinates() + self.requestor.send_data(UPDATE_BIRDSEYE_LAYOUT, coordinates) + def stop(self) -> None: self.converter.join() self.broadcaster.join() diff --git a/web/src/api/ws.tsx b/web/src/api/ws.tsx index 79bf9e79d..78c596e13 100644 --- a/web/src/api/ws.tsx +++ b/web/src/api/ws.tsx @@ -426,6 +426,40 @@ export function useEmbeddingsReindexProgress( return { payload: data }; } +export function useBirdseyeLayout(revalidateOnFocus: boolean = true): { + payload: string; +} { + const { + value: { payload }, + send: sendCommand, + } = useWs("birdseye_layout", "birdseyeLayout"); + + const data = useDeepMemo(JSON.parse(payload as string)); + + useEffect(() => { + let listener = undefined; + if (revalidateOnFocus) { + sendCommand("birdseyeLayout"); + listener = () => { + if (document.visibilityState == "visible") { + sendCommand("birdseyeLayout"); + } + }; + addEventListener("visibilitychange", listener); + } + + return () => { + if (listener) { + removeEventListener("visibilitychange", listener); + } + }; + // we know that these deps are correct + // eslint-disable-next-line react-hooks/exhaustive-deps + }, [revalidateOnFocus]); + + return { payload: data }; +} + export function useMotionActivity(camera: string): { payload: string } { const { value: { payload }, diff --git a/web/src/components/player/BirdseyeLivePlayer.tsx b/web/src/components/player/BirdseyeLivePlayer.tsx index 286f19216..2e9461293 100644 --- a/web/src/components/player/BirdseyeLivePlayer.tsx +++ b/web/src/components/player/BirdseyeLivePlayer.tsx @@ -13,6 +13,7 @@ type LivePlayerProps = { liveMode: LivePlayerMode; pip?: boolean; containerRef: 
React.MutableRefObject<HTMLDivElement | null>;
+  playerRef?: React.MutableRefObject<HTMLDivElement | null>;
   onClick?: () => void;
 };
 
@@ -22,6 +23,7 @@ export default function BirdseyeLivePlayer({
   liveMode,
   pip,
   containerRef,
+  playerRef,
   onClick,
 }: LivePlayerProps) {
   let player;
 
@@ -76,7 +78,9 @@
    >
-      <div className="size-full">{player}</div>
+      <div ref={playerRef} className="size-full">
+        {player}
); } diff --git a/web/src/views/live/LiveBirdseyeView.tsx b/web/src/views/live/LiveBirdseyeView.tsx index ca28180bf..efded68f5 100644 --- a/web/src/views/live/LiveBirdseyeView.tsx +++ b/web/src/views/live/LiveBirdseyeView.tsx @@ -1,11 +1,13 @@ +import { useBirdseyeLayout } from "@/api/ws"; import CameraFeatureToggle from "@/components/dynamic/CameraFeatureToggle"; import ActivityIndicator from "@/components/indicators/activity-indicator"; import BirdseyeLivePlayer from "@/components/player/BirdseyeLivePlayer"; import { Button } from "@/components/ui/button"; import { TooltipProvider } from "@/components/ui/tooltip"; import { useResizeObserver } from "@/hooks/resize-observer"; +import { cn } from "@/lib/utils"; import { FrigateConfig } from "@/types/frigateConfig"; -import { useEffect, useMemo, useRef, useState } from "react"; +import { useCallback, useEffect, useMemo, useRef, useState } from "react"; import { isDesktop, isFirefox, @@ -122,6 +124,72 @@ export default function LiveBirdseyeView({ return "mse"; }, [config]); + const birdseyeLayout = useBirdseyeLayout(); + + // Click overlay handling + + const playerRef = useRef(null); + const handleOverlayClick = useCallback( + ( + e: React.MouseEvent | React.TouchEvent, + ) => { + let clientX; + let clientY; + if ("TouchEvent" in window && e.nativeEvent instanceof TouchEvent) { + clientX = e.nativeEvent.touches[0].clientX; + clientY = e.nativeEvent.touches[0].clientY; + } else if (e.nativeEvent instanceof MouseEvent) { + clientX = e.nativeEvent.clientX; + clientY = e.nativeEvent.clientY; + } + + if ( + playerRef.current && + clientX && + clientY && + config && + birdseyeLayout?.payload + ) { + const playerRect = playerRef.current.getBoundingClientRect(); + + // Calculate coordinates relative to player div, accounting for offset + const rawX = clientX - playerRect.left; + const rawY = clientY - playerRect.top; + + // Ensure click is within player bounds + if ( + rawX < 0 || + rawX > playerRect.width || + rawY < 0 || + rawY > playerRect.height + ) { + return; + } + + // Scale click coordinates to birdseye canvas resolution + const canvasX = rawX * (config.birdseye.width / playerRect.width); + const canvasY = rawY * (config.birdseye.height / playerRect.height); + + for (const [cameraName, coords] of Object.entries( + birdseyeLayout.payload, + )) { + const parsedCoords = + typeof coords === "string" ? JSON.parse(coords) : coords; + if ( + canvasX >= parsedCoords.x && + canvasX < parsedCoords.x + parsedCoords.width && + canvasY >= parsedCoords.y && + canvasY < parsedCoords.y + parsedCoords.height + ) { + navigate(`/#${cameraName}`); + break; + } + } + } + }, + [playerRef, config, birdseyeLayout, navigate], + ); + if (!config) { return ; } @@ -215,16 +283,21 @@ export default function LiveBirdseyeView({ }} >
From 4b57e5e26505f987bc8b0a0818cb2e2f39e8f438 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Mon, 9 Jun 2025 08:25:33 -0600 Subject: [PATCH 023/530] Refactor TensorRT (#18643) * Combine base and arm trt detectors * Remove unused deps for amd64 build * Add missing packages and cleanup ldconfig * Expand packages for tensorflow model training * Cleanup * Refactor training to not reserve memory --- docker/tensorrt/requirements-amd64.txt | 1 + frigate/comms/embeddings_updater.py | 2 +- .../real_time/custom_classification.py | 56 ++++--------------- frigate/data_processing/types.py | 6 +- frigate/embeddings/__init__.py | 10 +++- frigate/util/classification.py | 51 +++++++++++++++-- 6 files changed, 68 insertions(+), 58 deletions(-) diff --git a/docker/tensorrt/requirements-amd64.txt b/docker/tensorrt/requirements-amd64.txt index 63c68b583..a7853aeec 100644 --- a/docker/tensorrt/requirements-amd64.txt +++ b/docker/tensorrt/requirements-amd64.txt @@ -13,6 +13,7 @@ nvidia_cusolver_cu12==11.6.3.*; platform_machine == 'x86_64' nvidia_cusparse_cu12==12.5.1.*; platform_machine == 'x86_64' nvidia_nccl_cu12==2.23.4; platform_machine == 'x86_64' nvidia_nvjitlink_cu12==12.5.82; platform_machine == 'x86_64' +tensorflow==2.19.*; platform_machine == 'x86_64' onnx==1.16.*; platform_machine == 'x86_64' onnxruntime-gpu==1.22.*; platform_machine == 'x86_64' protobuf==3.20.3; platform_machine == 'x86_64' diff --git a/frigate/comms/embeddings_updater.py b/frigate/comms/embeddings_updater.py index 5edb9e77d..f97319051 100644 --- a/frigate/comms/embeddings_updater.py +++ b/frigate/comms/embeddings_updater.py @@ -12,7 +12,7 @@ class EmbeddingsRequestEnum(Enum): # audio transcribe_audio = "transcribe_audio" # custom classification - train_classification = "train_classification" + reload_classification_model = "reload_classification_model" # face clear_face_classifier = "clear_face_classifier" recognize_face = "recognize_face" diff --git a/frigate/data_processing/real_time/custom_classification.py b/frigate/data_processing/real_time/custom_classification.py index a718956e2..f153b5b92 100644 --- a/frigate/data_processing/real_time/custom_classification.py +++ b/frigate/data_processing/real_time/custom_classification.py @@ -3,7 +3,6 @@ import datetime import logging import os -import threading from typing import Any import cv2 @@ -17,10 +16,8 @@ from frigate.comms.event_metadata_updater import ( from frigate.comms.inter_process import InterProcessRequestor from frigate.config import FrigateConfig from frigate.config.classification import CustomClassificationConfig -from frigate.const import CLIPS_DIR, MODEL_CACHE_DIR, UPDATE_MODEL_STATE -from frigate.types import ModelStatusTypesEnum +from frigate.const import CLIPS_DIR, MODEL_CACHE_DIR from frigate.util.builtin import EventsPerSecond, InferenceSpeed, load_labels -from frigate.util.classification import train_classification_model from frigate.util.object import box_overlaps, calculate_region from ..types import DataProcessorMetrics @@ -72,18 +69,6 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi): ) self.classifications_per_second.start() - def __retrain_model(self) -> None: - train_classification_model(self.model_config.name) - self.__build_detector() - self.requestor.send_data( - UPDATE_MODEL_STATE, - { - "model": self.model_config.name, - "state": ModelStatusTypesEnum.complete, - }, - ) - logger.info(f"Successfully loaded updated model for {self.model_config.name}") - def __update_metrics(self, duration: float) -> None: 
self.classifications_per_second.update() self.inference_speed.update(duration) @@ -172,19 +157,15 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi): ) def handle_request(self, topic, request_data): - if topic == EmbeddingsRequestEnum.train_classification.value: + if topic == EmbeddingsRequestEnum.reload_classification_model.value: if request_data.get("model_name") == self.model_config.name: - self.requestor.send_data( - UPDATE_MODEL_STATE, - { - "model": self.model_config.name, - "state": ModelStatusTypesEnum.training, - }, + self.__build_detector() + logger.info( + f"Successfully loaded updated model for {self.model_config.name}" ) - threading.Thread(target=self.__retrain_model).start() return { "success": True, - "message": f"Began training {self.model_config.name} model.", + "message": f"Loaded {self.model_config.name} model.", } else: return None @@ -232,18 +213,6 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi): prefill=0, ) - def __retrain_model(self) -> None: - train_classification_model(self.model_config.name) - self.__build_detector() - self.requestor.send_data( - UPDATE_MODEL_STATE, - { - "model": self.model_config.name, - "state": ModelStatusTypesEnum.complete, - }, - ) - logger.info(f"Successfully loaded updated model for {self.model_config.name}") - def __update_metrics(self, duration: float) -> None: self.classifications_per_second.update() self.inference_speed.update(duration) @@ -307,19 +276,14 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi): self.detected_objects[obj_data["id"]] = score def handle_request(self, topic, request_data): - if topic == EmbeddingsRequestEnum.train_classification.value: + if topic == EmbeddingsRequestEnum.reload_classification_model.value: if request_data.get("model_name") == self.model_config.name: - self.requestor.send_data( - UPDATE_MODEL_STATE, - { - "model": self.model_config.name, - "state": ModelStatusTypesEnum.training, - }, + logger.info( + f"Successfully loaded updated model for {self.model_config.name}" ) - threading.Thread(target=self.__retrain_model).start() return { "success": True, - "message": f"Began training {self.model_config.name} model.", + "message": f"Loaded {self.model_config.name} model.", } else: return None diff --git a/frigate/data_processing/types.py b/frigate/data_processing/types.py index 783b0798e..50f1ed561 100644 --- a/frigate/data_processing/types.py +++ b/frigate/data_processing/types.py @@ -20,8 +20,8 @@ class DataProcessorMetrics: alpr_pps: Synchronized yolov9_lpr_speed: Synchronized yolov9_lpr_pps: Synchronized - classification_speeds: dict[str, Synchronized] - classification_cps: dict[str, Synchronized] + classification_speeds: dict[str, Synchronized] = {} + classification_cps: dict[str, Synchronized] = {} def __init__(self, custom_classification_models: list[str]): self.image_embeddings_speed = mp.Value("d", 0.0) @@ -36,8 +36,6 @@ class DataProcessorMetrics: self.yolov9_lpr_pps = mp.Value("d", 0.0) if custom_classification_models: - self.classification_speeds = {} - self.classification_cps = {} for key in custom_classification_models: self.classification_speeds[key] = mp.Value("d", 0.0) self.classification_cps[key] = mp.Value("d", 0.0) diff --git a/frigate/embeddings/__init__.py b/frigate/embeddings/__init__.py index 5c2a9005f..037cadcf0 100644 --- a/frigate/embeddings/__init__.py +++ b/frigate/embeddings/__init__.py @@ -22,6 +22,7 @@ from frigate.data_processing.types import DataProcessorMetrics from frigate.db.sqlitevecq import SqliteVecQueueDatabase 
from frigate.models import Event, Recordings from frigate.util.builtin import serialize +from frigate.util.classification import kickoff_model_training from frigate.util.services import listen from .maintainer import EmbeddingMaintainer @@ -302,9 +303,12 @@ class EmbeddingsContext: return self.requestor.send_data(EmbeddingsRequestEnum.reindex.value, {}) def start_classification_training(self, model_name: str) -> dict[str, Any]: - return self.requestor.send_data( - EmbeddingsRequestEnum.train_classification.value, {"model_name": model_name} - ) + threading.Thread( + target=kickoff_model_training, + args=(self.requestor, model_name), + daemon=True, + ).start() + return {"success": True, "message": f"Began training {model_name} model."} def transcribe_audio(self, event: dict[str, any]) -> dict[str, any]: return self.requestor.send_data( diff --git a/frigate/util/classification.py b/frigate/util/classification.py index 92da7c93e..842f38fa2 100644 --- a/frigate/util/classification.py +++ b/frigate/util/classification.py @@ -10,7 +10,11 @@ from tensorflow.keras import layers, models, optimizers from tensorflow.keras.applications import MobileNetV2 from tensorflow.keras.preprocessing.image import ImageDataGenerator -from frigate.const import CLIPS_DIR, MODEL_CACHE_DIR +from frigate.comms.embeddings_updater import EmbeddingsRequestEnum, EmbeddingsRequestor +from frigate.comms.inter_process import InterProcessRequestor +from frigate.const import CLIPS_DIR, MODEL_CACHE_DIR, UPDATE_MODEL_STATE +from frigate.types import ModelStatusTypesEnum +from frigate.util import Process BATCH_SIZE = 16 EPOCHS = 50 @@ -18,7 +22,7 @@ LEARNING_RATE = 0.001 @staticmethod -def generate_representative_dataset_factory(dataset_dir: str): +def __generate_representative_dataset_factory(dataset_dir: str): def generate_representative_dataset(): image_paths = [] for root, dirs, files in os.walk(dataset_dir): @@ -38,7 +42,7 @@ def generate_representative_dataset_factory(dataset_dir: str): @staticmethod -def train_classification_model(model_name: str) -> bool: +def __train_classification_model(model_name: str) -> bool: """Train a classification model.""" dataset_dir = os.path.join(CLIPS_DIR, model_name, "dataset") model_dir = os.path.join(MODEL_CACHE_DIR, model_name) @@ -107,7 +111,7 @@ def train_classification_model(model_name: str) -> bool: # convert model to tflite converter = tf.lite.TFLiteConverter.from_keras_model(model) converter.optimizations = [tf.lite.Optimize.DEFAULT] - converter.representative_dataset = generate_representative_dataset_factory( + converter.representative_dataset = __generate_representative_dataset_factory( dataset_dir ) converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8] @@ -122,3 +126,42 @@ def train_classification_model(model_name: str) -> bool: # restore original stdout / stderr sys.stdout = original_stdout sys.stderr = original_stderr + + +@staticmethod +def kickoff_model_training( + embeddingRequestor: EmbeddingsRequestor, model_name: str +) -> None: + requestor = InterProcessRequestor() + requestor.send_data( + UPDATE_MODEL_STATE, + { + "model": model_name, + "state": ModelStatusTypesEnum.training, + }, + ) + + # run training in sub process so that + # tensorflow will free CPU / GPU memory + # upon training completion + training_process = Process( + target=__train_classification_model, + name=f"model_training:{model_name}", + args=(model_name,), + ) + training_process.start() + training_process.join() + + # reload model and mark training as complete + 
embeddingRequestor.send_data(
+        EmbeddingsRequestEnum.reload_classification_model.value,
+        {"model_name": model_name},
+    )
+    requestor.send_data(
+        UPDATE_MODEL_STATE,
+        {
+            "model": model_name,
+            "state": ModelStatusTypesEnum.complete,
+        },
+    )
+    requestor.stop()

From faadea8e1fe4af2b77468f23d32fee259a459ee3 Mon Sep 17 00:00:00 2001
From: Nicolas Mowen
Date: Wed, 11 Jun 2025 11:25:30 -0600
Subject: [PATCH 024/530] Dynamic Management of Cameras (#18671)

* Add base class for global config updates
* Add or remove camera states
* Move camera process management to separate thread
* Move camera management fully to separate class
* Cleanup
* Stop camera processes when stop command is sent
* Start processes dynamically when needed
* Adjust
* Leave extra room in tracked object queue for two cameras
* Dynamically set extra config pieces
* Add some TODOs
* Fix type check
* Simplify config updates
* Improve typing
* Correctly handle indexed entries
* Cleanup
* Create out SHM
* Use ZMQ for signaling object detection is completed
* Get camera correctly created
* Cleanup for updating the cameras config
* Cleanup
* Don't enable audio if no cameras have audio transcription
* Use exact string so similar camera names don't interfere
* Add ability to update config via json body to config/set endpoint

Additionally, update the config in a single call rather than multiple calls for each updated key

* fix autotracking calibration to support new config updater function

---------

Co-authored-by: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com>
---
 frigate/api/app.py                        |  45 +++-
 frigate/api/defs/request/app_body.py      |   3 +-
 frigate/app.py                            | 166 +++------------
 frigate/camera/activity_manager.py        |  35 +--
 frigate/camera/maintainer.py              | 248 ++++++++++++++++++++++
 frigate/comms/object_detector_signaler.py |  21 ++
 frigate/comms/webpush.py                  |   9 +-
 frigate/config/camera/updater.py          |  19 +-
 frigate/embeddings/maintainer.py          |  11 +
 frigate/events/audio.py                   |  20 +-
 frigate/object_detection/base.py          |  41 ++--
 frigate/output/output.py                  |  12 +-
 frigate/ptz/autotrack.py                  |  11 +-
 frigate/record/maintainer.py              |   4 +-
 frigate/review/maintainer.py              |   3 +
 frigate/track/object_processing.py        |  52 +++--
 frigate/util/builtin.py                   |  53 +++--
 frigate/video.py                          |   8 +-
 18 files changed, 533 insertions(+), 228 deletions(-)
 create mode 100644 frigate/camera/maintainer.py
 create mode 100644 frigate/comms/object_detector_signaler.py

diff --git a/frigate/api/app.py b/frigate/api/app.py
index 351518673..d9e573d29 100644
--- a/frigate/api/app.py
+++ b/frigate/api/app.py
@@ -6,6 +6,7 @@ import json
 import logging
 import os
 import traceback
+import urllib
 from datetime import datetime, timedelta
 from functools import reduce
 from io import StringIO
@@ -36,8 +37,10 @@ from frigate.models import Event, Timeline
 from frigate.stats.prometheus import get_metrics, update_metrics
 from frigate.util.builtin import (
     clean_camera_user_pass,
+    flatten_config_data,
     get_tz_modifiers,
-    update_yaml_from_url,
+    process_config_query_string,
+    update_yaml_file_bulk,
 )
 from frigate.util.config import find_config_file
 from frigate.util.services import (
@@ -358,14 +361,37 @@ def config_set(request: Request, body: AppConfigSetBody):
 
     with open(config_file, "r") as f:
         old_raw_config = f.read()
-        f.close()
 
     try:
-        update_yaml_from_url(config_file, str(request.url))
+        updates = {}
+
+        # process query string parameters (takes precedence over body.config_data)
+        parsed_url = urllib.parse.urlparse(str(request.url))
+        query_string = urllib.parse.parse_qs(parsed_url.query,
keep_blank_values=True) + + # Filter out empty keys but keep blank values for non-empty keys + query_string = {k: v for k, v in query_string.items() if k} + + if query_string: + updates = process_config_query_string(query_string) + elif body.config_data: + updates = flatten_config_data(body.config_data) + + if not updates: + return JSONResponse( + content=( + {"success": False, "message": "No configuration data provided"} + ), + status_code=400, + ) + + # apply all updates in a single operation + update_yaml_file_bulk(config_file, updates) + + # validate the updated config with open(config_file, "r") as f: new_raw_config = f.read() - f.close() - # Validate the config schema + try: config = FrigateConfig.parse(new_raw_config) except Exception: @@ -390,12 +416,19 @@ def config_set(request: Request, body: AppConfigSetBody): ) if body.requires_restart == 0 or body.update_topic: + old_config: FrigateConfig = request.app.frigate_config request.app.frigate_config = config if body.update_topic and body.update_topic.startswith("config/cameras/"): _, _, camera, field = body.update_topic.split("/") - settings = config.get_nested_object(body.update_topic) + if field == "add": + settings = config.cameras[camera] + elif field == "remove": + settings = old_config.cameras[camera] + else: + settings = config.get_nested_object(body.update_topic) + request.app.config_publisher.publish_update( CameraConfigUpdateTopic(CameraConfigUpdateEnum[field], camera), settings, diff --git a/frigate/api/defs/request/app_body.py b/frigate/api/defs/request/app_body.py index 7456a6c77..7f8ca40ec 100644 --- a/frigate/api/defs/request/app_body.py +++ b/frigate/api/defs/request/app_body.py @@ -1,4 +1,4 @@ -from typing import Optional +from typing import Any, Dict, Optional from pydantic import BaseModel @@ -6,6 +6,7 @@ from pydantic import BaseModel class AppConfigSetBody(BaseModel): requires_restart: int = 1 update_topic: str | None = None + config_data: Optional[Dict[str, Any]] = None class AppPutPasswordBody(BaseModel): diff --git a/frigate/app.py b/frigate/app.py index f534de6e0..186ed1195 100644 --- a/frigate/app.py +++ b/frigate/app.py @@ -18,6 +18,7 @@ import frigate.util as util from frigate.api.auth import hash_password from frigate.api.fastapi_app import create_fastapi_app from frigate.camera import CameraMetrics, PTZMetrics +from frigate.camera.maintainer import CameraMaintainer from frigate.comms.base_communicator import Communicator from frigate.comms.dispatcher import Dispatcher from frigate.comms.event_metadata_updater import EventMetadataPublisher @@ -36,7 +37,6 @@ from frigate.const import ( FACE_DIR, MODEL_CACHE_DIR, RECORD_DIR, - SHM_FRAMES_VAR, THUMB_DIR, ) from frigate.data_processing.types import DataProcessorMetrics @@ -71,11 +71,9 @@ from frigate.storage import StorageMaintainer from frigate.timeline import TimelineProcessor from frigate.track.object_processing import TrackedObjectProcessor from frigate.util.builtin import empty_and_close_queue -from frigate.util.image import SharedMemoryFrameManager, UntrackedSharedMemory -from frigate.util.object import get_camera_regions_grid +from frigate.util.image import UntrackedSharedMemory from frigate.util.services import set_file_limit from frigate.version import VERSION -from frigate.video import capture_camera, track_camera from frigate.watchdog import FrigateWatchdog logger = logging.getLogger(__name__) @@ -87,7 +85,6 @@ class FrigateApp: self.stop_event: MpEvent = mp.Event() self.detection_queue: Queue = mp.Queue() self.detectors: dict[str, 
ObjectDetectProcess] = {} - self.detection_out_events: dict[str, MpEvent] = {} self.detection_shms: list[mp.shared_memory.SharedMemory] = [] self.log_queue: Queue = mp.Queue() self.camera_metrics: dict[str, CameraMetrics] = {} @@ -104,8 +101,6 @@ class FrigateApp: self.ptz_metrics: dict[str, PTZMetrics] = {} self.processes: dict[str, int] = {} self.embeddings: Optional[EmbeddingsContext] = None - self.region_grids: dict[str, list[list[dict[str, int]]]] = {} - self.frame_manager = SharedMemoryFrameManager() self.config = config def ensure_dirs(self) -> None: @@ -141,8 +136,16 @@ class FrigateApp: def init_queues(self) -> None: # Queue for cameras to push tracked objects to + # leaving room for 2 extra cameras to be added self.detected_frames_queue: Queue = mp.Queue( - maxsize=sum(camera.enabled for camera in self.config.cameras.values()) * 2 + maxsize=( + sum( + camera.enabled_in_config == True + for camera in self.config.cameras.values() + ) + + 2 + ) + * 2 ) # Queue for timeline events @@ -279,7 +282,9 @@ class FrigateApp: "synchronous": "NORMAL", # Safe when using WAL https://www.sqlite.org/pragma.html#pragma_synchronous }, timeout=max( - 60, 10 * len([c for c in self.config.cameras.values() if c.enabled]) + 60, + 10 + * len([c for c in self.config.cameras.values() if c.enabled_in_config]), ), load_vec_extension=self.config.semantic_search.enabled, ) @@ -309,7 +314,9 @@ class FrigateApp: def init_embeddings_client(self) -> None: genai_cameras = [ - c for c in self.config.cameras.values() if c.enabled and c.genai.enabled + c + for c in self.config.cameras.values() + if c.enabled_in_config and c.genai.enabled ] if ( @@ -358,8 +365,6 @@ class FrigateApp: def start_detectors(self) -> None: for name in self.config.cameras.keys(): - self.detection_out_events[name] = mp.Event() - try: largest_frame = max( [ @@ -391,7 +396,7 @@ class FrigateApp: self.detectors[name] = ObjectDetectProcess( name, self.detection_queue, - self.detection_out_events, + list(self.config.cameras.keys()), detector_config, ) @@ -426,69 +431,16 @@ class FrigateApp: output_processor.start() logger.info(f"Output process started: {output_processor.pid}") - def init_historical_regions(self) -> None: - # delete region grids for removed or renamed cameras - cameras = list(self.config.cameras.keys()) - Regions.delete().where(~(Regions.camera << cameras)).execute() - - # create or update region grids for each camera - for camera in self.config.cameras.values(): - assert camera.name is not None - self.region_grids[camera.name] = get_camera_regions_grid( - camera.name, - camera.detect, - max(self.config.model.width, self.config.model.height), - ) - - def start_camera_processors(self) -> None: - for name, config in self.config.cameras.items(): - if not self.config.cameras[name].enabled_in_config: - logger.info(f"Camera processor not started for disabled camera {name}") - continue - - camera_process = util.Process( - target=track_camera, - name=f"camera_processor:{name}", - args=( - name, - config, - self.config.model, - self.config.model.merged_labelmap, - self.detection_queue, - self.detection_out_events[name], - self.detected_frames_queue, - self.camera_metrics[name], - self.ptz_metrics[name], - self.region_grids[name], - ), - daemon=True, - ) - self.camera_metrics[name].process = camera_process - camera_process.start() - logger.info(f"Camera processor started for {name}: {camera_process.pid}") - - def start_camera_capture_processes(self) -> None: - shm_frame_count = self.shm_frame_count() - - for name, config in 
self.config.cameras.items(): - if not self.config.cameras[name].enabled_in_config: - logger.info(f"Capture process not started for disabled camera {name}") - continue - - # pre-create shms - for i in range(shm_frame_count): - frame_size = config.frame_shape_yuv[0] * config.frame_shape_yuv[1] - self.frame_manager.create(f"{config.name}_frame{i}", frame_size) - - capture_process = util.Process( - target=capture_camera, - name=f"camera_capture:{name}", - args=(config, shm_frame_count, self.camera_metrics[name]), - ) - capture_process.daemon = True - self.camera_metrics[name].capture_process = capture_process - capture_process.start() - logger.info(f"Capture process started for {name}: {capture_process.pid}") + def start_camera_processor(self) -> None: + self.camera_maintainer = CameraMaintainer( + self.config, + self.detection_queue, + self.detected_frames_queue, + self.camera_metrics, + self.ptz_metrics, + self.stop_event, + ) + self.camera_maintainer.start() def start_audio_processor(self) -> None: audio_cameras = [ @@ -548,45 +500,6 @@ class FrigateApp: self.frigate_watchdog = FrigateWatchdog(self.detectors, self.stop_event) self.frigate_watchdog.start() - def shm_frame_count(self) -> int: - total_shm = round(shutil.disk_usage("/dev/shm").total / pow(2, 20), 1) - - # required for log files + nginx cache - min_req_shm = 40 + 10 - - if self.config.birdseye.restream: - min_req_shm += 8 - - available_shm = total_shm - min_req_shm - cam_total_frame_size = 0.0 - - for camera in self.config.cameras.values(): - if camera.enabled and camera.detect.width and camera.detect.height: - cam_total_frame_size += round( - (camera.detect.width * camera.detect.height * 1.5 + 270480) - / 1048576, - 1, - ) - - if cam_total_frame_size == 0.0: - return 0 - - shm_frame_count = min( - int(os.environ.get(SHM_FRAMES_VAR, "50")), - int(available_shm / (cam_total_frame_size)), - ) - - logger.debug( - f"Calculated total camera size {available_shm} / {cam_total_frame_size} :: {shm_frame_count} frames for each camera in SHM" - ) - - if shm_frame_count < 20: - logger.warning( - f"The current SHM size of {total_shm}MB is too small, recommend increasing it to at least {round(min_req_shm + cam_total_frame_size * 20)}MB." 
- ) - - return shm_frame_count - def init_auth(self) -> None: if self.config.auth.enabled: if User.select().count() == 0: @@ -656,10 +569,8 @@ class FrigateApp: self.init_embeddings_client() self.start_video_output_processor() self.start_ptz_autotracker() - self.init_historical_regions() self.start_detected_frames_processor() - self.start_camera_processors() - self.start_camera_capture_processes() + self.start_camera_processor() self.start_audio_processor() self.start_storage_maintainer() self.start_stats_emitter() @@ -716,24 +627,6 @@ class FrigateApp: if self.onvif_controller: self.onvif_controller.close() - # ensure the capture processes are done - for camera, metrics in self.camera_metrics.items(): - capture_process = metrics.capture_process - if capture_process is not None: - logger.info(f"Waiting for capture process for {camera} to stop") - capture_process.terminate() - capture_process.join() - - # ensure the camera processors are done - for camera, metrics in self.camera_metrics.items(): - camera_process = metrics.process - if camera_process is not None: - logger.info(f"Waiting for process for {camera} to stop") - camera_process.terminate() - camera_process.join() - logger.info(f"Closing frame queue for {camera}") - empty_and_close_queue(metrics.frame_queue) - # ensure the detectors are done for detector in self.detectors.values(): detector.stop() @@ -778,7 +671,6 @@ class FrigateApp: self.event_metadata_updater.stop() self.inter_zmq_proxy.stop() - self.frame_manager.cleanup() while len(self.detection_shms) > 0: shm = self.detection_shms.pop() shm.close() diff --git a/frigate/camera/activity_manager.py b/frigate/camera/activity_manager.py index 6039a07f6..e10730931 100644 --- a/frigate/camera/activity_manager.py +++ b/frigate/camera/activity_manager.py @@ -3,7 +3,7 @@ from collections import Counter from typing import Any, Callable -from frigate.config.config import FrigateConfig +from frigate.config import CameraConfig, FrigateConfig class CameraActivityManager: @@ -23,26 +23,33 @@ class CameraActivityManager: if not camera_config.enabled_in_config: continue - self.last_camera_activity[camera_config.name] = {} - self.camera_all_object_counts[camera_config.name] = Counter() - self.camera_active_object_counts[camera_config.name] = Counter() + self.__init_camera(camera_config) - for zone, zone_config in camera_config.zones.items(): - if zone not in self.all_zone_labels: - self.zone_all_object_counts[zone] = Counter() - self.zone_active_object_counts[zone] = Counter() - self.all_zone_labels[zone] = set() + def __init_camera(self, camera_config: CameraConfig) -> None: + self.last_camera_activity[camera_config.name] = {} + self.camera_all_object_counts[camera_config.name] = Counter() + self.camera_active_object_counts[camera_config.name] = Counter() - self.all_zone_labels[zone].update( - zone_config.objects - if zone_config.objects - else camera_config.objects.track - ) + for zone, zone_config in camera_config.zones.items(): + if zone not in self.all_zone_labels: + self.zone_all_object_counts[zone] = Counter() + self.zone_active_object_counts[zone] = Counter() + self.all_zone_labels[zone] = set() + + self.all_zone_labels[zone].update( + zone_config.objects + if zone_config.objects + else camera_config.objects.track + ) def update_activity(self, new_activity: dict[str, dict[str, Any]]) -> None: all_objects: list[dict[str, Any]] = [] for camera in new_activity.keys(): + # handle cameras that were added dynamically + if camera not in self.camera_all_object_counts: + 
self.__init_camera(self.config.cameras[camera]) + new_objects = new_activity[camera].get("objects", []) all_objects.extend(new_objects) diff --git a/frigate/camera/maintainer.py b/frigate/camera/maintainer.py new file mode 100644 index 000000000..6abeb762e --- /dev/null +++ b/frigate/camera/maintainer.py @@ -0,0 +1,248 @@ +"""Create and maintain camera processes / management.""" + +import logging +import os +import shutil +import threading +from multiprocessing import Queue +from multiprocessing.synchronize import Event as MpEvent + +from frigate.camera import CameraMetrics, PTZMetrics +from frigate.config import FrigateConfig +from frigate.config.camera import CameraConfig +from frigate.config.camera.updater import ( + CameraConfigUpdateEnum, + CameraConfigUpdateSubscriber, +) +from frigate.const import SHM_FRAMES_VAR +from frigate.models import Regions +from frigate.util import Process as FrigateProcess +from frigate.util.builtin import empty_and_close_queue +from frigate.util.image import SharedMemoryFrameManager, UntrackedSharedMemory +from frigate.util.object import get_camera_regions_grid +from frigate.video import capture_camera, track_camera + +logger = logging.getLogger(__name__) + + +class CameraMaintainer(threading.Thread): + def __init__( + self, + config: FrigateConfig, + detection_queue: Queue, + detected_frames_queue: Queue, + camera_metrics: dict[str, CameraMetrics], + ptz_metrics: dict[str, PTZMetrics], + stop_event: MpEvent, + ): + super().__init__(name="camera_processor") + self.config = config + self.detection_queue = detection_queue + self.detected_frames_queue = detected_frames_queue + self.stop_event = stop_event + self.camera_metrics = camera_metrics + self.ptz_metrics = ptz_metrics + self.frame_manager = SharedMemoryFrameManager() + self.region_grids: dict[str, list[list[dict[str, int]]]] = {} + self.update_subscriber = CameraConfigUpdateSubscriber( + self.config, + {}, + [ + CameraConfigUpdateEnum.add, + CameraConfigUpdateEnum.remove, + ], + ) + self.shm_count = self.__calculate_shm_frame_count() + + def __init_historical_regions(self) -> None: + # delete region grids for removed or renamed cameras + cameras = list(self.config.cameras.keys()) + Regions.delete().where(~(Regions.camera << cameras)).execute() + + # create or update region grids for each camera + for camera in self.config.cameras.values(): + assert camera.name is not None + self.region_grids[camera.name] = get_camera_regions_grid( + camera.name, + camera.detect, + max(self.config.model.width, self.config.model.height), + ) + + def __calculate_shm_frame_count(self) -> int: + total_shm = round(shutil.disk_usage("/dev/shm").total / pow(2, 20), 1) + + # required for log files + nginx cache + min_req_shm = 40 + 10 + + if self.config.birdseye.restream: + min_req_shm += 8 + + available_shm = total_shm - min_req_shm + cam_total_frame_size = 0.0 + + for camera in self.config.cameras.values(): + if ( + camera.enabled_in_config + and camera.detect.width + and camera.detect.height + ): + cam_total_frame_size += round( + (camera.detect.width * camera.detect.height * 1.5 + 270480) + / 1048576, + 1, + ) + + # leave room for 2 cameras that are added dynamically, if a user wants to add more cameras they may need to increase the SHM size and restart after adding them. 
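+        # illustrative numbers: a 1920x1080 detect stream works out to
+        # (1920 * 1080 * 1.5 + 270480) / 1048576 ~= 3.2 MB per frame, so the
+        # two spare slots below add roughly 6.4 MB to the per-frame total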
+ cam_total_frame_size += 2 * round( + (camera.detect.width * camera.detect.height * 1.5 + 270480) / 1048576, + 1, + ) + + if cam_total_frame_size == 0.0: + return 0 + + shm_frame_count = min( + int(os.environ.get(SHM_FRAMES_VAR, "50")), + int(available_shm / (cam_total_frame_size)), + ) + + logger.debug( + f"Calculated total camera size {available_shm} / {cam_total_frame_size} :: {shm_frame_count} frames for each camera in SHM" + ) + + if shm_frame_count < 20: + logger.warning( + f"The current SHM size of {total_shm}MB is too small, recommend increasing it to at least {round(min_req_shm + cam_total_frame_size * 20)}MB." + ) + + return shm_frame_count + + def __start_camera_processor( + self, name: str, config: CameraConfig, runtime: bool = False + ) -> None: + if not config.enabled_in_config: + logger.info(f"Camera processor not started for disabled camera {name}") + return + + if runtime: + self.camera_metrics[name] = CameraMetrics() + self.ptz_metrics[name] = PTZMetrics(autotracker_enabled=False) + self.region_grids[name] = get_camera_regions_grid( + name, + config.detect, + max(self.config.model.width, self.config.model.height), + ) + + try: + largest_frame = max( + [ + det.model.height * det.model.width * 3 + if det.model is not None + else 320 + for det in self.config.detectors.values() + ] + ) + UntrackedSharedMemory(name=f"out-{name}", create=True, size=20 * 6 * 4) + UntrackedSharedMemory( + name=name, + create=True, + size=largest_frame, + ) + except FileExistsError: + pass + + camera_process = FrigateProcess( + target=track_camera, + name=f"camera_processor:{name}", + args=( + config.name, + config, + self.config.model, + self.config.model.merged_labelmap, + self.detection_queue, + self.detected_frames_queue, + self.camera_metrics[name], + self.ptz_metrics[name], + self.region_grids[name], + ), + daemon=True, + ) + self.camera_metrics[config.name].process = camera_process + camera_process.start() + logger.info(f"Camera processor started for {config.name}: {camera_process.pid}") + + def __start_camera_capture( + self, name: str, config: CameraConfig, runtime: bool = False + ) -> None: + if not config.enabled_in_config: + logger.info(f"Capture process not started for disabled camera {name}") + return + + # pre-create shms + for i in range(10 if runtime else self.shm_count): + frame_size = config.frame_shape_yuv[0] * config.frame_shape_yuv[1] + self.frame_manager.create(f"{config.name}_frame{i}", frame_size) + + capture_process = FrigateProcess( + target=capture_camera, + name=f"camera_capture:{name}", + args=(config, self.shm_count, self.camera_metrics[name]), + ) + capture_process.daemon = True + self.camera_metrics[name].capture_process = capture_process + capture_process.start() + logger.info(f"Capture process started for {name}: {capture_process.pid}") + + def __stop_camera_capture_process(self, camera: str) -> None: + capture_process = self.camera_metrics[camera].capture_process + if capture_process is not None: + logger.info(f"Waiting for capture process for {camera} to stop") + capture_process.terminate() + capture_process.join() + + def __stop_camera_process(self, camera: str) -> None: + metrics = self.camera_metrics[camera] + camera_process = metrics.process + if camera_process is not None: + logger.info(f"Waiting for process for {camera} to stop") + camera_process.terminate() + camera_process.join() + logger.info(f"Closing frame queue for {camera}") + empty_and_close_queue(metrics.frame_queue) + + def run(self): + self.__init_historical_regions() + + # start camera 
processes + for camera, config in self.config.cameras.items(): + self.__start_camera_processor(camera, config) + self.__start_camera_capture(camera, config) + + while not self.stop_event.wait(1): + updates = self.update_subscriber.check_for_updates() + + for update_type, updated_cameras in updates.items(): + if update_type == CameraConfigUpdateEnum.add.name: + for camera in updated_cameras: + self.__start_camera_processor( + camera, + self.update_subscriber.camera_configs[camera], + runtime=True, + ) + self.__start_camera_capture( + camera, self.update_subscriber.camera_configs[camera] + ) + elif update_type == CameraConfigUpdateEnum.remove.name: + self.__stop_camera_capture_process(camera) + self.__stop_camera_process(camera) + + # ensure the capture processes are done + for camera in self.camera_metrics.keys(): + self.__stop_camera_capture_process(camera) + + # ensure the camera processors are done + for camera in self.camera_metrics.keys(): + self.__stop_camera_process(camera) + + self.update_subscriber.stop() + self.frame_manager.cleanup() diff --git a/frigate/comms/object_detector_signaler.py b/frigate/comms/object_detector_signaler.py new file mode 100644 index 000000000..befc83e4d --- /dev/null +++ b/frigate/comms/object_detector_signaler.py @@ -0,0 +1,21 @@ +"""Facilitates communication between processes for object detection signals.""" + +from .zmq_proxy import Publisher, Subscriber + + +class ObjectDetectorPublisher(Publisher): + """Publishes signal for object detection to different processes.""" + + topic_base = "object_detector/" + + +class ObjectDetectorSubscriber(Subscriber): + """Simplifies receiving a signal for object detection.""" + + topic_base = "object_detector/" + + def __init__(self, topic: str) -> None: + super().__init__(topic) + + def check_for_update(self): + return super().check_for_update(timeout=5) diff --git a/frigate/comms/webpush.py b/frigate/comms/webpush.py index d93c3169b..7bc66f3b7 100644 --- a/frigate/comms/webpush.py +++ b/frigate/comms/webpush.py @@ -81,7 +81,7 @@ class WebPushClient(Communicator): # type: ignore[misc] "config/notifications", exact=True ) self.config_subscriber = CameraConfigUpdateSubscriber( - self.config.cameras, [CameraConfigUpdateEnum.notifications] + self.config, self.config.cameras, [CameraConfigUpdateEnum.notifications] ) def subscribe(self, receiver: Callable) -> None: @@ -170,7 +170,12 @@ class WebPushClient(Communicator): # type: ignore[misc] if updated_notification_config: self.config.notifications = updated_notification_config - self.config_subscriber.check_for_updates() + updates = self.config_subscriber.check_for_updates() + + if "add" in updates: + for camera in updates["add"]: + self.suspended_cameras[camera] = 0 + self.last_camera_notification_time[camera] = 0 if topic == "reviews": decoded = json.loads(payload) diff --git a/frigate/config/camera/updater.py b/frigate/config/camera/updater.py index 5ddc26d44..83536fc46 100644 --- a/frigate/config/camera/updater.py +++ b/frigate/config/camera/updater.py @@ -5,12 +5,13 @@ from enum import Enum from typing import Any from frigate.comms.config_updater import ConfigPublisher, ConfigSubscriber -from frigate.config import CameraConfig +from frigate.config import CameraConfig, FrigateConfig class CameraConfigUpdateEnum(str, Enum): """Supported camera config update types.""" + add = "add" # for adding a camera audio = "audio" audio_transcription = "audio_transcription" birdseye = "birdseye" @@ -20,6 +21,7 @@ class CameraConfigUpdateEnum(str, Enum): notifications = 
"notifications" objects = "objects" record = "record" + remove = "remove" # for removing a camera review = "review" snapshots = "snapshots" zones = "zones" @@ -49,9 +51,11 @@ class CameraConfigUpdatePublisher: class CameraConfigUpdateSubscriber: def __init__( self, + config: FrigateConfig | None, camera_configs: dict[str, CameraConfig], topics: list[CameraConfigUpdateEnum], ): + self.config = config self.camera_configs = camera_configs self.topics = topics @@ -68,14 +72,23 @@ class CameraConfigUpdateSubscriber: def __update_config( self, camera: str, update_type: CameraConfigUpdateEnum, updated_config: Any ) -> None: - config = self.camera_configs[camera] + if update_type == CameraConfigUpdateEnum.add: + self.config.cameras[camera] = updated_config + self.camera_configs[camera] = updated_config + return + elif update_type == CameraConfigUpdateEnum.remove: + self.config.cameras.pop(camera) + self.camera_configs.pop(camera) + return + + config = self.camera_configs.get(camera) if not config: return if update_type == CameraConfigUpdateEnum.audio: config.audio = updated_config - if update_type == CameraConfigUpdateEnum.audio_transcription: + elif update_type == CameraConfigUpdateEnum.audio_transcription: config.audio_transcription = updated_config elif update_type == CameraConfigUpdateEnum.birdseye: config.birdseye = updated_config diff --git a/frigate/embeddings/maintainer.py b/frigate/embeddings/maintainer.py index ce81c2bc4..0980a8ae8 100644 --- a/frigate/embeddings/maintainer.py +++ b/frigate/embeddings/maintainer.py @@ -29,6 +29,10 @@ from frigate.comms.recordings_updater import ( ) from frigate.config import FrigateConfig from frigate.config.camera.camera import CameraTypeEnum +from frigate.config.camera.updater import ( + CameraConfigUpdateEnum, + CameraConfigUpdateSubscriber, +) from frigate.const import ( CLIPS_DIR, UPDATE_EVENT_DESCRIPTION, @@ -87,6 +91,11 @@ class EmbeddingMaintainer(threading.Thread): self.config = config self.metrics = metrics self.embeddings = None + self.config_updater = CameraConfigUpdateSubscriber( + self.config, + self.config.cameras, + [CameraConfigUpdateEnum.add, CameraConfigUpdateEnum.remove], + ) if config.semantic_search.enabled: self.embeddings = Embeddings(config, db, metrics) @@ -198,6 +207,7 @@ class EmbeddingMaintainer(threading.Thread): def run(self) -> None: """Maintain a SQLite-vec database for semantic search.""" while not self.stop_event.is_set(): + self.config_updater.check_for_updates() self._process_requests() self._process_updates() self._process_recordings_updates() @@ -206,6 +216,7 @@ class EmbeddingMaintainer(threading.Thread): self._process_finalized() self._process_event_metadata() + self.config_updater.stop() self.event_subscriber.stop() self.event_end_subscriber.stop() self.recordings_subscriber.stop() diff --git a/frigate/events/audio.py b/frigate/events/audio.py index aeeaf3b4f..797a767ba 100644 --- a/frigate/events/audio.py +++ b/frigate/events/audio.py @@ -90,10 +90,19 @@ class AudioProcessor(util.Process): self.camera_metrics = camera_metrics self.cameras = cameras self.config = config - self.transcription_model_runner = AudioTranscriptionModelRunner( - self.config.audio_transcription.device, - self.config.audio_transcription.model_size, - ) + + if any( + [ + conf.audio_transcription.enabled_in_config + for conf in config.cameras.values() + ] + ): + self.transcription_model_runner = AudioTranscriptionModelRunner( + self.config.audio_transcription.device, + self.config.audio_transcription.model_size, + ) + else: + 
self.transcription_model_runner = None def run(self) -> None: audio_threads: list[AudioEventMaintainer] = [] @@ -138,7 +147,7 @@ class AudioEventMaintainer(threading.Thread): camera: CameraConfig, config: FrigateConfig, camera_metrics: dict[str, CameraMetrics], - audio_transcription_model_runner: AudioTranscriptionModelRunner, + audio_transcription_model_runner: AudioTranscriptionModelRunner | None, stop_event: threading.Event, ) -> None: super().__init__(name=f"{camera.name}_audio_event_processor") @@ -162,6 +171,7 @@ class AudioEventMaintainer(threading.Thread): # create communication for audio detections self.requestor = InterProcessRequestor() self.config_subscriber = CameraConfigUpdateSubscriber( + None, {self.camera_config.name: self.camera_config}, [ CameraConfigUpdateEnum.audio, diff --git a/frigate/object_detection/base.py b/frigate/object_detection/base.py index c77a720a0..86febc6a7 100644 --- a/frigate/object_detection/base.py +++ b/frigate/object_detection/base.py @@ -13,6 +13,10 @@ import numpy as np from setproctitle import setproctitle import frigate.util as util +from frigate.comms.object_detector_signaler import ( + ObjectDetectorPublisher, + ObjectDetectorSubscriber, +) from frigate.detectors import create_detector from frigate.detectors.detector_config import ( BaseDetectorConfig, @@ -89,7 +93,7 @@ class LocalObjectDetector(ObjectDetector): def run_detector( name: str, detection_queue: Queue, - out_events: dict[str, MpEvent], + cameras: list[str], avg_speed: Value, start: Value, detector_config: BaseDetectorConfig, @@ -108,15 +112,19 @@ def run_detector( signal.signal(signal.SIGTERM, receiveSignal) signal.signal(signal.SIGINT, receiveSignal) - frame_manager = SharedMemoryFrameManager() - object_detector = LocalObjectDetector(detector_config=detector_config) - - outputs = {} - for name in out_events.keys(): + def create_output_shm(name: str): out_shm = UntrackedSharedMemory(name=f"out-{name}", create=False) out_np = np.ndarray((20, 6), dtype=np.float32, buffer=out_shm.buf) outputs[name] = {"shm": out_shm, "np": out_np} + frame_manager = SharedMemoryFrameManager() + object_detector = LocalObjectDetector(detector_config=detector_config) + detector_publisher = ObjectDetectorPublisher() + + outputs = {} + for name in cameras: + create_output_shm(name) + while not stop_event.is_set(): try: connection_id = detection_queue.get(timeout=1) @@ -136,12 +144,18 @@ def run_detector( detections = object_detector.detect_raw(input_frame) duration = datetime.datetime.now().timestamp() - start.value frame_manager.close(connection_id) + + if connection_id not in outputs: + create_output_shm(connection_id) + outputs[connection_id]["np"][:] = detections[:] - out_events[connection_id].set() + signal_id = f"{connection_id}/update" + detector_publisher.publish(signal_id, signal_id) start.value = 0.0 avg_speed.value = (avg_speed.value * 9 + duration) / 10 + detector_publisher.stop() logger.info("Exited detection process...") @@ -150,11 +164,11 @@ class ObjectDetectProcess: self, name: str, detection_queue: Queue, - out_events: dict[str, MpEvent], + cameras: list[str], detector_config: BaseDetectorConfig, ): self.name = name - self.out_events = out_events + self.cameras = cameras self.detection_queue = detection_queue self.avg_inference_speed = Value("d", 0.01) self.detection_start = Value("d", 0.0) @@ -185,7 +199,7 @@ class ObjectDetectProcess: args=( self.name, self.detection_queue, - self.out_events, + self.cameras, self.avg_inference_speed, self.detection_start, self.detector_config, @@ -201,7 
+215,6 @@ class RemoteObjectDetector: name: str, labels: dict[int, str], detection_queue: Queue, - event: MpEvent, model_config: ModelConfig, stop_event: MpEvent, ): @@ -209,7 +222,6 @@ class RemoteObjectDetector: self.name = name self.fps = EventsPerSecond() self.detection_queue = detection_queue - self.event = event self.stop_event = stop_event self.shm = UntrackedSharedMemory(name=self.name, create=False) self.np_shm = np.ndarray( @@ -219,6 +231,7 @@ class RemoteObjectDetector: ) self.out_shm = UntrackedSharedMemory(name=f"out-{self.name}", create=False) self.out_np_shm = np.ndarray((20, 6), dtype=np.float32, buffer=self.out_shm.buf) + self.detector_subscriber = ObjectDetectorSubscriber(f"{name}/update") def detect(self, tensor_input, threshold=0.4): detections = [] @@ -228,9 +241,8 @@ class RemoteObjectDetector: # copy input to shared memory self.np_shm[:] = tensor_input[:] - self.event.clear() self.detection_queue.put(self.name) - result = self.event.wait(timeout=5.0) + result = self.detector_subscriber.check_for_update() # if it timed out if result is None: @@ -246,5 +258,6 @@ class RemoteObjectDetector: return detections def cleanup(self): + self.detector_subscriber.stop() self.shm.unlink() self.out_shm.unlink() diff --git a/frigate/output/output.py b/frigate/output/output.py index 6decf0005..d323596fe 100644 --- a/frigate/output/output.py +++ b/frigate/output/output.py @@ -103,8 +103,10 @@ def output_frames( detection_subscriber = DetectionSubscriber(DetectionTypeEnum.video) config_subscriber = CameraConfigUpdateSubscriber( + config, config.cameras, [ + CameraConfigUpdateEnum.add, CameraConfigUpdateEnum.birdseye, CameraConfigUpdateEnum.enabled, CameraConfigUpdateEnum.record, @@ -135,7 +137,15 @@ def output_frames( while not stop_event.is_set(): # check if there is an updated config - config_subscriber.check_for_updates() + updates = config_subscriber.check_for_updates() + + if "add" in updates: + for camera in updates["add"]: + jsmpeg_cameras[camera] = JsmpegCamera( + cam_config, stop_event, websocket_server + ) + preview_recorders[camera] = PreviewRecorder(cam_config) + preview_write_times[camera] = 0 (topic, data) = detection_subscriber.check_for_update(timeout=1) now = datetime.datetime.now().timestamp() diff --git a/frigate/ptz/autotrack.py b/frigate/ptz/autotrack.py index f38bf1f5f..f0d8419dd 100644 --- a/frigate/ptz/autotrack.py +++ b/frigate/ptz/autotrack.py @@ -31,7 +31,7 @@ from frigate.const import ( ) from frigate.ptz.onvif import OnvifController from frigate.track.tracked_object import TrackedObject -from frigate.util.builtin import update_yaml_file +from frigate.util.builtin import update_yaml_file_bulk from frigate.util.config import find_config_file from frigate.util.image import SharedMemoryFrameManager, intersection_over_union @@ -348,10 +348,13 @@ class PtzAutoTracker: f"{camera}: Writing new config with autotracker motion coefficients: {self.config.cameras[camera].onvif.autotracking.movement_weights}" ) - update_yaml_file( + update_yaml_file_bulk( config_file, - ["cameras", camera, "onvif", "autotracking", "movement_weights"], - self.config.cameras[camera].onvif.autotracking.movement_weights, + { + f"cameras.{camera}.onvif.autotracking.movement_weights": self.config.cameras[ + camera + ].onvif.autotracking.movement_weights + }, ) async def _calibrate_camera(self, camera): diff --git a/frigate/record/maintainer.py b/frigate/record/maintainer.py index ace9a5d24..0883437da 100644 --- a/frigate/record/maintainer.py +++ b/frigate/record/maintainer.py @@ -75,7 +75,9 @@ 
class RecordingMaintainer(threading.Thread): # create communication for retained recordings self.requestor = InterProcessRequestor() self.config_subscriber = CameraConfigUpdateSubscriber( - self.config.cameras, [CameraConfigUpdateEnum.record] + self.config, + self.config.cameras, + [CameraConfigUpdateEnum.add, CameraConfigUpdateEnum.record], ) self.detection_subscriber = DetectionSubscriber(DetectionTypeEnum.all) self.recordings_publisher = RecordingsDataPublisher( diff --git a/frigate/review/maintainer.py b/frigate/review/maintainer.py index 7f60a0209..778717db3 100644 --- a/frigate/review/maintainer.py +++ b/frigate/review/maintainer.py @@ -154,10 +154,13 @@ class ReviewSegmentMaintainer(threading.Thread): # create communication for review segments self.requestor = InterProcessRequestor() self.config_subscriber = CameraConfigUpdateSubscriber( + config, config.cameras, [ + CameraConfigUpdateEnum.add, CameraConfigUpdateEnum.enabled, CameraConfigUpdateEnum.record, + CameraConfigUpdateEnum.remove, CameraConfigUpdateEnum.review, ], ) diff --git a/frigate/track/object_processing.py b/frigate/track/object_processing.py index e25c83815..79d2f16ef 100644 --- a/frigate/track/object_processing.py +++ b/frigate/track/object_processing.py @@ -70,9 +70,15 @@ class TrackedObjectProcessor(threading.Thread): self.last_motion_detected: dict[str, float] = {} self.ptz_autotracker_thread = ptz_autotracker_thread - self.config_subscriber = CameraConfigUpdateSubscriber( + self.camera_config_subscriber = CameraConfigUpdateSubscriber( + self.config, self.config.cameras, - [CameraConfigUpdateEnum.enabled, CameraConfigUpdateEnum.zones], + [ + CameraConfigUpdateEnum.add, + CameraConfigUpdateEnum.enabled, + CameraConfigUpdateEnum.remove, + CameraConfigUpdateEnum.zones, + ], ) self.requestor = InterProcessRequestor() @@ -95,6 +101,12 @@ class TrackedObjectProcessor(threading.Thread): self.zone_data = defaultdict(lambda: defaultdict(dict)) self.active_zone_data = defaultdict(lambda: defaultdict(dict)) + for camera in self.config.cameras.keys(): + self.create_camera_state(camera) + + def create_camera_state(self, camera: str) -> None: + """Creates a new camera state.""" + def start(camera: str, obj: TrackedObject, frame_name: str): self.event_sender.publish( ( @@ -206,17 +218,16 @@ class TrackedObjectProcessor(threading.Thread): self.camera_activity[camera] = activity self.requestor.send_data(UPDATE_CAMERA_ACTIVITY, self.camera_activity) - for camera in self.config.cameras.keys(): - camera_state = CameraState( - camera, self.config, self.frame_manager, self.ptz_autotracker_thread - ) - camera_state.on("start", start) - camera_state.on("autotrack", autotrack) - camera_state.on("update", update) - camera_state.on("end", end) - camera_state.on("snapshot", snapshot) - camera_state.on("camera_activity", camera_activity) - self.camera_states[camera] = camera_state + camera_state = CameraState( + camera, self.config, self.frame_manager, self.ptz_autotracker_thread + ) + camera_state.on("start", start) + camera_state.on("autotrack", autotrack) + camera_state.on("update", update) + camera_state.on("end", end) + camera_state.on("snapshot", snapshot) + camera_state.on("camera_activity", camera_activity) + self.camera_states[camera] = camera_state def should_save_snapshot(self, camera, obj: TrackedObject): if obj.false_positive: @@ -644,7 +655,7 @@ class TrackedObjectProcessor(threading.Thread): def run(self): while not self.stop_event.is_set(): # check for config updates - updated_topics = 
self.config_subscriber.check_for_updates() + updated_topics = self.camera_config_subscriber.check_for_updates() if "enabled" in updated_topics: for camera in updated_topics["enabled"]: @@ -652,6 +663,17 @@ class TrackedObjectProcessor(threading.Thread): self.camera_states[camera].prev_enabled = self.config.cameras[ camera ].enabled + elif "add" in updated_topics: + for camera in updated_topics["add"]: + self.config.cameras[camera] = ( + self.camera_config_subscriber.camera_configs[camera] + ) + self.create_camera_state(camera) + elif "remove" in updated_topics: + for camera in updated_topics["remove"]: + camera_state = self.camera_states[camera] + camera_state.shutdown() + self.camera_states.pop(camera) # manage camera disabled state for camera, config in self.config.cameras.items(): @@ -760,6 +782,6 @@ class TrackedObjectProcessor(threading.Thread): self.event_sender.stop() self.event_end_subscriber.stop() self.sub_label_subscriber.stop() - self.config_subscriber.stop() + self.camera_config_subscriber.stop() logger.info("Exiting object processor...") diff --git a/frigate/util/builtin.py b/frigate/util/builtin.py index 52280ecd8..0433af18e 100644 --- a/frigate/util/builtin.py +++ b/frigate/util/builtin.py @@ -14,7 +14,7 @@ import urllib.parse from collections.abc import Mapping from multiprocessing.sharedctypes import Synchronized from pathlib import Path -from typing import Any, Optional, Tuple, Union +from typing import Any, Dict, Optional, Tuple, Union from zoneinfo import ZoneInfoNotFoundError import numpy as np @@ -184,25 +184,12 @@ def create_mask(frame_shape, mask): mask_img[:] = 255 -def update_yaml_from_url(file_path, url): - parsed_url = urllib.parse.urlparse(url) - query_string = urllib.parse.parse_qs(parsed_url.query, keep_blank_values=True) - - # Filter out empty keys but keep blank values for non-empty keys - query_string = {k: v for k, v in query_string.items() if k} - +def process_config_query_string(query_string: Dict[str, list]) -> Dict[str, Any]: + updates = {} for key_path_str, new_value_list in query_string.items(): - key_path = key_path_str.split(".") - for i in range(len(key_path)): - try: - index = int(key_path[i]) - key_path[i] = (key_path[i - 1], index) - key_path.pop(i - 1) - except ValueError: - pass - + # use the string key as-is for updates dictionary if len(new_value_list) > 1: - update_yaml_file(file_path, key_path, new_value_list) + updates[key_path_str] = new_value_list else: value = new_value_list[0] try: @@ -210,10 +197,24 @@ def update_yaml_from_url(file_path, url): value = ast.literal_eval(value) if "," not in value else value except (ValueError, SyntaxError): pass - update_yaml_file(file_path, key_path, value) + updates[key_path_str] = value + return updates -def update_yaml_file(file_path, key_path, new_value): +def flatten_config_data( + config_data: Dict[str, Any], parent_key: str = "" +) -> Dict[str, Any]: + items = [] + for key, value in config_data.items(): + new_key = f"{parent_key}.{key}" if parent_key else key + if isinstance(value, dict): + items.extend(flatten_config_data(value, new_key).items()) + else: + items.append((new_key, value)) + return dict(items) + + +def update_yaml_file_bulk(file_path: str, updates: Dict[str, Any]): yaml = YAML() yaml.indent(mapping=2, sequence=4, offset=2) @@ -226,7 +227,17 @@ def update_yaml_file(file_path, key_path, new_value): ) return - data = update_yaml(data, key_path, new_value) + # Apply all updates + for key_path_str, new_value in updates.items(): + key_path = key_path_str.split(".") + for i in 
range(len(key_path)): + try: + index = int(key_path[i]) + key_path[i] = (key_path[i - 1], index) + key_path.pop(i - 1) + except ValueError: + pass + data = update_yaml(data, key_path, new_value) try: with open(file_path, "w") as f: diff --git a/frigate/video.py b/frigate/video.py index 5012c31c6..9710dbd81 100755 --- a/frigate/video.py +++ b/frigate/video.py @@ -116,7 +116,7 @@ def capture_frames( skipped_eps = EventsPerSecond() skipped_eps.start() config_subscriber = CameraConfigUpdateSubscriber( - {config.name: config}, [CameraConfigUpdateEnum.enabled] + None, {config.name: config}, [CameraConfigUpdateEnum.enabled] ) def get_enabled_state(): @@ -196,7 +196,7 @@ class CameraWatchdog(threading.Thread): self.sleeptime = self.config.ffmpeg.retry_interval self.config_subscriber = CameraConfigUpdateSubscriber( - {config.name: config}, [CameraConfigUpdateEnum.enabled] + None, {config.name: config}, [CameraConfigUpdateEnum.enabled] ) self.was_enabled = self.config.enabled @@ -483,7 +483,6 @@ def track_camera( model_config: ModelConfig, labelmap: dict[int, str], detection_queue: Queue, - result_connection: MpEvent, detected_objects_queue, camera_metrics: CameraMetrics, ptz_metrics: PTZMetrics, @@ -513,7 +512,7 @@ def track_camera( ptz_metrics=ptz_metrics, ) object_detector = RemoteObjectDetector( - name, labelmap, detection_queue, result_connection, model_config, stop_event + name, labelmap, detection_queue, model_config, stop_event ) object_tracker = NorfairTracker(config, ptz_metrics) @@ -607,6 +606,7 @@ def process_frames( ): next_region_update = get_tomorrow_at_time(2) config_subscriber = CameraConfigUpdateSubscriber( + None, {camera_name: camera_config}, [ CameraConfigUpdateEnum.detect, From 1caf8b97c485b023404452f5cd2273e9bae1006e Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Thu, 12 Jun 2025 12:12:34 -0600 Subject: [PATCH 025/530] Use Fork-Server As Spawn Method (#18682) * Set runtime * Use count correctly * Don't assume camera sizes * Use separate zmq proxy for object detection * Correct order * Use forkserver * Only store PID instead of entire process reference * Cleanup * Catch correct errors * Fix typing * Remove before_run from process util The before_run hook never actually ran, and the pattern risked a deadlock. Wrapping run via __getattribute__ on BaseProcess is a common multiprocessing pitfall: when start() is called, multiprocessing serializes the Process object and pickles it over to the newly spawned process, and the wrapper does not survive that round trip. (1) run is retrieved during serialization: pickling the Process object accesses the run attribute, which triggers the __getattribute__ wrapper and binds run_wrapper to self. (2) run_wrapper is bound to the parent process's self: the closure is created in the parent and captures the parent's Process instance. (3) Deserialization creates a new object: the child process re-creates the Process object from the pickled data, but the pickled run_wrapper still references the parent's self. This is a subtle but critical distinction. (4) The child's run is not the wrapped run: when the child process starts, it internally calls its own run method, which is the original multiprocessing.Process.run (or a direct override); the __getattribute__ wrapping is never applied to the Process object in the child's context. (A condensed sketch of the removed pattern follows this message.)
* Cleanup * Logging bugfix (#18465) * use mp Manager to handle logging queues A Python bug (https://github.com/python/cpython/issues/91555) was preventing logs from the embeddings maintainer process from printing. The bug is fixed in Python 3.14, but a viable workaround is to use the multiprocessing Manager, whose queues are managed more robustly and allow the logging to work correctly. (A minimal sketch of this workaround also follows the message.) * consolidate * fix typing * Fix typing * Use global log queue * Move to using process for logging * Convert camera tracking to process * Add more processes * Finalize process * Cleanup * Cleanup typing * Formatting * Remove daemon --------- Co-authored-by: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com>
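For reference, a condensed, standalone sketch of the wrapper pattern this commit removes (it mirrors the code deleted from frigate/util/process.py below; the comments restate the commit's analysis rather than independently verified behavior):

    import multiprocessing as mp
    from functools import wraps


    class BaseProcess(mp.Process):
        # Anti-pattern: wrap run() at attribute-access time via __getattribute__.
        def __getattribute__(self, name):
            if name == "run":
                run = super().__getattribute__("run")

                @wraps(run)
                def run_wrapper(*args, **kwargs):
                    try:
                        # Per the analysis above, the child never executes this
                        # wrapper: it was bound to the parent's instance.
                        self.before_run()
                        return run(*args, **kwargs)
                    finally:
                        self.after_run()

                return run_wrapper
            return super().__getattribute__(name)

        def before_run(self) -> None:
            pass

        def after_run(self) -> None:
            pass

And a minimal sketch of the Manager-queue logging workaround (a standalone illustration of the approach taken in frigate/log.py below, not Frigate's exact code; the worker function and handler choices are hypothetical):

    import logging
    import multiprocessing as mp
    from logging.handlers import QueueHandler, QueueListener


    def worker(log_queue) -> None:
        # Child process: route every record through the manager-backed queue.
        logging.basicConfig(
            handlers=[QueueHandler(log_queue)], level=logging.INFO, force=True
        )
        logging.getLogger("worker").info("logged from the child process")


    if __name__ == "__main__":
        mp.set_start_method("forkserver")
        manager = mp.Manager()
        # A manager-backed queue sidesteps the mp.Queue bug tracked in cpython#91555.
        log_queue = manager.Queue()
        listener = QueueListener(
            log_queue, logging.StreamHandler(), respect_handler_level=True
        )
        listener.start()

        child = mp.Process(target=worker, args=(log_queue,))
        child.start()
        child.join()

        listener.stop()
        manager.shutdown()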
--- frigate/__main__.py | 7 +- frigate/app.py | 61 ++-- frigate/camera/__init__.py | 32 +-- frigate/camera/maintainer.py | 65 ++--- frigate/comms/object_detector_signaler.py | 85 +++++- frigate/data_processing/types.py | 34 +-- frigate/embeddings/__init__.py | 58 ++-- frigate/embeddings/maintainer.py | 23 +- frigate/events/audio.py | 9 +- frigate/log.py | 13 +- frigate/object_detection/base.py | 139 +++++---- frigate/output/output.py | 330 +++++++++++----------- frigate/record/record.py | 60 ++-- frigate/review/review.py | 37 +-- frigate/stats/util.py | 10 +- frigate/util/builtin.py | 13 +- frigate/util/process.py | 29 +- frigate/video.py | 208 +++++++------- web/src/views/system/CameraMetrics.tsx | 2 +- 19 files changed, 606 insertions(+), 609 deletions(-) diff --git a/frigate/__main__.py b/frigate/__main__.py index 4c732be80..6dd5d130e 100644 --- a/frigate/__main__.py +++ b/frigate/__main__.py @@ -1,5 +1,6 @@ import argparse import faulthandler +import multiprocessing as mp import signal import sys import threading @@ -15,10 +16,11 @@ from frigate.util.config import find_config_file def main() -> None: + manager = mp.Manager() faulthandler.enable() # Setup the logging thread - setup_logging() + setup_logging(manager) threading.current_thread().name = "frigate" @@ -108,8 +110,9 @@ def main() -> None: sys.exit(0) # Run the main application. - FrigateApp(config).start() + FrigateApp(config, manager).start() if __name__ == "__main__": + mp.set_start_method("forkserver", force=True) main() diff --git a/frigate/app.py b/frigate/app.py index 186ed1195..010f311b9 100644 --- a/frigate/app.py +++ b/frigate/app.py @@ -5,6 +5,7 @@ import os import secrets import shutil from multiprocessing import Queue +from multiprocessing.managers import DictProxy, SyncManager from multiprocessing.synchronize import Event as MpEvent from pathlib import Path from typing import Optional @@ -14,7 +15,6 @@ import uvicorn from peewee_migrate import Router from playhouse.sqlite_ext import SqliteExtDatabase -import frigate.util as util from frigate.api.auth import hash_password from frigate.api.fastapi_app import create_fastapi_app from frigate.camera import CameraMetrics, PTZMetrics @@ -24,6 +24,7 @@ from frigate.comms.dispatcher import Dispatcher from frigate.comms.event_metadata_updater import EventMetadataPublisher from frigate.comms.inter_process import InterProcessCommunicator from frigate.comms.mqtt import MqttClient +from frigate.comms.object_detector_signaler import DetectorProxy from frigate.comms.webpush import WebPushClient from frigate.comms.ws import WebSocketClient from frigate.comms.zmq_proxy import ZmqProxy @@ -41,7 +42,7 @@ from frigate.const import ( ) from frigate.data_processing.types import DataProcessorMetrics from frigate.db.sqlitevecq import SqliteVecQueueDatabase -from frigate.embeddings import EmbeddingsContext, manage_embeddings +from frigate.embeddings import EmbeddingProcess, EmbeddingsContext from frigate.events.audio import AudioProcessor from frigate.events.cleanup import EventCleanup from frigate.events.maintainer import EventProcessor @@ -58,13 +59,13 @@ from frigate.models import ( User, ) from frigate.object_detection.base import ObjectDetectProcess -from frigate.output.output import output_frames +from frigate.output.output import OutputProcess from frigate.ptz.autotrack import PtzAutoTrackerThread from frigate.ptz.onvif import OnvifController from frigate.record.cleanup import RecordingCleanup from frigate.record.export import migrate_exports -from frigate.record.record import manage_recordings -from frigate.review.review import manage_review_segments +from frigate.record.record import RecordProcess +from frigate.review.review import ReviewProcess from frigate.stats.emitter import StatsEmitter from frigate.stats.util import stats_init from frigate.storage import StorageMaintainer @@ -80,16 +81,19 @@ logger = logging.getLogger(__name__) class FrigateApp: - def __init__(self, config: FrigateConfig) -> None: + def __init__(self, config: FrigateConfig, manager: SyncManager) -> None: + self.metrics_manager = manager self.audio_process: Optional[mp.Process] = None self.stop_event: MpEvent = mp.Event() self.detection_queue: Queue = mp.Queue() self.detectors: dict[str, ObjectDetectProcess] = {} self.detection_shms: list[mp.shared_memory.SharedMemory] = [] self.log_queue: Queue = mp.Queue() - self.camera_metrics: dict[str, CameraMetrics] = {} + self.camera_metrics: DictProxy = self.metrics_manager.dict() self.embeddings_metrics: DataProcessorMetrics | None = ( - DataProcessorMetrics(list(config.classification.custom.keys())) + DataProcessorMetrics( + self.metrics_manager, list(config.classification.custom.keys()) + ) if ( config.semantic_search.enabled or config.lpr.enabled @@ -127,7 +131,7 @@ class FrigateApp: def init_camera_metrics(self) -> None: # create camera_metrics for camera_name in self.config.cameras.keys(): -
self.camera_metrics[camera_name] = CameraMetrics() + self.camera_metrics[camera_name] = CameraMetrics(self.metrics_manager) self.ptz_metrics[camera_name] = PTZMetrics( autotracker_enabled=self.config.cameras[ camera_name @@ -221,24 +225,14 @@ class FrigateApp: self.processes["go2rtc"] = proc.info["pid"] def init_recording_manager(self) -> None: - recording_process = util.Process( - target=manage_recordings, - name="recording_manager", - args=(self.config,), - ) - recording_process.daemon = True + recording_process = RecordProcess(self.config) self.recording_process = recording_process recording_process.start() self.processes["recording"] = recording_process.pid or 0 logger.info(f"Recording process started: {recording_process.pid}") def init_review_segment_manager(self) -> None: - review_segment_process = util.Process( - target=manage_review_segments, - name="review_segment_manager", - args=(self.config,), - ) - review_segment_process.daemon = True + review_segment_process = ReviewProcess(self.config) self.review_segment_process = review_segment_process review_segment_process.start() self.processes["review_segment"] = review_segment_process.pid or 0 @@ -257,15 +251,10 @@ class FrigateApp: ): return - embedding_process = util.Process( - target=manage_embeddings, - name="embeddings_manager", - args=( - self.config, - self.embeddings_metrics, - ), + embedding_process = EmbeddingProcess( + self.config, + self.embeddings_metrics, ) - embedding_process.daemon = True self.embedding_process = embedding_process embedding_process.start() self.processes["embeddings"] = embedding_process.pid or 0 @@ -333,6 +322,7 @@ class FrigateApp: self.inter_config_updater = CameraConfigUpdatePublisher() self.event_metadata_updater = EventMetadataPublisher() self.inter_zmq_proxy = ZmqProxy() + self.detection_proxy = DetectorProxy() def init_onvif(self) -> None: self.onvif_controller = OnvifController(self.config, self.ptz_metrics) @@ -421,12 +411,7 @@ class FrigateApp: self.detected_frames_processor.start() def start_video_output_processor(self) -> None: - output_processor = util.Process( - target=output_frames, - name="output_processor", - args=(self.config,), - ) - output_processor.daemon = True + output_processor = OutputProcess(self.config) self.output_processor = output_processor output_processor.start() logger.info(f"Output process started: {output_processor.pid}") @@ -560,11 +545,11 @@ class FrigateApp: self.init_recording_manager() self.init_review_segment_manager() self.init_go2rtc() - self.start_detectors() self.init_embeddings_manager() self.bind_database() self.check_db_data_migrations() self.init_inter_process_communicator() + self.start_detectors() self.init_dispatcher() self.init_embeddings_client() self.start_video_output_processor() @@ -670,13 +655,13 @@ class FrigateApp: self.inter_config_updater.stop() self.event_metadata_updater.stop() self.inter_zmq_proxy.stop() + self.detection_proxy.stop() while len(self.detection_shms) > 0: shm = self.detection_shms.pop() shm.close() shm.unlink() - # exit the mp Manager process _stop_logging() - + self.metrics_manager.shutdown() os._exit(os.EX_OK) diff --git a/frigate/camera/__init__.py b/frigate/camera/__init__.py index 456751c52..77b1fd424 100644 --- a/frigate/camera/__init__.py +++ b/frigate/camera/__init__.py @@ -1,7 +1,7 @@ import multiprocessing as mp +from multiprocessing.managers import SyncManager from multiprocessing.sharedctypes import Synchronized from multiprocessing.synchronize import Event -from typing import Optional class CameraMetrics: @@ 
-16,25 +16,25 @@ class CameraMetrics: frame_queue: mp.Queue - process: Optional[mp.Process] - capture_process: Optional[mp.Process] + process_pid: Synchronized + capture_process_pid: Synchronized ffmpeg_pid: Synchronized - def __init__(self): - self.camera_fps = mp.Value("d", 0) - self.detection_fps = mp.Value("d", 0) - self.detection_frame = mp.Value("d", 0) - self.process_fps = mp.Value("d", 0) - self.skipped_fps = mp.Value("d", 0) - self.read_start = mp.Value("d", 0) - self.audio_rms = mp.Value("d", 0) - self.audio_dBFS = mp.Value("d", 0) + def __init__(self, manager: SyncManager): + self.camera_fps = manager.Value("d", 0) + self.detection_fps = manager.Value("d", 0) + self.detection_frame = manager.Value("d", 0) + self.process_fps = manager.Value("d", 0) + self.skipped_fps = manager.Value("d", 0) + self.read_start = manager.Value("d", 0) + self.audio_rms = manager.Value("d", 0) + self.audio_dBFS = manager.Value("d", 0) - self.frame_queue = mp.Queue(maxsize=2) + self.frame_queue = manager.Queue(maxsize=2) - self.process = None - self.capture_process = None - self.ffmpeg_pid = mp.Value("i", 0) + self.process_pid = manager.Value("i", 0) + self.capture_process_pid = manager.Value("i", 0) + self.ffmpeg_pid = manager.Value("i", 0) class PTZMetrics: diff --git a/frigate/camera/maintainer.py b/frigate/camera/maintainer.py index 6abeb762e..dd978bbfc 100644 --- a/frigate/camera/maintainer.py +++ b/frigate/camera/maintainer.py @@ -1,10 +1,12 @@ """Create and maintain camera processes / management.""" import logging +import multiprocessing as mp import os import shutil import threading from multiprocessing import Queue +from multiprocessing.managers import DictProxy from multiprocessing.synchronize import Event as MpEvent from frigate.camera import CameraMetrics, PTZMetrics @@ -16,11 +18,10 @@ from frigate.config.camera.updater import ( ) from frigate.const import SHM_FRAMES_VAR from frigate.models import Regions -from frigate.util import Process as FrigateProcess from frigate.util.builtin import empty_and_close_queue from frigate.util.image import SharedMemoryFrameManager, UntrackedSharedMemory from frigate.util.object import get_camera_regions_grid -from frigate.video import capture_camera, track_camera +from frigate.video import CameraCapture, CameraTracker logger = logging.getLogger(__name__) @@ -31,7 +32,7 @@ class CameraMaintainer(threading.Thread): config: FrigateConfig, detection_queue: Queue, detected_frames_queue: Queue, - camera_metrics: dict[str, CameraMetrics], + camera_metrics: DictProxy, ptz_metrics: dict[str, PTZMetrics], stop_event: MpEvent, ): @@ -53,6 +54,8 @@ class CameraMaintainer(threading.Thread): ], ) self.shm_count = self.__calculate_shm_frame_count() + self.camera_processes: dict[str, mp.Process] = {} + self.capture_processes: dict[str, mp.Process] = {} def __init_historical_regions(self) -> None: # delete region grids for removed or renamed cameras @@ -94,7 +97,7 @@ class CameraMaintainer(threading.Thread): # leave room for 2 cameras that are added dynamically, if a user wants to add more cameras they may need to increase the SHM size and restart after adding them. 
cam_total_frame_size += 2 * round( - (camera.detect.width * camera.detect.height * 1.5 + 270480) / 1048576, + (1280 * 720 * 1.5 + 270480) / 1048576, 1, ) @@ -151,24 +154,19 @@ class CameraMaintainer(threading.Thread): except FileExistsError: pass - camera_process = FrigateProcess( - target=track_camera, - name=f"camera_processor:{name}", - args=( - config.name, - config, - self.config.model, - self.config.model.merged_labelmap, - self.detection_queue, - self.detected_frames_queue, - self.camera_metrics[name], - self.ptz_metrics[name], - self.region_grids[name], - ), - daemon=True, + camera_process = CameraTracker( + config, + self.config.model, + self.config.model.merged_labelmap, + self.detection_queue, + self.detected_frames_queue, + self.camera_metrics[name], + self.ptz_metrics[name], + self.region_grids[name], ) - self.camera_metrics[config.name].process = camera_process + self.camera_processes[config.name] = camera_process camera_process.start() + self.camera_metrics[config.name].process_pid.value = camera_process.pid logger.info(f"Camera processor started for {config.name}: {camera_process.pid}") def __start_camera_capture( @@ -179,36 +177,33 @@ class CameraMaintainer(threading.Thread): return # pre-create shms - for i in range(10 if runtime else self.shm_count): + count = 10 if runtime else self.shm_count + for i in range(count): frame_size = config.frame_shape_yuv[0] * config.frame_shape_yuv[1] self.frame_manager.create(f"{config.name}_frame{i}", frame_size) - capture_process = FrigateProcess( - target=capture_camera, - name=f"camera_capture:{name}", - args=(config, self.shm_count, self.camera_metrics[name]), - ) + capture_process = CameraCapture(config, count, self.camera_metrics[name]) capture_process.daemon = True - self.camera_metrics[name].capture_process = capture_process + self.capture_processes[name] = capture_process capture_process.start() + self.camera_metrics[name].capture_process_pid.value = capture_process.pid logger.info(f"Capture process started for {name}: {capture_process.pid}") def __stop_camera_capture_process(self, camera: str) -> None: - capture_process = self.camera_metrics[camera].capture_process + capture_process = self.capture_processes[camera] if capture_process is not None: logger.info(f"Waiting for capture process for {camera} to stop") capture_process.terminate() capture_process.join() def __stop_camera_process(self, camera: str) -> None: - metrics = self.camera_metrics[camera] - camera_process = metrics.process + camera_process = self.camera_processes[camera] if camera_process is not None: logger.info(f"Waiting for process for {camera} to stop") camera_process.terminate() camera_process.join() logger.info(f"Closing frame queue for {camera}") - empty_and_close_queue(metrics.frame_queue) + empty_and_close_queue(self.camera_metrics[camera].frame_queue) def run(self): self.__init_historical_regions() @@ -230,18 +225,20 @@ class CameraMaintainer(threading.Thread): runtime=True, ) self.__start_camera_capture( - camera, self.update_subscriber.camera_configs[camera] + camera, + self.update_subscriber.camera_configs[camera], + runtime=True, ) elif update_type == CameraConfigUpdateEnum.remove.name: self.__stop_camera_capture_process(camera) self.__stop_camera_process(camera) # ensure the capture processes are done - for camera in self.camera_metrics.keys(): + for camera in self.camera_processes.keys(): self.__stop_camera_capture_process(camera) # ensure the camera processors are done - for camera in self.camera_metrics.keys(): + for camera in 
self.capture_processes.keys(): self.__stop_camera_process(camera) self.update_subscriber.stop() diff --git a/frigate/comms/object_detector_signaler.py b/frigate/comms/object_detector_signaler.py index befc83e4d..e8871db1a 100644 --- a/frigate/comms/object_detector_signaler.py +++ b/frigate/comms/object_detector_signaler.py @@ -1,21 +1,92 @@ """Facilitates communication between processes for object detection signals.""" -from .zmq_proxy import Publisher, Subscriber +import threading + +import zmq + +SOCKET_PUB = "ipc:///tmp/cache/detector_pub" +SOCKET_SUB = "ipc:///tmp/cache/detector_sub" -class ObjectDetectorPublisher(Publisher): +class ZmqProxyRunner(threading.Thread): + def __init__(self, context: zmq.Context[zmq.Socket]) -> None: + super().__init__(name="detector_proxy") + self.context = context + + def run(self) -> None: + """Run the proxy.""" + incoming = self.context.socket(zmq.XSUB) + incoming.bind(SOCKET_PUB) + outgoing = self.context.socket(zmq.XPUB) + outgoing.bind(SOCKET_SUB) + + # Blocking: This will unblock (via exception) when we destroy the context + # The incoming and outgoing sockets will be closed automatically + # when the context is destroyed as well. + try: + zmq.proxy(incoming, outgoing) + except zmq.ZMQError: + pass + + +class DetectorProxy: + """Proxies object detection signals.""" + + def __init__(self) -> None: + self.context = zmq.Context() + self.runner = ZmqProxyRunner(self.context) + self.runner.start() + + def stop(self) -> None: + # destroying the context will tell the proxy to stop + self.context.destroy() + self.runner.join() + + +class ObjectDetectorPublisher: """Publishes signal for object detection to different processes.""" topic_base = "object_detector/" + def __init__(self, topic: str = "") -> None: + self.topic = f"{self.topic_base}{topic}" + self.context = zmq.Context() + self.socket = self.context.socket(zmq.PUB) + self.socket.connect(SOCKET_PUB) -class ObjectDetectorSubscriber(Subscriber): + def publish(self, sub_topic: str = "") -> None: + """Publish message.""" + self.socket.send_string(f"{self.topic}{sub_topic}/") + + def stop(self) -> None: + self.socket.close() + self.context.destroy() + + +class ObjectDetectorSubscriber: """Simplifies receiving a signal for object detection.""" topic_base = "object_detector/" - def __init__(self, topic: str) -> None: - super().__init__(topic) + def __init__(self, topic: str = "") -> None: + self.topic = f"{self.topic_base}{topic}/" + self.context = zmq.Context() + self.socket = self.context.socket(zmq.SUB) + self.socket.setsockopt_string(zmq.SUBSCRIBE, self.topic) + self.socket.connect(SOCKET_SUB) - def check_for_update(self): - return super().check_for_update(timeout=5) + def check_for_update(self, timeout: float = 5) -> str | None: + """Returns message or None if no update.""" + try: + has_update, _, _ = zmq.select([self.socket], [], [], timeout) + + if has_update: + return self.socket.recv_string(flags=zmq.NOBLOCK) + except zmq.ZMQError: + pass + + return None + + def stop(self) -> None: + self.socket.close() + self.context.destroy() diff --git a/frigate/data_processing/types.py b/frigate/data_processing/types.py index 50f1ed561..d18a1175a 100644 --- a/frigate/data_processing/types.py +++ b/frigate/data_processing/types.py @@ -1,7 +1,7 @@ """Embeddings types.""" -import multiprocessing as mp from enum import Enum +from multiprocessing.managers import SyncManager from multiprocessing.sharedctypes import Synchronized import sherpa_onnx @@ -20,25 +20,27 @@ class DataProcessorMetrics: alpr_pps: Synchronized 
yolov9_lpr_speed: Synchronized yolov9_lpr_pps: Synchronized - classification_speeds: dict[str, Synchronized] = {} - classification_cps: dict[str, Synchronized] = {} + classification_speeds: dict[str, Synchronized] + classification_cps: dict[str, Synchronized] - def __init__(self, custom_classification_models: list[str]): - self.image_embeddings_speed = mp.Value("d", 0.0) - self.image_embeddings_eps = mp.Value("d", 0.0) - self.text_embeddings_speed = mp.Value("d", 0.0) - self.text_embeddings_eps = mp.Value("d", 0.0) - self.face_rec_speed = mp.Value("d", 0.0) - self.face_rec_fps = mp.Value("d", 0.0) - self.alpr_speed = mp.Value("d", 0.0) - self.alpr_pps = mp.Value("d", 0.0) - self.yolov9_lpr_speed = mp.Value("d", 0.0) - self.yolov9_lpr_pps = mp.Value("d", 0.0) + def __init__(self, manager: SyncManager, custom_classification_models: list[str]): + self.image_embeddings_speed = manager.Value("d", 0.0) + self.image_embeddings_eps = manager.Value("d", 0.0) + self.text_embeddings_speed = manager.Value("d", 0.0) + self.text_embeddings_eps = manager.Value("d", 0.0) + self.face_rec_speed = manager.Value("d", 0.0) + self.face_rec_fps = manager.Value("d", 0.0) + self.alpr_speed = manager.Value("d", 0.0) + self.alpr_pps = manager.Value("d", 0.0) + self.yolov9_lpr_speed = manager.Value("d", 0.0) + self.yolov9_lpr_pps = manager.Value("d", 0.0) + self.classification_speeds = manager.dict() + self.classification_cps = manager.dict() if custom_classification_models: for key in custom_classification_models: - self.classification_speeds[key] = mp.Value("d", 0.0) - self.classification_cps[key] = mp.Value("d", 0.0) + self.classification_speeds[key] = manager.Value("d", 0.0) + self.classification_cps[key] = manager.Value("d", 0.0) class DataProcessorModelRunner: diff --git a/frigate/embeddings/__init__.py b/frigate/embeddings/__init__.py index 037cadcf0..054f2c334 100644 --- a/frigate/embeddings/__init__.py +++ b/frigate/embeddings/__init__.py @@ -3,27 +3,23 @@ import base64 import json import logging -import multiprocessing as mp import os -import signal import threading from json.decoder import JSONDecodeError -from types import FrameType -from typing import Any, Optional, Union +from typing import Any, Union import regex from pathvalidate import ValidationError, sanitize_filename -from setproctitle import setproctitle from frigate.comms.embeddings_updater import EmbeddingsRequestEnum, EmbeddingsRequestor from frigate.config import FrigateConfig from frigate.const import CONFIG_DIR, FACE_DIR from frigate.data_processing.types import DataProcessorMetrics from frigate.db.sqlitevecq import SqliteVecQueueDatabase -from frigate.models import Event, Recordings +from frigate.models import Event +from frigate.util import Process as FrigateProcess from frigate.util.builtin import serialize from frigate.util.classification import kickoff_model_training -from frigate.util.services import listen from .maintainer import EmbeddingMaintainer from .util import ZScoreNormalization @@ -31,40 +27,22 @@ from .util import ZScoreNormalization logger = logging.getLogger(__name__) -def manage_embeddings(config: FrigateConfig, metrics: DataProcessorMetrics) -> None: - stop_event = mp.Event() +class EmbeddingProcess(FrigateProcess): + def __init__( + self, config: FrigateConfig, metrics: DataProcessorMetrics | None + ) -> None: + super().__init__(name="frigate.embeddings_manager", daemon=True) + self.config = config + self.metrics = metrics - def receiveSignal(signalNumber: int, frame: Optional[FrameType]) -> None: - stop_event.set() - 
- signal.signal(signal.SIGTERM, receiveSignal) - signal.signal(signal.SIGINT, receiveSignal) - - threading.current_thread().name = "process:embeddings_manager" - setproctitle("frigate.embeddings_manager") - listen() - - # Configure Frigate DB - db = SqliteVecQueueDatabase( - config.database.path, - pragmas={ - "auto_vacuum": "FULL", # Does not defragment database - "cache_size": -512 * 1000, # 512MB of cache - "synchronous": "NORMAL", # Safe when using WAL https://www.sqlite.org/pragma.html#pragma_synchronous - }, - timeout=max(60, 10 * len([c for c in config.cameras.values() if c.enabled])), - load_vec_extension=True, - ) - models = [Event, Recordings] - db.bind(models) - - maintainer = EmbeddingMaintainer( - db, - config, - metrics, - stop_event, - ) - maintainer.start() + def run(self) -> None: + self.pre_run_setup() + maintainer = EmbeddingMaintainer( + self.config, + self.metrics, + self.stop_event, + ) + maintainer.start() class EmbeddingsContext: diff --git a/frigate/embeddings/maintainer.py b/frigate/embeddings/maintainer.py index 0980a8ae8..c659d04fe 100644 --- a/frigate/embeddings/maintainer.py +++ b/frigate/embeddings/maintainer.py @@ -12,7 +12,6 @@ from typing import Any, Optional import cv2 import numpy as np from peewee import DoesNotExist -from playhouse.sqliteq import SqliteQueueDatabase from frigate.comms.detections_updater import DetectionSubscriber, DetectionTypeEnum from frigate.comms.embeddings_updater import EmbeddingsRequestEnum, EmbeddingsResponder @@ -58,9 +57,10 @@ from frigate.data_processing.real_time.license_plate import ( LicensePlateRealTimeProcessor, ) from frigate.data_processing.types import DataProcessorMetrics, PostProcessDataEnum +from frigate.db.sqlitevecq import SqliteVecQueueDatabase from frigate.events.types import EventTypeEnum, RegenerateDescriptionEnum from frigate.genai import get_genai_client -from frigate.models import Event +from frigate.models import Event, Recordings from frigate.types import TrackedObjectUpdateTypesEnum from frigate.util.builtin import serialize from frigate.util.image import ( @@ -82,9 +82,8 @@ class EmbeddingMaintainer(threading.Thread): def __init__( self, - db: SqliteQueueDatabase, config: FrigateConfig, - metrics: DataProcessorMetrics, + metrics: DataProcessorMetrics | None, stop_event: MpEvent, ) -> None: super().__init__(name="embeddings_maintainer") @@ -97,6 +96,22 @@ class EmbeddingMaintainer(threading.Thread): [CameraConfigUpdateEnum.add, CameraConfigUpdateEnum.remove], ) + # Configure Frigate DB + db = SqliteVecQueueDatabase( + config.database.path, + pragmas={ + "auto_vacuum": "FULL", # Does not defragment database + "cache_size": -512 * 1000, # 512MB of cache + "synchronous": "NORMAL", # Safe when using WAL https://www.sqlite.org/pragma.html#pragma_synchronous + }, + timeout=max( + 60, 10 * len([c for c in config.cameras.values() if c.enabled]) + ), + load_vec_extension=True, + ) + models = [Event, Recordings] + db.bind(models) + if config.semantic_search.enabled: self.embeddings = Embeddings(config, db, metrics) diff --git a/frigate/events/audio.py b/frigate/events/audio.py index 797a767ba..9152428fa 100644 --- a/frigate/events/audio.py +++ b/frigate/events/audio.py @@ -6,12 +6,12 @@ import random import string import threading import time +from multiprocessing.managers import DictProxy from typing import Any, Tuple import numpy as np import frigate.util as util -from frigate.camera import CameraMetrics from frigate.comms.detections_updater import DetectionPublisher, DetectionTypeEnum from 
frigate.comms.event_metadata_updater import ( EventMetadataPublisher, @@ -83,7 +83,7 @@ class AudioProcessor(util.Process): self, config: FrigateConfig, cameras: list[CameraConfig], - camera_metrics: dict[str, CameraMetrics], + camera_metrics: DictProxy, ): super().__init__(name="frigate.audio_manager", daemon=True) @@ -93,7 +93,7 @@ class AudioProcessor(util.Process): if any( [ - conf.audio_transcription.enabled_in_config + conf.audio_transcription.enabled_in_config == True for conf in config.cameras.values() ] ): @@ -105,6 +105,7 @@ class AudioProcessor(util.Process): self.transcription_model_runner = None def run(self) -> None: + self.pre_run_setup() audio_threads: list[AudioEventMaintainer] = [] threading.current_thread().name = "process:audio_manager" @@ -146,7 +147,7 @@ class AudioEventMaintainer(threading.Thread): self, camera: CameraConfig, config: FrigateConfig, - camera_metrics: dict[str, CameraMetrics], + camera_metrics: DictProxy, audio_transcription_model_runner: AudioTranscriptionModelRunner | None, stop_event: threading.Event, ) -> None: diff --git a/frigate/log.py b/frigate/log.py index 096b52215..f535a278c 100644 --- a/frigate/log.py +++ b/frigate/log.py @@ -1,12 +1,12 @@ # In log.py import atexit import logging -import multiprocessing as mp import os import sys import threading from collections import deque from logging.handlers import QueueHandler, QueueListener +from multiprocessing.managers import SyncManager from queue import Queue from typing import Deque, Optional @@ -35,12 +35,10 @@ LOG_HANDLER.addFilter( log_listener: Optional[QueueListener] = None log_queue: Optional[Queue] = None -manager = None -def setup_logging() -> None: - global log_listener, log_queue, manager - manager = mp.Manager() +def setup_logging(manager: SyncManager) -> None: + global log_listener, log_queue log_queue = manager.Queue() log_listener = QueueListener(log_queue, LOG_HANDLER, respect_handler_level=True) @@ -57,13 +55,10 @@ def setup_logging() -> None: def _stop_logging() -> None: - global log_listener, manager + global log_listener if log_listener is not None: log_listener.stop() log_listener = None - if manager is not None: - manager.shutdown() - manager = None # When a multiprocessing.Process exits, python tries to flush stdout and stderr. 
However, if the diff --git a/frigate/object_detection/base.py b/frigate/object_detection/base.py index 86febc6a7..d203e8574 100644 --- a/frigate/object_detection/base.py +++ b/frigate/object_detection/base.py @@ -1,16 +1,11 @@ import datetime import logging -import multiprocessing as mp -import os import queue -import signal -import threading from abc import ABC, abstractmethod from multiprocessing import Queue, Value from multiprocessing.synchronize import Event as MpEvent import numpy as np -from setproctitle import setproctitle import frigate.util as util from frigate.comms.object_detector_signaler import ( @@ -25,7 +20,6 @@ from frigate.detectors.detector_config import ( ) from frigate.util.builtin import EventsPerSecond, load_labels from frigate.util.image import SharedMemoryFrameManager, UntrackedSharedMemory -from frigate.util.services import listen from .util import tensor_transform @@ -90,73 +84,75 @@ class LocalObjectDetector(ObjectDetector): return self.detect_api.detect_raw(tensor_input=tensor_input) -def run_detector( - name: str, - detection_queue: Queue, - cameras: list[str], - avg_speed: Value, - start: Value, - detector_config: BaseDetectorConfig, -): - threading.current_thread().name = f"detector:{name}" - logger = logging.getLogger(f"detector.{name}") - logger.info(f"Starting detection process: {os.getpid()}") - setproctitle(f"frigate.detector.{name}") - listen() +class DetectorRunner(util.Process): + def __init__( + self, + name, + detection_queue: Queue, + cameras: list[str], + avg_speed: Value, + start_time: Value, + detector_config: BaseDetectorConfig, + ) -> None: + super().__init__(name=name, daemon=True) + self.detection_queue = detection_queue + self.cameras = cameras + self.avg_speed = avg_speed + self.start_time = start_time + self.detector_config = detector_config + self.outputs: dict = {} - stop_event: MpEvent = mp.Event() - - def receiveSignal(signalNumber, frame): - stop_event.set() - - signal.signal(signal.SIGTERM, receiveSignal) - signal.signal(signal.SIGINT, receiveSignal) - - def create_output_shm(name: str): + def create_output_shm(self, name: str): out_shm = UntrackedSharedMemory(name=f"out-{name}", create=False) out_np = np.ndarray((20, 6), dtype=np.float32, buffer=out_shm.buf) - outputs[name] = {"shm": out_shm, "np": out_np} + self.outputs[name] = {"shm": out_shm, "np": out_np} - frame_manager = SharedMemoryFrameManager() - object_detector = LocalObjectDetector(detector_config=detector_config) - detector_publisher = ObjectDetectorPublisher() + def run(self) -> None: + self.pre_run_setup() - outputs = {} - for name in cameras: - create_output_shm(name) + frame_manager = SharedMemoryFrameManager() + object_detector = LocalObjectDetector(detector_config=self.detector_config) + detector_publisher = ObjectDetectorPublisher() - while not stop_event.is_set(): - try: - connection_id = detection_queue.get(timeout=1) - except queue.Empty: - continue - input_frame = frame_manager.get( - connection_id, - (1, detector_config.model.height, detector_config.model.width, 3), - ) + for name in self.cameras: + self.create_output_shm(name) - if input_frame is None: - logger.warning(f"Failed to get frame {connection_id} from SHM") - continue + while not self.stop_event.is_set(): + try: + connection_id = self.detection_queue.get(timeout=1) + except queue.Empty: + continue + input_frame = frame_manager.get( + connection_id, + ( + 1, + self.detector_config.model.height, + self.detector_config.model.width, + 3, + ), + ) - # detect and send the output - start.value = 
datetime.datetime.now().timestamp() - detections = object_detector.detect_raw(input_frame) - duration = datetime.datetime.now().timestamp() - start.value - frame_manager.close(connection_id) + if input_frame is None: + logger.warning(f"Failed to get frame {connection_id} from SHM") + continue - if connection_id not in outputs: - create_output_shm(connection_id) + # detect and send the output + self.start_time.value = datetime.datetime.now().timestamp() + detections = object_detector.detect_raw(input_frame) + duration = datetime.datetime.now().timestamp() - self.start_time.value + frame_manager.close(connection_id) - outputs[connection_id]["np"][:] = detections[:] - signal_id = f"{connection_id}/update" - detector_publisher.publish(signal_id, signal_id) - start.value = 0.0 + if connection_id not in self.outputs: + self.create_output_shm(connection_id) - avg_speed.value = (avg_speed.value * 9 + duration) / 10 + self.outputs[connection_id]["np"][:] = detections[:] + detector_publisher.publish(connection_id) + self.start_time.value = 0.0 - detector_publisher.stop() - logger.info("Exited detection process...") + self.avg_speed.value = (self.avg_speed.value * 9 + duration) / 10 + + detector_publisher.stop() + logger.info("Exited detection process...") class ObjectDetectProcess: @@ -193,19 +189,14 @@ class ObjectDetectProcess: self.detection_start.value = 0.0 if (self.detect_process is not None) and self.detect_process.is_alive(): self.stop() - self.detect_process = util.Process( - target=run_detector, - name=f"detector:{self.name}", - args=( - self.name, - self.detection_queue, - self.cameras, - self.avg_inference_speed, - self.detection_start, - self.detector_config, - ), + self.detect_process = DetectorRunner( + f"detector:{self.name}", + self.detection_queue, + self.cameras, + self.avg_inference_speed, + self.detection_start, + self.detector_config, ) - self.detect_process.daemon = True self.detect_process.start() @@ -231,7 +222,7 @@ class RemoteObjectDetector: ) self.out_shm = UntrackedSharedMemory(name=f"out-{self.name}", create=False) self.out_np_shm = np.ndarray((20, 6), dtype=np.float32, buffer=self.out_shm.buf) - self.detector_subscriber = ObjectDetectorSubscriber(f"{name}/update") + self.detector_subscriber = ObjectDetectorSubscriber(name) def detect(self, tensor_input, threshold=0.4): detections = [] diff --git a/frigate/output/output.py b/frigate/output/output.py index d323596fe..8c60e51c7 100644 --- a/frigate/output/output.py +++ b/frigate/output/output.py @@ -2,14 +2,11 @@ import datetime import logging -import multiprocessing as mp import os import shutil -import signal import threading from wsgiref.simple_server import make_server -from setproctitle import setproctitle from ws4py.server.wsgirefserver import ( WebSocketWSGIHandler, WebSocketWSGIRequestHandler, @@ -17,6 +14,7 @@ from ws4py.server.wsgirefserver import ( ) from ws4py.server.wsgiutils import WebSocketWSGIApplication +import frigate.util as util from frigate.comms.detections_updater import DetectionSubscriber, DetectionTypeEnum from frigate.comms.ws import WebSocket from frigate.config import FrigateConfig @@ -73,189 +71,193 @@ def check_disabled_camera_update( birdseye.all_cameras_disabled() -def output_frames( - config: FrigateConfig, -): - threading.current_thread().name = "output" - setproctitle("frigate.output") +class OutputProcess(util.Process): + def __init__(self, config: FrigateConfig) -> None: + super().__init__(name="frigate.output", daemon=True) + self.config = config - stop_event = mp.Event() + def 
run(self) -> None: + self.pre_run_setup() - def receiveSignal(signalNumber, frame): - stop_event.set() + frame_manager = SharedMemoryFrameManager() - signal.signal(signal.SIGTERM, receiveSignal) - signal.signal(signal.SIGINT, receiveSignal) - - frame_manager = SharedMemoryFrameManager() - - # start a websocket server on 8082 - WebSocketWSGIHandler.http_version = "1.1" - websocket_server = make_server( - "127.0.0.1", - 8082, - server_class=WSGIServer, - handler_class=WebSocketWSGIRequestHandler, - app=WebSocketWSGIApplication(handler_cls=WebSocket), - ) - websocket_server.initialize_websockets_manager() - websocket_thread = threading.Thread(target=websocket_server.serve_forever) - - detection_subscriber = DetectionSubscriber(DetectionTypeEnum.video) - config_subscriber = CameraConfigUpdateSubscriber( - config, - config.cameras, - [ - CameraConfigUpdateEnum.add, - CameraConfigUpdateEnum.birdseye, - CameraConfigUpdateEnum.enabled, - CameraConfigUpdateEnum.record, - ], - ) - - jsmpeg_cameras: dict[str, JsmpegCamera] = {} - birdseye: Birdseye | None = None - preview_recorders: dict[str, PreviewRecorder] = {} - preview_write_times: dict[str, float] = {} - failed_frame_requests: dict[str, int] = {} - last_disabled_cam_check = datetime.datetime.now().timestamp() - - move_preview_frames("cache") - - for camera, cam_config in config.cameras.items(): - if not cam_config.enabled_in_config: - continue - - jsmpeg_cameras[camera] = JsmpegCamera(cam_config, stop_event, websocket_server) - preview_recorders[camera] = PreviewRecorder(cam_config) - preview_write_times[camera] = 0 - - if config.birdseye.enabled: - birdseye = Birdseye(config, stop_event, websocket_server) - - websocket_thread.start() - - while not stop_event.is_set(): - # check if there is an updated config - updates = config_subscriber.check_for_updates() - - if "add" in updates: - for camera in updates["add"]: - jsmpeg_cameras[camera] = JsmpegCamera( - cam_config, stop_event, websocket_server - ) - preview_recorders[camera] = PreviewRecorder(cam_config) - preview_write_times[camera] = 0 - - (topic, data) = detection_subscriber.check_for_update(timeout=1) - now = datetime.datetime.now().timestamp() - - if now - last_disabled_cam_check > 5: - # check disabled cameras every 5 seconds - last_disabled_cam_check = now - check_disabled_camera_update( - config, birdseye, preview_recorders, preview_write_times - ) - - if not topic: - continue - - ( - camera, - frame_name, - frame_time, - current_tracked_objects, - motion_boxes, - _, - ) = data - - if not config.cameras[camera].enabled: - continue - - frame = frame_manager.get(frame_name, config.cameras[camera].frame_shape_yuv) - - if frame is None: - logger.debug(f"Failed to get frame {frame_name} from SHM") - failed_frame_requests[camera] = failed_frame_requests.get(camera, 0) + 1 - - if failed_frame_requests[camera] > config.cameras[camera].detect.fps: - logger.warning( - f"Failed to retrieve many frames for {camera} from SHM, consider increasing SHM size if this continues." 
- ) - - continue - else: - failed_frame_requests[camera] = 0 - - # send frames for low fps recording - preview_recorders[camera].write_data( - current_tracked_objects, motion_boxes, frame_time, frame + # start a websocket server on 8082 + WebSocketWSGIHandler.http_version = "1.1" + websocket_server = make_server( + "127.0.0.1", + 8082, + server_class=WSGIServer, + handler_class=WebSocketWSGIRequestHandler, + app=WebSocketWSGIApplication(handler_cls=WebSocket), ) - preview_write_times[camera] = frame_time + websocket_server.initialize_websockets_manager() + websocket_thread = threading.Thread(target=websocket_server.serve_forever) - # send camera frame to ffmpeg process if websockets are connected - if any( - ws.environ["PATH_INFO"].endswith(camera) for ws in websocket_server.manager - ): - # write to the converter for the camera if clients are listening to the specific camera - jsmpeg_cameras[camera].write_frame(frame.tobytes()) + detection_subscriber = DetectionSubscriber(DetectionTypeEnum.video) + config_subscriber = CameraConfigUpdateSubscriber( + self.config, + self.config.cameras, + [ + CameraConfigUpdateEnum.add, + CameraConfigUpdateEnum.birdseye, + CameraConfigUpdateEnum.enabled, + CameraConfigUpdateEnum.record, + ], + ) - # send output data to birdseye if websocket is connected or restreaming - if config.birdseye.enabled and ( - config.birdseye.restream - or any( - ws.environ["PATH_INFO"].endswith("birdseye") - for ws in websocket_server.manager + jsmpeg_cameras: dict[str, JsmpegCamera] = {} + birdseye: Birdseye | None = None + preview_recorders: dict[str, PreviewRecorder] = {} + preview_write_times: dict[str, float] = {} + failed_frame_requests: dict[str, int] = {} + last_disabled_cam_check = datetime.datetime.now().timestamp() + + move_preview_frames("cache") + + for camera, cam_config in self.config.cameras.items(): + if not cam_config.enabled_in_config: + continue + + jsmpeg_cameras[camera] = JsmpegCamera( + cam_config, self.stop_event, websocket_server ) - ): - birdseye.write_data( + preview_recorders[camera] = PreviewRecorder(cam_config) + preview_write_times[camera] = 0 + + if self.config.birdseye.enabled: + birdseye = Birdseye(self.config, self.stop_event, websocket_server) + + websocket_thread.start() + + while not self.stop_event.is_set(): + # check if there is an updated config + updates = config_subscriber.check_for_updates() + + if "add" in updates: + for camera in updates["add"]: + jsmpeg_cameras[camera] = JsmpegCamera( + config_subscriber.camera_configs[camera], self.stop_event, websocket_server + ) + preview_recorders[camera] = PreviewRecorder(config_subscriber.camera_configs[camera]) + preview_write_times[camera] = 0 + + (topic, data) = detection_subscriber.check_for_update(timeout=1) + now = datetime.datetime.now().timestamp() + + if now - last_disabled_cam_check > 5: + # check disabled cameras every 5 seconds + last_disabled_cam_check = now + check_disabled_camera_update( + self.config, birdseye, preview_recorders, preview_write_times + ) + + if not topic: + continue + + ( camera, + frame_name, + frame_time, current_tracked_objects, motion_boxes, - frame_time, - frame, + _, + ) = data + + if not self.config.cameras[camera].enabled: + continue + + frame = frame_manager.get( + frame_name, self.config.cameras[camera].frame_shape_yuv ) - frame_manager.close(frame_name) + if frame is None: + logger.debug(f"Failed to get frame {frame_name} from SHM") + failed_frame_requests[camera] = failed_frame_requests.get(camera, 0) + 1 - move_preview_frames("clips") + if ( + failed_frame_requests[camera] + > 
self.config.cameras[camera].detect.fps + ): + logger.warning( + f"Failed to retrieve many frames for {camera} from SHM, consider increasing SHM size if this continues." + ) - while True: - (topic, data) = detection_subscriber.check_for_update(timeout=0) + continue + else: + failed_frame_requests[camera] = 0 - if not topic: - break + # send frames for low fps recording + preview_recorders[camera].write_data( + current_tracked_objects, motion_boxes, frame_time, frame + ) + preview_write_times[camera] = frame_time - ( - camera, - frame_name, - frame_time, - current_tracked_objects, - motion_boxes, - regions, - ) = data + # send camera frame to ffmpeg process if websockets are connected + if any( + ws.environ["PATH_INFO"].endswith(camera) + for ws in websocket_server.manager + ): + # write to the converter for the camera if clients are listening to the specific camera + jsmpeg_cameras[camera].write_frame(frame.tobytes()) - frame = frame_manager.get(frame_name, config.cameras[camera].frame_shape_yuv) - frame_manager.close(frame_name) + # send output data to birdseye if websocket is connected or restreaming + if self.config.birdseye.enabled and ( + self.config.birdseye.restream + or any( + ws.environ["PATH_INFO"].endswith("birdseye") + for ws in websocket_server.manager + ) + ): + birdseye.write_data( + camera, + current_tracked_objects, + motion_boxes, + frame_time, + frame, + ) - detection_subscriber.stop() + frame_manager.close(frame_name) - for jsmpeg in jsmpeg_cameras.values(): - jsmpeg.stop() + move_preview_frames("clips") - for preview in preview_recorders.values(): - preview.stop() + while True: + (topic, data) = detection_subscriber.check_for_update(timeout=0) - if birdseye is not None: - birdseye.stop() + if not topic: + break - config_subscriber.stop() - websocket_server.manager.close_all() - websocket_server.manager.stop() - websocket_server.manager.join() - websocket_server.shutdown() - websocket_thread.join() - logger.info("exiting output process...") + ( + camera, + frame_name, + frame_time, + current_tracked_objects, + motion_boxes, + regions, + ) = data + + frame = frame_manager.get( + frame_name, self.config.cameras[camera].frame_shape_yuv + ) + frame_manager.close(frame_name) + + detection_subscriber.stop() + + for jsmpeg in jsmpeg_cameras.values(): + jsmpeg.stop() + + for preview in preview_recorders.values(): + preview.stop() + + if birdseye is not None: + birdseye.stop() + + config_subscriber.stop() + websocket_server.manager.close_all() + websocket_server.manager.stop() + websocket_server.manager.join() + websocket_server.shutdown() + websocket_thread.join() + logger.info("exiting output process...") def move_preview_frames(loc: str): diff --git a/frigate/record/record.py b/frigate/record/record.py index 252b80545..40a943a43 100644 --- a/frigate/record/record.py +++ b/frigate/record/record.py @@ -1,50 +1,40 @@ """Run recording maintainer and cleanup.""" import logging -import multiprocessing as mp -import signal -import threading -from types import FrameType -from typing import Optional from playhouse.sqliteq import SqliteQueueDatabase -from setproctitle import setproctitle from frigate.config import FrigateConfig from frigate.models import Recordings, ReviewSegment from frigate.record.maintainer import RecordingMaintainer -from frigate.util.services import listen +from frigate.util import Process as FrigateProcess logger = logging.getLogger(__name__) -def manage_recordings(config: FrigateConfig) -> None: - stop_event = mp.Event() +class RecordProcess(FrigateProcess): + 
def __init__(self, config: FrigateConfig) -> None: + super().__init__(name="frigate.recording_manager", daemon=True) + self.config = config - def receiveSignal(signalNumber: int, frame: Optional[FrameType]) -> None: - stop_event.set() + def run(self) -> None: + self.pre_run_setup() + db = SqliteQueueDatabase( + self.config.database.path, + pragmas={ + "auto_vacuum": "FULL", # Does not defragment database + "cache_size": -512 * 1000, # 512MB of cache + "synchronous": "NORMAL", # Safe when using WAL https://www.sqlite.org/pragma.html#pragma_synchronous + }, + timeout=max( + 60, 10 * len([c for c in self.config.cameras.values() if c.enabled]) + ), + ) + models = [ReviewSegment, Recordings] + db.bind(models) - signal.signal(signal.SIGTERM, receiveSignal) - signal.signal(signal.SIGINT, receiveSignal) - - threading.current_thread().name = "process:recording_manager" - setproctitle("frigate.recording_manager") - listen() - - db = SqliteQueueDatabase( - config.database.path, - pragmas={ - "auto_vacuum": "FULL", # Does not defragment database - "cache_size": -512 * 1000, # 512MB of cache - "synchronous": "NORMAL", # Safe when using WAL https://www.sqlite.org/pragma.html#pragma_synchronous - }, - timeout=max(60, 10 * len([c for c in config.cameras.values() if c.enabled])), - ) - models = [ReviewSegment, Recordings] - db.bind(models) - - maintainer = RecordingMaintainer( - config, - stop_event, - ) - maintainer.start() + maintainer = RecordingMaintainer( + self.config, + self.stop_event, + ) + maintainer.start() diff --git a/frigate/review/review.py b/frigate/review/review.py index dafa6c802..00910e439 100644 --- a/frigate/review/review.py +++ b/frigate/review/review.py @@ -1,36 +1,23 @@ """Run recording maintainer and cleanup.""" import logging -import multiprocessing as mp -import signal -import threading -from types import FrameType -from typing import Optional - -from setproctitle import setproctitle +import frigate.util as util from frigate.config import FrigateConfig from frigate.review.maintainer import ReviewSegmentMaintainer -from frigate.util.services import listen logger = logging.getLogger(__name__) -def manage_review_segments(config: FrigateConfig) -> None: - stop_event = mp.Event() +class ReviewProcess(util.Process): + def __init__(self, config: FrigateConfig) -> None: + super().__init__(name="frigate.review_segment_manager", daemon=True) + self.config = config - def receiveSignal(signalNumber: int, frame: Optional[FrameType]) -> None: - stop_event.set() - - signal.signal(signal.SIGTERM, receiveSignal) - signal.signal(signal.SIGINT, receiveSignal) - - threading.current_thread().name = "process:review_segment_manager" - setproctitle("frigate.review_segment_manager") - listen() - - maintainer = ReviewSegmentMaintainer( - config, - stop_event, - ) - maintainer.start() + def run(self) -> None: + self.pre_run_setup() + maintainer = ReviewSegmentMaintainer( + self.config, + self.stop_event, + ) + maintainer.start() diff --git a/frigate/stats/util.py b/frigate/stats/util.py index f5807e1e6..3c41ca3b1 100644 --- a/frigate/stats/util.py +++ b/frigate/stats/util.py @@ -5,13 +5,13 @@ import os import shutil import time from json import JSONDecodeError +from multiprocessing.managers import DictProxy from typing import Any, Optional import psutil import requests from requests.exceptions import RequestException -from frigate.camera import CameraMetrics from frigate.config import FrigateConfig from frigate.const import CACHE_DIR, CLIPS_DIR, RECORD_DIR from frigate.data_processing.types import 
DataProcessorMetrics @@ -53,7 +53,7 @@ def get_latest_version(config: FrigateConfig) -> str: def stats_init( config: FrigateConfig, - camera_metrics: dict[str, CameraMetrics], + camera_metrics: DictProxy, embeddings_metrics: DataProcessorMetrics | None, detectors: dict[str, ObjectDetectProcess], processes: dict[str, int], @@ -273,10 +273,12 @@ def stats_snapshot( stats["cameras"] = {} for name, camera_stats in camera_metrics.items(): total_detection_fps += camera_stats.detection_fps.value - pid = camera_stats.process.pid if camera_stats.process else None + pid = camera_stats.process_pid.value if camera_stats.process_pid.value else None ffmpeg_pid = camera_stats.ffmpeg_pid.value if camera_stats.ffmpeg_pid else None capture_pid = ( - camera_stats.capture_process.pid if camera_stats.capture_process else None + camera_stats.capture_process_pid.value + if camera_stats.capture_process_pid.value + else None ) stats["cameras"][name] = { "camera_fps": round(camera_stats.camera_fps.value, 2), diff --git a/frigate/util/builtin.py b/frigate/util/builtin.py index 0433af18e..90c0f9227 100644 --- a/frigate/util/builtin.py +++ b/frigate/util/builtin.py @@ -341,11 +341,14 @@ def clear_and_unlink(file: Path, missing_ok: bool = True) -> None: def empty_and_close_queue(q: mp.Queue): while True: try: - q.get(block=True, timeout=0.5) - except queue.Empty: - q.close() - q.join_thread() - return + try: + q.get(block=True, timeout=0.5) + except (queue.Empty, EOFError): + q.close() + q.join_thread() + return + except AttributeError: + pass def generate_color_palette(n): diff --git a/frigate/util/process.py b/frigate/util/process.py index ac15539fe..3501e585e 100644 --- a/frigate/util/process.py +++ b/frigate/util/process.py @@ -4,9 +4,8 @@ import multiprocessing as mp import signal import sys import threading -from functools import wraps from logging.handlers import QueueHandler -from typing import Any, Callable, Optional +from typing import Callable, Optional import frigate.log @@ -30,34 +29,12 @@ class BaseProcess(mp.Process): super().start(*args, **kwargs) self.after_start() - def __getattribute__(self, name: str) -> Any: - if name == "run": - run = super().__getattribute__("run") - - @wraps(run) - def run_wrapper(*args, **kwargs): - try: - self.before_run() - return run(*args, **kwargs) - finally: - self.after_run() - - return run_wrapper - - return super().__getattribute__(name) - def before_start(self) -> None: pass def after_start(self) -> None: pass - def before_run(self) -> None: - pass - - def after_run(self) -> None: - pass - class Process(BaseProcess): logger: logging.Logger @@ -73,7 +50,7 @@ class Process(BaseProcess): def before_start(self) -> None: self.__log_queue = frigate.log.log_listener.queue - def before_run(self) -> None: + def pre_run_setup(self) -> None: faulthandler.enable() def receiveSignal(signalNumber, frame): @@ -88,8 +65,6 @@ class Process(BaseProcess): signal.signal(signal.SIGTERM, receiveSignal) signal.signal(signal.SIGINT, receiveSignal) - self.logger = logging.getLogger(self.name) - logging.basicConfig(handlers=[], force=True) logging.getLogger().addHandler(QueueHandler(self.__log_queue)) diff --git a/frigate/video.py b/frigate/video.py index 9710dbd81..2869c2bc2 100755 --- a/frigate/video.py +++ b/frigate/video.py @@ -1,9 +1,7 @@ import datetime import logging -import multiprocessing as mp import os import queue -import signal import subprocess as sp import threading import time @@ -12,8 +10,8 @@ from multiprocessing.synchronize import Event as MpEvent from typing import Any 
import cv2 -from setproctitle import setproctitle +import frigate.util as util from frigate.camera import CameraMetrics, PTZMetrics from frigate.comms.inter_process import InterProcessRequestor from frigate.config import CameraConfig, DetectConfig, ModelConfig @@ -53,7 +51,6 @@ from frigate.util.object import ( is_object_filtered, reduce_detections, ) -from frigate.util.services import listen logger = logging.getLogger(__name__) @@ -328,7 +325,7 @@ class CameraWatchdog(threading.Thread): ffmpeg_cmd, self.logger, self.logpipe, self.frame_size ) self.ffmpeg_pid.value = self.ffmpeg_detect_process.pid - self.capture_thread = CameraCapture( + self.capture_thread = CameraCaptureRunner( self.config, self.shm_frame_count, self.frame_index, @@ -406,7 +403,7 @@ class CameraWatchdog(threading.Thread): return newest_segment_time -class CameraCapture(threading.Thread): +class CameraCaptureRunner(threading.Thread): def __init__( self, config: CameraConfig, @@ -450,103 +447,103 @@ class CameraCapture(threading.Thread): ) -def capture_camera( - config: CameraConfig, shm_frame_count: int, camera_metrics: CameraMetrics -): - stop_event = mp.Event() +class CameraCapture(util.Process): + def __init__( + self, config: CameraConfig, shm_frame_count: int, camera_metrics: CameraMetrics + ) -> None: + super().__init__(name=f"camera_capture:{config.name}", daemon=True) + self.config = config + self.shm_frame_count = shm_frame_count + self.camera_metrics = camera_metrics - def receiveSignal(signalNumber, frame): - stop_event.set() - - signal.signal(signal.SIGTERM, receiveSignal) - signal.signal(signal.SIGINT, receiveSignal) - - threading.current_thread().name = f"capture:{config.name}" - setproctitle(f"frigate.capture:{config.name}") - - camera_watchdog = CameraWatchdog( - config, - shm_frame_count, - camera_metrics.frame_queue, - camera_metrics.camera_fps, - camera_metrics.skipped_fps, - camera_metrics.ffmpeg_pid, - stop_event, - ) - camera_watchdog.start() - camera_watchdog.join() + def run(self) -> None: + self.pre_run_setup() + camera_watchdog = CameraWatchdog( + self.config, + self.shm_frame_count, + self.camera_metrics.frame_queue, + self.camera_metrics.camera_fps, + self.camera_metrics.skipped_fps, + self.camera_metrics.ffmpeg_pid, + self.stop_event, + ) + camera_watchdog.start() + camera_watchdog.join() -def track_camera( - name, - config: CameraConfig, - model_config: ModelConfig, - labelmap: dict[int, str], - detection_queue: Queue, - detected_objects_queue, - camera_metrics: CameraMetrics, - ptz_metrics: PTZMetrics, - region_grid: list[list[dict[str, Any]]], -): - stop_event = mp.Event() - - def receiveSignal(signalNumber, frame): - stop_event.set() - - signal.signal(signal.SIGTERM, receiveSignal) - signal.signal(signal.SIGINT, receiveSignal) - - threading.current_thread().name = f"process:{name}" - setproctitle(f"frigate.process:{name}") - listen() - - frame_queue = camera_metrics.frame_queue - - frame_shape = config.frame_shape - - motion_detector = ImprovedMotionDetector( - frame_shape, - config.motion, - config.detect.fps, - name=config.name, - ptz_metrics=ptz_metrics, - ) - object_detector = RemoteObjectDetector( - name, labelmap, detection_queue, model_config, stop_event - ) - - object_tracker = NorfairTracker(config, ptz_metrics) - - frame_manager = SharedMemoryFrameManager() - - # create communication for region grid updates - requestor = InterProcessRequestor() - - process_frames( - name, - requestor, - frame_queue, - frame_shape, - model_config, - config, - frame_manager, - motion_detector, - 
object_detector, - object_tracker, +class CameraTracker(util.Process): + def __init__( + self, + config: CameraConfig, + model_config: ModelConfig, + labelmap: dict[int, str], + detection_queue: Queue, detected_objects_queue, - camera_metrics, - stop_event, - ptz_metrics, - region_grid, - ) + camera_metrics: CameraMetrics, + ptz_metrics: PTZMetrics, + region_grid: list[list[dict[str, Any]]], + ) -> None: + super().__init__(name=f"camera_processor:{config.name}", daemon=True) + self.config = config + self.model_config = model_config + self.labelmap = labelmap + self.detection_queue = detection_queue + self.detected_objects_queue = detected_objects_queue + self.camera_metrics = camera_metrics + self.ptz_metrics = ptz_metrics + self.region_grid = region_grid - # empty the frame queue - logger.info(f"{name}: emptying frame queue") - while not frame_queue.empty(): - (frame_name, _) = frame_queue.get(False) - frame_manager.delete(frame_name) + def run(self) -> None: + self.pre_run_setup() + frame_queue = self.camera_metrics.frame_queue + frame_shape = self.config.frame_shape - logger.info(f"{name}: exiting subprocess") + motion_detector = ImprovedMotionDetector( + frame_shape, + self.config.motion, + self.config.detect.fps, + name=self.config.name, + ptz_metrics=self.ptz_metrics, + ) + object_detector = RemoteObjectDetector( + self.config.name, + self.labelmap, + self.detection_queue, + self.model_config, + self.stop_event, + ) + + object_tracker = NorfairTracker(self.config, self.ptz_metrics) + + frame_manager = SharedMemoryFrameManager() + + # create communication for region grid updates + requestor = InterProcessRequestor() + + process_frames( + requestor, + frame_queue, + frame_shape, + self.model_config, + self.config, + frame_manager, + motion_detector, + object_detector, + object_tracker, + self.detected_objects_queue, + self.camera_metrics, + self.stop_event, + self.ptz_metrics, + self.region_grid, + ) + + # empty the frame queue + logger.info(f"{self.config.name}: emptying frame queue") + while not frame_queue.empty(): + (frame_name, _) = frame_queue.get(False) + frame_manager.delete(frame_name) + + logger.info(f"{self.config.name}: exiting subprocess") def detect( @@ -587,7 +584,6 @@ def detect( def process_frames( - camera_name: str, requestor: InterProcessRequestor, frame_queue: Queue, frame_shape: tuple[int, int], @@ -607,7 +603,7 @@ def process_frames( next_region_update = get_tomorrow_at_time(2) config_subscriber = CameraConfigUpdateSubscriber( None, - {camera_name: camera_config}, + {camera_config.name: camera_config}, [ CameraConfigUpdateEnum.detect, CameraConfigUpdateEnum.enabled, @@ -663,7 +659,9 @@ def process_frames( and prev_enabled != camera_enabled and camera_metrics.frame_queue.empty() ): - logger.debug(f"Camera {camera_name} disabled, clearing tracked objects") + logger.debug( + f"Camera {camera_config.name} disabled, clearing tracked objects" + ) prev_enabled = camera_enabled # Clear norfair's dictionaries @@ -688,7 +686,7 @@ def process_frames( datetime.datetime.now().astimezone(datetime.timezone.utc) > next_region_update ): - region_grid = requestor.send_data(REQUEST_REGION_GRID, camera_name) + region_grid = requestor.send_data(REQUEST_REGION_GRID, camera_config.name) next_region_update = get_tomorrow_at_time(2) try: @@ -708,7 +706,9 @@ def process_frames( frame = frame_manager.get(frame_name, (frame_shape[0] * 3 // 2, frame_shape[1])) if frame is None: - logger.debug(f"{camera_name}: frame {frame_time} is not in memory store.") + logger.debug( + 
f"{camera_config.name}: frame {frame_time} is not in memory store." + ) continue # look for motion if enabled @@ -947,7 +947,7 @@ def process_frames( ) cv2.imwrite( - f"debug/frames/{camera_name}-{'{:.6f}'.format(frame_time)}.jpg", + f"debug/frames/{camera_config.name}-{'{:.6f}'.format(frame_time)}.jpg", bgr_frame, ) # add to the queue if not full @@ -959,7 +959,7 @@ def process_frames( camera_metrics.process_fps.value = fps_tracker.eps() detected_objects_queue.put( ( - camera_name, + camera_config.name, frame_name, frame_time, detections, diff --git a/web/src/views/system/CameraMetrics.tsx b/web/src/views/system/CameraMetrics.tsx index ba2701926..3f5891265 100644 --- a/web/src/views/system/CameraMetrics.tsx +++ b/web/src/views/system/CameraMetrics.tsx @@ -173,7 +173,7 @@ export default function CameraMetrics({ }); series[key]["detect"].data.push({ x: statsIdx, - y: stats.cpu_usages[camStats.pid.toString()].cpu, + y: stats.cpu_usages[camStats.pid?.toString()]?.cpu, }); }); }); From a6b80c0f9ca9f8a14b749b9157aac9b13b138e9f Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Thu, 12 Jun 2025 14:34:45 -0500 Subject: [PATCH 026/530] Add basic camera settings to UI for testing (#18690) * add basic camera add/edit pane to the UI for testing * only init model runner if transcription is enabled globally * fix role checkboxes --- frigate/events/audio.py | 7 +- web/public/locales/en/views/settings.json | 29 + .../components/settings/CameraEditForm.tsx | 439 ++++++++++ web/src/views/settings/CameraSettingsView.tsx | 782 ++++++++++-------- 4 files changed, 907 insertions(+), 350 deletions(-) create mode 100644 web/src/components/settings/CameraEditForm.tsx diff --git a/frigate/events/audio.py b/frigate/events/audio.py index 9152428fa..791ba80e4 100644 --- a/frigate/events/audio.py +++ b/frigate/events/audio.py @@ -91,12 +91,7 @@ class AudioProcessor(util.Process): self.cameras = cameras self.config = config - if any( - [ - conf.audio_transcription.enabled_in_config == True - for conf in config.cameras.values() - ] - ): + if self.config.audio_transcription.enabled: self.transcription_model_runner = AudioTranscriptionModelRunner( self.config.audio_transcription.device, self.config.audio_transcription.model_size, diff --git a/web/public/locales/en/views/settings.json b/web/public/locales/en/views/settings.json index 2b92e81cd..14dc809bc 100644 --- a/web/public/locales/en/views/settings.json +++ b/web/public/locales/en/views/settings.json @@ -176,6 +176,35 @@ "toast": { "success": "Review Classification configuration has been saved. Restart Frigate to apply changes." 
} + }, + "addCamera": "Add New Camera", + "editCamera": "Edit Camera:", + "selectCamera": "Select a Camera", + "backToSettings": "Back to Camera Settings", + "cameraConfig": { + "add": "Add Camera", + "edit": "Edit Camera", + "description": "Configure camera settings including stream inputs and roles.", + "name": "Camera Name", + "nameRequired": "Camera name is required", + "nameInvalid": "Camera name must contain only letters, numbers, underscores, or hyphens", + "namePlaceholder": "e.g., front_door", + "enabled": "Enabled", + "ffmpeg": { + "inputs": "Input Streams", + "path": "Stream Path", + "pathRequired": "Stream path is required", + "pathPlaceholder": "rtsp://...", + "roles": "Roles", + "rolesRequired": "At least one role is required", + "rolesUnique": "Each role (audio, detect, record) can only be assigned to one stream", + "addInput": "Add Input Stream", + "removeInput": "Remove Input Stream", + "inputsRequired": "At least one input stream is required" + }, + "toast": { + "success": "Camera {{cameraName}} saved successfully" + } } }, "masksAndZones": { diff --git a/web/src/components/settings/CameraEditForm.tsx b/web/src/components/settings/CameraEditForm.tsx new file mode 100644 index 000000000..eb731b2b3 --- /dev/null +++ b/web/src/components/settings/CameraEditForm.tsx @@ -0,0 +1,439 @@ +import { Button } from "@/components/ui/button"; +import { + Form, + FormControl, + FormField, + FormItem, + FormLabel, + FormMessage, +} from "@/components/ui/form"; +import { Input } from "@/components/ui/input"; +import { Switch } from "@/components/ui/switch"; +import Heading from "@/components/ui/heading"; +import { Separator } from "@/components/ui/separator"; +import { zodResolver } from "@hookform/resolvers/zod"; +import { useForm, useFieldArray } from "react-hook-form"; +import { z } from "zod"; +import axios from "axios"; +import { toast, Toaster } from "sonner"; +import { useTranslation } from "react-i18next"; +import { useState, useMemo } from "react"; +import { LuTrash2, LuPlus } from "react-icons/lu"; +import ActivityIndicator from "@/components/indicators/activity-indicator"; +import { FrigateConfig } from "@/types/frigateConfig"; +import useSWR from "swr"; + +type ConfigSetBody = { + requires_restart: number; + // TODO: type this better + // eslint-disable-next-line @typescript-eslint/no-explicit-any + config_data: any; + update_topic?: string; +}; + +const RoleEnum = z.enum(["audio", "detect", "record"]); +type Role = z.infer; + +type CameraEditFormProps = { + cameraName?: string; + onSave?: () => void; + onCancel?: () => void; +}; + +export default function CameraEditForm({ + cameraName, + onSave, + onCancel, +}: CameraEditFormProps) { + const { t } = useTranslation(["views/settings"]); + const { data: config } = useSWR("config"); + const [isLoading, setIsLoading] = useState(false); + + const formSchema = useMemo( + () => + z.object({ + cameraName: z + .string() + .min(1, { message: t("camera.cameraConfig.nameRequired") }) + .regex(/^[a-zA-Z0-9_-]+$/, { + message: t("camera.cameraConfig.nameInvalid"), + }), + enabled: z.boolean(), + ffmpeg: z.object({ + inputs: z + .array( + z.object({ + path: z.string().min(1, { + message: t("camera.cameraConfig.ffmpeg.pathRequired"), + }), + roles: z.array(RoleEnum).min(1, { + message: t("camera.cameraConfig.ffmpeg.rolesRequired"), + }), + }), + ) + .min(1, { + message: t("camera.cameraConfig.ffmpeg.inputsRequired"), + }) + .refine( + (inputs) => { + const roleOccurrences = new Map(); + inputs.forEach((input) => { + input.roles.forEach((role) 
=> { + roleOccurrences.set( + role, + (roleOccurrences.get(role) || 0) + 1, + ); + }); + }); + return Array.from(roleOccurrences.values()).every( + (count) => count <= 1, + ); + }, + { + message: t("camera.cameraConfig.ffmpeg.rolesUnique"), + path: ["inputs"], + }, + ), + }), + }), + [t], + ); + + type FormValues = z.infer; + + // Determine available roles for default values + const usedRoles = useMemo(() => { + const roles = new Set(); + if (cameraName && config?.cameras[cameraName]) { + const camera = config.cameras[cameraName]; + camera.ffmpeg?.inputs?.forEach((input) => { + input.roles.forEach((role) => roles.add(role as Role)); + }); + } + return roles; + }, [cameraName, config]); + + const defaultValues: FormValues = { + cameraName: cameraName || "", + enabled: true, + ffmpeg: { + inputs: [ + { + path: "", + roles: usedRoles.has("detect") ? [] : ["detect"], + }, + ], + }, + }; + + // Load existing camera config if editing + if (cameraName && config?.cameras[cameraName]) { + const camera = config.cameras[cameraName]; + defaultValues.enabled = camera.enabled ?? true; + defaultValues.ffmpeg.inputs = camera.ffmpeg?.inputs?.length + ? camera.ffmpeg.inputs.map((input) => ({ + path: input.path, + roles: input.roles as Role[], + })) + : defaultValues.ffmpeg.inputs; + } + + const form = useForm({ + resolver: zodResolver(formSchema), + defaultValues, + mode: "onChange", + }); + + const { fields, append, remove } = useFieldArray({ + control: form.control, + name: "ffmpeg.inputs", + }); + + // Watch ffmpeg.inputs to track used roles + const watchedInputs = form.watch("ffmpeg.inputs"); + + const saveCameraConfig = (values: FormValues) => { + setIsLoading(true); + const configData: ConfigSetBody["config_data"] = { + cameras: { + [values.cameraName]: { + enabled: values.enabled, + ffmpeg: { + inputs: values.ffmpeg.inputs.map((input) => ({ + path: input.path, + roles: input.roles, + })), + }, + }, + }, + }; + + const requestBody: ConfigSetBody = { + requires_restart: 1, + config_data: configData, + }; + + // Add update_topic for new cameras + if (!cameraName) { + requestBody.update_topic = `config/cameras/${values.cameraName}/add`; + } + + axios + .put("config/set", requestBody) + .then((res) => { + if (res.status === 200) { + toast.success( + t("camera.cameraConfig.toast.success", { + cameraName: values.cameraName, + }), + { position: "top-center" }, + ); + if (onSave) onSave(); + } else { + throw new Error(res.statusText); + } + }) + .catch((error) => { + const errorMessage = + error.response?.data?.message || + error.response?.data?.detail || + "Unknown error"; + toast.error( + t("toast.save.error.title", { errorMessage, ns: "common" }), + { position: "top-center" }, + ); + }) + .finally(() => { + setIsLoading(false); + }); + }; + + const onSubmit = (values: FormValues) => { + if (cameraName && values.cameraName !== cameraName) { + // If camera name changed, delete old camera config + const deleteRequestBody: ConfigSetBody = { + requires_restart: 1, + config_data: { + cameras: { + [cameraName]: "", + }, + }, + update_topic: `config/cameras/${cameraName}/remove`, + }; + + axios + .put("config/set", deleteRequestBody) + .then(() => saveCameraConfig(values)) + .catch((error) => { + const errorMessage = + error.response?.data?.message || + error.response?.data?.detail || + "Unknown error"; + toast.error( + t("toast.save.error.title", { errorMessage, ns: "common" }), + { position: "top-center" }, + ); + }) + .finally(() => { + setIsLoading(false); + }); + } else { + saveCameraConfig(values); + } + }; 
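+  // NOTE: renaming a camera issues two sequential config/set calls: one that
+  // blanks the old key (publishing its `.../remove` topic) and one that
+  // writes the new key, so the delete and the save are not transactional.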
+ + // Determine available roles for new streams + const getAvailableRoles = (): Role[] => { + const used = new Set(); + watchedInputs.forEach((input) => { + input.roles.forEach((role) => used.add(role)); + }); + return used.has("detect") ? [] : ["detect"]; + }; + + const getUsedRolesExcludingIndex = (excludeIndex: number) => { + const roles = new Set(); + watchedInputs.forEach((input, idx) => { + if (idx !== excludeIndex) { + input.roles.forEach((role) => roles.add(role)); + } + }); + return roles; + }; + + return ( + <> + + + {cameraName + ? t("camera.cameraConfig.edit") + : t("camera.cameraConfig.add")} + +
+ {t("camera.cameraConfig.description")} +
+ + +
+ + ( + + {t("camera.cameraConfig.name")} + + + + + + )} + /> + + ( + + + + + {t("camera.cameraConfig.enabled")} + + + )} + /> + +
+ {t("camera.cameraConfig.ffmpeg.inputs")} + {fields.map((field, index) => ( +
+ ( + + + {t("camera.cameraConfig.ffmpeg.path")} + + + + + + + )} + /> + + ( + + + {t("camera.cameraConfig.ffmpeg.roles")} + + +
+ {(["audio", "detect", "record"] as const).map( + (role) => ( + + ), + )} +
+
+ +
+ )} + /> + + +
+ ))} + + {form.formState.errors.ffmpeg?.inputs?.root && + form.formState.errors.ffmpeg.inputs.root.message} + + +
+ +
+ + +
+ + + + ); +} diff --git a/web/src/views/settings/CameraSettingsView.tsx b/web/src/views/settings/CameraSettingsView.tsx index 994936b8f..6d5527c82 100644 --- a/web/src/views/settings/CameraSettingsView.tsx +++ b/web/src/views/settings/CameraSettingsView.tsx @@ -1,7 +1,6 @@ import Heading from "@/components/ui/heading"; import { useCallback, useContext, useEffect, useMemo, useState } from "react"; -import { Toaster } from "sonner"; -import { toast } from "sonner"; +import { Toaster, toast } from "sonner"; import { Form, FormControl, @@ -14,8 +13,8 @@ import { import { zodResolver } from "@hookform/resolvers/zod"; import { useForm } from "react-hook-form"; import { z } from "zod"; -import { Separator } from "../../components/ui/separator"; -import { Button } from "../../components/ui/button"; +import { Separator } from "@/components/ui/separator"; +import { Button } from "@/components/ui/button"; import useSWR from "swr"; import { FrigateConfig } from "@/types/frigateConfig"; import { Checkbox } from "@/components/ui/checkbox"; @@ -33,6 +32,17 @@ import { Label } from "@/components/ui/label"; import { useAlertsState, useDetectionsState, useEnabledState } from "@/api/ws"; import { useDocDomain } from "@/hooks/use-doc-domain"; import { getTranslatedLabel } from "@/utils/i18n"; +import CameraEditForm from "@/components/settings/CameraEditForm"; +import { LuPlus } from "react-icons/lu"; +import { + Select, + SelectContent, + SelectItem, + SelectTrigger, + SelectValue, +} from "@/components/ui/select"; +import { IoMdArrowRoundBack } from "react-icons/io"; +import { isDesktop } from "react-device-detect"; type CameraSettingsViewProps = { selectedCamera: string; @@ -63,9 +73,23 @@ export default function CameraSettingsView({ const [changedValue, setChangedValue] = useState(false); const [isLoading, setIsLoading] = useState(false); const [selectDetections, setSelectDetections] = useState(false); + const [viewMode, setViewMode] = useState<"settings" | "add" | "edit">( + "settings", + ); // Control view state + const [editCameraName, setEditCameraName] = useState( + undefined, + ); // Track camera being edited const { addMessage, removeMessage } = useContext(StatusBarMessagesContext)!; + // List of cameras for dropdown + const cameras = useMemo(() => { + if (config) { + return Object.keys(config.cameras).sort(); + } + return []; + }, [config]); + // zones and labels const zones = useMemo(() => { @@ -259,7 +283,14 @@ export default function CameraSettingsView({ document.title = t("documentTitle.camera"); }, [t]); - if (!cameraConfig && !selectedCamera) { + // Handle back navigation from add/edit form + const handleBack = useCallback(() => { + setViewMode("settings"); + setEditCameraName(undefined); + updateConfig(); + }, [updateConfig]); + + if (!cameraConfig && !selectedCamera && viewMode === "settings") { return ; } @@ -268,254 +299,184 @@ export default function CameraSettingsView({
- - camera.title - - - - - - camera.streams.title - - -
- { - sendEnabled(isChecked ? "ON" : "OFF"); - }} - /> -
- -
-
-
- camera.streams.desc -
- - - - camera.review.title - - -
-
- { - sendAlerts(isChecked ? "ON" : "OFF"); - }} - /> -
- + {viewMode === "settings" ? ( + <> + + {t("camera.title")} + +
+ + {cameras.length > 0 && ( +
+ + +
+ )}
-
-
+ + + + camera.streams.title + +
{ - sendDetections(isChecked ? "ON" : "OFF"); + sendEnabled(isChecked ? "ON" : "OFF"); }} />
-
- camera.review.desc + camera.streams.desc
-
-
+ - + + camera.review.title + - - camera.reviewClassification.title - +
+
+ { + sendAlerts(isChecked ? "ON" : "OFF"); + }} + /> +
+ +
+
+
+
+ { + sendDetections(isChecked ? "ON" : "OFF"); + }} + /> +
+ +
+
+
+ camera.review.desc +
+
+
-
-
-

+ + + - camera.reviewClassification.desc + camera.reviewClassification.title -

-
- - - camera.reviewClassification.readTheDocumentation - {" "} - - + + +
+
+

+ + camera.reviewClassification.desc + +

+
+ + + camera.reviewClassification.readTheDocumentation + {" "} + + +
+
-
-
-
- -
0 && - "grid items-start gap-5 md:grid-cols-2", - )} - > - ( - - {zones && zones?.length > 0 ? ( - <> -
- - - camera.review.alerts - - - - - - camera.reviewClassification.selectAlertsZones - - -
-
- {zones?.map((zone) => ( - { - return ( - - - { - setChangedValue(true); - return checked - ? field.onChange([ - ...field.value, - zone.name, - ]) - : field.onChange( - field.value?.filter( - (value) => - value !== zone.name, - ), - ); - }} - /> - - - {zone.name.replaceAll("_", " ")} - - - ); - }} - /> - ))} -
- - ) : ( -
- - camera.reviewClassification.noDefinedZones - -
- )} - -
- {watchedAlertsZones && watchedAlertsZones.length > 0 - ? t( - "camera.reviewClassification.zoneObjectAlertsTips", - { - alertsLabels, - zone: watchedAlertsZones - .map((zone) => - capitalizeFirstLetter(zone).replaceAll( - "_", - " ", - ), - ) - .join(", "), - cameraName: capitalizeFirstLetter( - cameraConfig?.name ?? "", - ).replaceAll("_", " "), - }, - ) - : t("camera.reviewClassification.objectAlertsTips", { - alertsLabels, - cameraName: capitalizeFirstLetter( - cameraConfig?.name ?? "", - ).replaceAll("_", " "), - })} -
-
- )} - /> - - ( - - {zones && zones?.length > 0 && ( - <> -
- - - camera.review.detections - - - - {selectDetections && ( - - - camera.reviewClassification.selectDetectionsZones - - - )} -
- - {selectDetections && ( -
- {zones?.map((zone) => ( - { - return ( + + +
0 && + "grid items-start gap-5 md:grid-cols-2", + )} + > + ( + + {zones && zones?.length > 0 ? ( + <> +
+ + + camera.review.alerts + + + + + + camera.reviewClassification.selectAlertsZones + + +
+
+ {zones?.map((zone) => ( + ( { + setChangedValue(true); return checked ? field.onChange([ ...field.value, @@ -545,126 +507,258 @@ export default function CameraSettingsView({ {zone.name.replaceAll("_", " ")} - ); - }} - /> - ))} + )} + /> + ))} +
+ + ) : ( +
+ + camera.reviewClassification.noDefinedZones +
)} - -
- -
- -
+
+ {watchedAlertsZones && watchedAlertsZones.length > 0 + ? t( + "camera.reviewClassification.zoneObjectAlertsTips", + { + alertsLabels, + zone: watchedAlertsZones + .map((zone) => + capitalizeFirstLetter(zone).replaceAll( + "_", + " ", + ), + ) + .join(", "), + cameraName: capitalizeFirstLetter( + cameraConfig?.name ?? "", + ).replaceAll("_", " "), + }, + ) + : t( + "camera.reviewClassification.objectAlertsTips", + { + alertsLabels, + cameraName: capitalizeFirstLetter( + cameraConfig?.name ?? "", + ).replaceAll("_", " "), + }, + )}
- + )} + /> -
- {watchedDetectionsZones && - watchedDetectionsZones.length > 0 ? ( - !selectDetections ? ( - - capitalizeFirstLetter(zone).replaceAll( - "_", - " ", - ), - ) - .join(", "), - cameraName: capitalizeFirstLetter( - cameraConfig?.name ?? "", - ).replaceAll("_", " "), - }} - ns="views/settings" - > - ) : ( - - capitalizeFirstLetter(zone).replaceAll( - "_", - " ", - ), - ) - .join(", "), - cameraName: capitalizeFirstLetter( - cameraConfig?.name ?? "", - ).replaceAll("_", " "), - }} - ns="views/settings" - /> - ) - ) : ( - - )} -
- + ( + + {zones && zones?.length > 0 && ( + <> +
+ + + camera.review.detections + + + + {selectDetections && ( + + + camera.reviewClassification.selectDetectionsZones + + + )} +
+ + {selectDetections && ( +
+ {zones?.map((zone) => ( + ( + + + { + return checked + ? field.onChange([ + ...field.value, + zone.name, + ]) + : field.onChange( + field.value?.filter( + (value) => + value !== zone.name, + ), + ); + }} + /> + + + {zone.name.replaceAll("_", " ")} + + + )} + /> + ))} +
+ )} + + +
+ +
+ +
+
+ + )} + +
+ {watchedDetectionsZones && + watchedDetectionsZones.length > 0 ? ( + !selectDetections ? ( + + capitalizeFirstLetter(zone).replaceAll( + "_", + " ", + ), + ) + .join(", "), + cameraName: capitalizeFirstLetter( + cameraConfig?.name ?? "", + ).replaceAll("_", " "), + }} + ns="views/settings" + /> + ) : ( + + capitalizeFirstLetter(zone).replaceAll( + "_", + " ", + ), + ) + .join(", "), + cameraName: capitalizeFirstLetter( + cameraConfig?.name ?? "", + ).replaceAll("_", " "), + }} + ns="views/settings" + /> + ) + ) : ( + + )} +
+
+ )} + /> +
+ + +
+ + +
+ + + + ) : ( + <> +
+ +
+
+
- - -
- - -
- - + + )}
From 4deccf08a1051b9cca4852cf2a3cc11dd0108afe Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Fri, 13 Jun 2025 09:43:38 -0500 Subject: [PATCH 027/530] Ensure logging config is propagated to forked processes (#18704) * Move log level initialization to log * Use logger config * Formatting * Fix config order * Set process names --------- Co-authored-by: Nicolas Mowen --- frigate/app.py | 1 + frigate/config/logger.py | 29 ++++------------------------- frigate/embeddings/__init__.py | 2 +- frigate/events/audio.py | 2 +- frigate/log.py | 26 ++++++++++++++++++++++++++ frigate/object_detection/base.py | 8 +++++++- frigate/output/output.py | 2 +- frigate/record/record.py | 2 +- frigate/review/review.py | 2 +- frigate/util/process.py | 12 +++++++++++- 10 files changed, 54 insertions(+), 32 deletions(-) diff --git a/frigate/app.py b/frigate/app.py index 010f311b9..687a06be4 100644 --- a/frigate/app.py +++ b/frigate/app.py @@ -387,6 +387,7 @@ class FrigateApp: name, self.detection_queue, list(self.config.cameras.keys()), + self.config, detector_config, ) diff --git a/frigate/config/logger.py b/frigate/config/logger.py index a3eed23d0..0ba3e6972 100644 --- a/frigate/config/logger.py +++ b/frigate/config/logger.py @@ -1,20 +1,11 @@ -import logging -from enum import Enum - from pydantic import Field, ValidationInfo, model_validator from typing_extensions import Self +from frigate.log import LogLevel, apply_log_levels + from .base import FrigateBaseModel -__all__ = ["LoggerConfig", "LogLevel"] - - -class LogLevel(str, Enum): - debug = "debug" - info = "info" - warning = "warning" - error = "error" - critical = "critical" +__all__ = ["LoggerConfig"] class LoggerConfig(FrigateBaseModel): @@ -26,18 +17,6 @@ class LoggerConfig(FrigateBaseModel): @model_validator(mode="after") def post_validation(self, info: ValidationInfo) -> Self: if isinstance(info.context, dict) and info.context.get("install", False): - logging.getLogger().setLevel(self.default.value.upper()) - - log_levels = { - "absl": LogLevel.error, - "httpx": LogLevel.error, - "tensorflow": LogLevel.error, - "werkzeug": LogLevel.error, - "ws4py": LogLevel.error, - **self.logs, - } - - for log, level in log_levels.items(): - logging.getLogger(log).setLevel(level.value.upper()) + apply_log_levels(self.default.value.upper(), self.logs) return self diff --git a/frigate/embeddings/__init__.py b/frigate/embeddings/__init__.py index 054f2c334..9c72bcd03 100644 --- a/frigate/embeddings/__init__.py +++ b/frigate/embeddings/__init__.py @@ -36,7 +36,7 @@ class EmbeddingProcess(FrigateProcess): self.metrics = metrics def run(self) -> None: - self.pre_run_setup() + self.pre_run_setup(self.config.logger) maintainer = EmbeddingMaintainer( self.config, self.metrics, diff --git a/frigate/events/audio.py b/frigate/events/audio.py index 791ba80e4..d7242cf2b 100644 --- a/frigate/events/audio.py +++ b/frigate/events/audio.py @@ -100,7 +100,7 @@ class AudioProcessor(util.Process): self.transcription_model_runner = None def run(self) -> None: - self.pre_run_setup() + self.pre_run_setup(self.config.logger) audio_threads: list[AudioEventMaintainer] = [] threading.current_thread().name = "process:audio_manager" diff --git a/frigate/log.py b/frigate/log.py index f535a278c..2e9c781f1 100644 --- a/frigate/log.py +++ b/frigate/log.py @@ -5,6 +5,7 @@ import os import sys import threading from collections import deque +from enum import Enum from logging.handlers import QueueHandler, QueueListener from multiprocessing.managers import 
SyncManager from queue import Queue @@ -33,6 +34,15 @@ LOG_HANDLER.addFilter( not in record.getMessage() ) + +class LogLevel(str, Enum): + debug = "debug" + info = "info" + warning = "warning" + error = "error" + critical = "critical" + + log_listener: Optional[QueueListener] = None log_queue: Optional[Queue] = None @@ -61,6 +71,22 @@ def _stop_logging() -> None: log_listener = None +def apply_log_levels(default: str, log_levels: dict[str, LogLevel]) -> None: + logging.getLogger().setLevel(default) + + log_levels = { + "absl": LogLevel.error, + "httpx": LogLevel.error, + "tensorflow": LogLevel.error, + "werkzeug": LogLevel.error, + "ws4py": LogLevel.error, + **log_levels, + } + + for log, level in log_levels.items(): + logging.getLogger(log).setLevel(level.value.upper()) + + # When a multiprocessing.Process exits, python tries to flush stdout and stderr. However, if the # process is created after a thread (for example a logging thread) is created and the process fork # happens while an internal lock is held, the stdout/err flush can cause a deadlock. diff --git a/frigate/object_detection/base.py b/frigate/object_detection/base.py index d203e8574..e86b1b036 100644 --- a/frigate/object_detection/base.py +++ b/frigate/object_detection/base.py @@ -12,6 +12,7 @@ from frigate.comms.object_detector_signaler import ( ObjectDetectorPublisher, ObjectDetectorSubscriber, ) +from frigate.config import FrigateConfig from frigate.detectors import create_detector from frigate.detectors.detector_config import ( BaseDetectorConfig, @@ -92,6 +93,7 @@ class DetectorRunner(util.Process): cameras: list[str], avg_speed: Value, start_time: Value, + config: FrigateConfig, detector_config: BaseDetectorConfig, ) -> None: super().__init__(name=name, daemon=True) @@ -99,6 +101,7 @@ class DetectorRunner(util.Process): self.cameras = cameras self.avg_speed = avg_speed self.start_time = start_time + self.config = config self.detector_config = detector_config self.outputs: dict = {} @@ -108,7 +111,7 @@ class DetectorRunner(util.Process): self.outputs[name] = {"shm": out_shm, "np": out_np} def run(self) -> None: - self.pre_run_setup() + self.pre_run_setup(self.config.logger) frame_manager = SharedMemoryFrameManager() object_detector = LocalObjectDetector(detector_config=self.detector_config) @@ -161,6 +164,7 @@ class ObjectDetectProcess: name: str, detection_queue: Queue, cameras: list[str], + config: FrigateConfig, detector_config: BaseDetectorConfig, ): self.name = name @@ -169,6 +173,7 @@ class ObjectDetectProcess: self.avg_inference_speed = Value("d", 0.01) self.detection_start = Value("d", 0.0) self.detect_process: util.Process | None = None + self.config = config self.detector_config = detector_config self.start_or_restart() @@ -195,6 +200,7 @@ class ObjectDetectProcess: self.cameras, self.avg_inference_speed, self.detection_start, + self.config, self.detector_config, ) self.detect_process.start() diff --git a/frigate/output/output.py b/frigate/output/output.py index 8c60e51c7..0cb8a649f 100644 --- a/frigate/output/output.py +++ b/frigate/output/output.py @@ -77,7 +77,7 @@ class OutputProcess(util.Process): self.config = config def run(self) -> None: - self.pre_run_setup() + self.pre_run_setup(self.config.logger) frame_manager = SharedMemoryFrameManager() diff --git a/frigate/record/record.py b/frigate/record/record.py index 40a943a43..153560a11 100644 --- a/frigate/record/record.py +++ b/frigate/record/record.py @@ -18,7 +18,7 @@ class RecordProcess(FrigateProcess): self.config = config def run(self) -> None: - 
self.pre_run_setup() + self.pre_run_setup(self.config.logger) db = SqliteQueueDatabase( self.config.database.path, pragmas={ diff --git a/frigate/review/review.py b/frigate/review/review.py index 00910e439..e687f4f45 100644 --- a/frigate/review/review.py +++ b/frigate/review/review.py @@ -15,7 +15,7 @@ class ReviewProcess(util.Process): self.config = config def run(self) -> None: - self.pre_run_setup() + self.pre_run_setup(self.config.logger) maintainer = ReviewSegmentMaintainer( self.config, self.stop_event, diff --git a/frigate/util/process.py b/frigate/util/process.py index 3501e585e..6e3459c6b 100644 --- a/frigate/util/process.py +++ b/frigate/util/process.py @@ -7,7 +7,10 @@ import threading from logging.handlers import QueueHandler from typing import Callable, Optional +from setproctitle import setproctitle + import frigate.log +from frigate.config.logger import LoggerConfig class BaseProcess(mp.Process): @@ -50,7 +53,9 @@ class Process(BaseProcess): def before_start(self) -> None: self.__log_queue = frigate.log.log_listener.queue - def pre_run_setup(self) -> None: + def pre_run_setup(self, logConfig: LoggerConfig | None = None) -> None: + setproctitle(self.name) + threading.current_thread().name = f"process:{self.name}" faulthandler.enable() def receiveSignal(signalNumber, frame): @@ -68,3 +73,8 @@ class Process(BaseProcess): self.logger = logging.getLogger(self.name) logging.basicConfig(handlers=[], force=True) logging.getLogger().addHandler(QueueHandler(self.__log_queue)) + + if logConfig: + frigate.log.apply_log_levels( + logConfig.default.value.upper(), logConfig.logs + ) From e832bb4badbf7ad015d8dfba9e2242cb114b3860 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Fri, 13 Jun 2025 11:09:51 -0600 Subject: [PATCH 028/530] Fix go2rtc init (#18708) * Cleanup process handling * Adjust process name --- benchmark.py | 4 ++-- frigate/embeddings/__init__.py | 2 +- frigate/events/audio.py | 4 ++-- frigate/object_detection/base.py | 8 ++++---- frigate/output/output.py | 4 ++-- frigate/record/record.py | 2 +- frigate/review/review.py | 4 ++-- frigate/util/__init__.py | 3 --- frigate/util/classification.py | 4 ++-- frigate/util/process.py | 2 +- frigate/video.py | 6 +++--- 11 files changed, 20 insertions(+), 23 deletions(-) diff --git a/benchmark.py b/benchmark.py index 1f39302a7..46adc59df 100755 --- a/benchmark.py +++ b/benchmark.py @@ -4,13 +4,13 @@ from statistics import mean import numpy as np -import frigate.util as util from frigate.config import DetectorTypeEnum from frigate.object_detection.base import ( ObjectDetectProcess, RemoteObjectDetector, load_labels, ) +from frigate.util.process import FrigateProcess my_frame = np.expand_dims(np.full((300, 300, 3), 1, np.uint8), axis=0) labels = load_labels("/labelmap.txt") @@ -91,7 +91,7 @@ edgetpu_process_2 = ObjectDetectProcess( ) for x in range(0, 10): - camera_process = util.Process( + camera_process = FrigateProcess( target=start, args=(x, 300, detection_queue, events[str(x)]) ) camera_process.daemon = True diff --git a/frigate/embeddings/__init__.py b/frigate/embeddings/__init__.py index 9c72bcd03..cb897ed4b 100644 --- a/frigate/embeddings/__init__.py +++ b/frigate/embeddings/__init__.py @@ -17,9 +17,9 @@ from frigate.const import CONFIG_DIR, FACE_DIR from frigate.data_processing.types import DataProcessorMetrics from frigate.db.sqlitevecq import SqliteVecQueueDatabase from frigate.models import Event -from frigate.util import Process as FrigateProcess from frigate.util.builtin import serialize from frigate.util.classification 
import kickoff_model_training +from frigate.util.process import FrigateProcess from .maintainer import EmbeddingMaintainer from .util import ZScoreNormalization diff --git a/frigate/events/audio.py b/frigate/events/audio.py index d7242cf2b..7f94c2a00 100644 --- a/frigate/events/audio.py +++ b/frigate/events/audio.py @@ -11,7 +11,6 @@ from typing import Any, Tuple import numpy as np -import frigate.util as util from frigate.comms.detections_updater import DetectionPublisher, DetectionTypeEnum from frigate.comms.event_metadata_updater import ( EventMetadataPublisher, @@ -40,6 +39,7 @@ from frigate.ffmpeg_presets import parse_preset_input from frigate.log import LogPipe from frigate.object_detection.base import load_labels from frigate.util.builtin import get_ffmpeg_arg_list +from frigate.util.process import FrigateProcess from frigate.video import start_or_restart_ffmpeg, stop_ffmpeg try: @@ -76,7 +76,7 @@ def get_ffmpeg_command(ffmpeg: FfmpegConfig) -> list[str]: ) -class AudioProcessor(util.Process): +class AudioProcessor(FrigateProcess): name = "frigate.audio_manager" def __init__( diff --git a/frigate/object_detection/base.py b/frigate/object_detection/base.py index e86b1b036..2953f86eb 100644 --- a/frigate/object_detection/base.py +++ b/frigate/object_detection/base.py @@ -7,7 +7,6 @@ from multiprocessing.synchronize import Event as MpEvent import numpy as np -import frigate.util as util from frigate.comms.object_detector_signaler import ( ObjectDetectorPublisher, ObjectDetectorSubscriber, @@ -21,6 +20,7 @@ from frigate.detectors.detector_config import ( ) from frigate.util.builtin import EventsPerSecond, load_labels from frigate.util.image import SharedMemoryFrameManager, UntrackedSharedMemory +from frigate.util.process import FrigateProcess from .util import tensor_transform @@ -85,7 +85,7 @@ class LocalObjectDetector(ObjectDetector): return self.detect_api.detect_raw(tensor_input=tensor_input) -class DetectorRunner(util.Process): +class DetectorRunner(FrigateProcess): def __init__( self, name, @@ -172,7 +172,7 @@ class ObjectDetectProcess: self.detection_queue = detection_queue self.avg_inference_speed = Value("d", 0.01) self.detection_start = Value("d", 0.0) - self.detect_process: util.Process | None = None + self.detect_process: FrigateProcess | None = None self.config = config self.detector_config = detector_config self.start_or_restart() @@ -195,7 +195,7 @@ class ObjectDetectProcess: if (self.detect_process is not None) and self.detect_process.is_alive(): self.stop() self.detect_process = DetectorRunner( - f"detector:{self.name}", + f"frigate.detector:{self.name}", self.detection_queue, self.cameras, self.avg_inference_speed, diff --git a/frigate/output/output.py b/frigate/output/output.py index 0cb8a649f..da5906e78 100644 --- a/frigate/output/output.py +++ b/frigate/output/output.py @@ -14,7 +14,6 @@ from ws4py.server.wsgirefserver import ( ) from ws4py.server.wsgiutils import WebSocketWSGIApplication -import frigate.util as util from frigate.comms.detections_updater import DetectionSubscriber, DetectionTypeEnum from frigate.comms.ws import WebSocket from frigate.config import FrigateConfig @@ -27,6 +26,7 @@ from frigate.output.birdseye import Birdseye from frigate.output.camera import JsmpegCamera from frigate.output.preview import PreviewRecorder from frigate.util.image import SharedMemoryFrameManager, get_blank_yuv_frame +from frigate.util.process import FrigateProcess logger = logging.getLogger(__name__) @@ -71,7 +71,7 @@ def check_disabled_camera_update( 
birdseye.all_cameras_disabled() -class OutputProcess(util.Process): +class OutputProcess(FrigateProcess): def __init__(self, config: FrigateConfig) -> None: super().__init__(name="frigate.output", daemon=True) self.config = config diff --git a/frigate/record/record.py b/frigate/record/record.py index 153560a11..c52260745 100644 --- a/frigate/record/record.py +++ b/frigate/record/record.py @@ -7,7 +7,7 @@ from playhouse.sqliteq import SqliteQueueDatabase from frigate.config import FrigateConfig from frigate.models import Recordings, ReviewSegment from frigate.record.maintainer import RecordingMaintainer -from frigate.util import Process as FrigateProcess +from frigate.util.process import FrigateProcess logger = logging.getLogger(__name__) diff --git a/frigate/review/review.py b/frigate/review/review.py index e687f4f45..677d07776 100644 --- a/frigate/review/review.py +++ b/frigate/review/review.py @@ -2,14 +2,14 @@ import logging -import frigate.util as util from frigate.config import FrigateConfig from frigate.review.maintainer import ReviewSegmentMaintainer +from frigate.util.process import FrigateProcess logger = logging.getLogger(__name__) -class ReviewProcess(util.Process): +class ReviewProcess(FrigateProcess): def __init__(self, config: FrigateConfig) -> None: super().__init__(name="frigate.review_segment_manager", daemon=True) self.config = config diff --git a/frigate/util/__init__.py b/frigate/util/__init__.py index 307bf4f8b..e69de29bb 100644 --- a/frigate/util/__init__.py +++ b/frigate/util/__init__.py @@ -1,3 +0,0 @@ -from .process import Process - -__all__ = ["Process"] diff --git a/frigate/util/classification.py b/frigate/util/classification.py index 842f38fa2..c6e2b5878 100644 --- a/frigate/util/classification.py +++ b/frigate/util/classification.py @@ -14,7 +14,7 @@ from frigate.comms.embeddings_updater import EmbeddingsRequestEnum, EmbeddingsRe from frigate.comms.inter_process import InterProcessRequestor from frigate.const import CLIPS_DIR, MODEL_CACHE_DIR, UPDATE_MODEL_STATE from frigate.types import ModelStatusTypesEnum -from frigate.util import Process +from frigate.util.process import FrigateProcess BATCH_SIZE = 16 EPOCHS = 50 @@ -144,7 +144,7 @@ def kickoff_model_training( # run training in sub process so that # tensorflow will free CPU / GPU memory # upon training completion - training_process = Process( + training_process = FrigateProcess( target=__train_classification_model, name=f"model_training:{model_name}", args=(model_name,), diff --git a/frigate/util/process.py b/frigate/util/process.py index 6e3459c6b..9234a0ea6 100644 --- a/frigate/util/process.py +++ b/frigate/util/process.py @@ -39,7 +39,7 @@ class BaseProcess(mp.Process): pass -class Process(BaseProcess): +class FrigateProcess(BaseProcess): logger: logging.Logger @property diff --git a/frigate/video.py b/frigate/video.py index 2869c2bc2..98f3c7a8b 100755 --- a/frigate/video.py +++ b/frigate/video.py @@ -11,7 +11,6 @@ from typing import Any import cv2 -import frigate.util as util from frigate.camera import CameraMetrics, PTZMetrics from frigate.comms.inter_process import InterProcessRequestor from frigate.config import CameraConfig, DetectConfig, ModelConfig @@ -51,6 +50,7 @@ from frigate.util.object import ( is_object_filtered, reduce_detections, ) +from frigate.util.process import FrigateProcess logger = logging.getLogger(__name__) @@ -447,7 +447,7 @@ class CameraCaptureRunner(threading.Thread): ) -class CameraCapture(util.Process): +class CameraCapture(FrigateProcess): def __init__( self, config: 
CameraConfig, shm_frame_count: int, camera_metrics: CameraMetrics ) -> None: @@ -471,7 +471,7 @@ class CameraCapture(util.Process): camera_watchdog.join() -class CameraTracker(util.Process): +class CameraTracker(FrigateProcess): def __init__( self, config: CameraConfig, From ef060b97ca88ef41e3e425ed9615e4651a7dbbb2 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Fri, 13 Jun 2025 18:22:13 -0600 Subject: [PATCH 029/530] Reduce tf initialization --- frigate/util/classification.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/frigate/util/classification.py b/frigate/util/classification.py index c6e2b5878..2f5cc89f2 100644 --- a/frigate/util/classification.py +++ b/frigate/util/classification.py @@ -5,10 +5,6 @@ import sys import cv2 import numpy as np -import tensorflow as tf -from tensorflow.keras import layers, models, optimizers -from tensorflow.keras.applications import MobileNetV2 -from tensorflow.keras.preprocessing.image import ImageDataGenerator from frigate.comms.embeddings_updater import EmbeddingsRequestEnum, EmbeddingsRequestor from frigate.comms.inter_process import InterProcessRequestor @@ -44,6 +40,13 @@ def __generate_representative_dataset_factory(dataset_dir: str): @staticmethod def __train_classification_model(model_name: str) -> bool: """Train a classification model.""" + + # import in the function so that tensorflow is not initialized multiple times + import tensorflow as tf + from tensorflow.keras import layers, models, optimizers + from tensorflow.keras.applications import MobileNetV2 + from tensorflow.keras.preprocessing.image import ImageDataGenerator + dataset_dir = os.path.join(CLIPS_DIR, model_name, "dataset") model_dir = os.path.join(MODEL_CACHE_DIR, model_name) num_classes = len( From 2f4d7353f4125cb5dc8b25d1a362a420735b73d4 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Sat, 14 Jun 2025 09:27:50 -0600 Subject: [PATCH 030/530] Don't use staticmethod --- frigate/util/classification.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/frigate/util/classification.py b/frigate/util/classification.py index 2f5cc89f2..a2ba1bf26 100644 --- a/frigate/util/classification.py +++ b/frigate/util/classification.py @@ -17,7 +17,6 @@ EPOCHS = 50 LEARNING_RATE = 0.001 -@staticmethod def __generate_representative_dataset_factory(dataset_dir: str): def generate_representative_dataset(): image_paths = [] @@ -37,7 +36,6 @@ def __generate_representative_dataset_factory(dataset_dir: str): return generate_representative_dataset -@staticmethod def __train_classification_model(model_name: str) -> bool: """Train a classification model.""" From 0d5a49ab82b64208cabdc2033b2f2d3f0d5e7212 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Tue, 17 Jun 2025 12:11:11 -0600 Subject: [PATCH 031/530] Don't fail on unicode debug for config updates --- frigate/comms/config_updater.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frigate/comms/config_updater.py b/frigate/comms/config_updater.py index 06b870c62..0486cbdd1 100644 --- a/frigate/comms/config_updater.py +++ b/frigate/comms/config_updater.py @@ -50,7 +50,7 @@ class ConfigSubscriber: return (topic, obj) else: return (None, None) - except zmq.ZMQError: + except (zmq.ZMQError, UnicodeDecodeError): return (None, None) def stop(self) -> None: From 847b03e71bb75b490c38a5307c6e838c8b5ad28a Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Fri, 20 Jun 2025 06:44:48 -0600 Subject: [PATCH 032/530] Catch unpickling error --- frigate/comms/config_updater.py | 3 ++- 1 file changed, 2 
insertions(+), 1 deletion(-) diff --git a/frigate/comms/config_updater.py b/frigate/comms/config_updater.py index 0486cbdd1..866315d95 100644 --- a/frigate/comms/config_updater.py +++ b/frigate/comms/config_updater.py @@ -1,6 +1,7 @@ """Facilitates communication between processes.""" import multiprocessing as mp +from _pickle import UnpicklingError from multiprocessing.synchronize import Event as MpEvent from typing import Any, Optional @@ -50,7 +51,7 @@ class ConfigSubscriber: return (topic, obj) else: return (None, None) - except (zmq.ZMQError, UnicodeDecodeError): + except (zmq.ZMQError, UnicodeDecodeError, UnpicklingError): return (None, None) def stop(self) -> None: From 7c8164aa993302123fde92501b70a5a4ed62f6d1 Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Sat, 21 Jun 2025 15:38:34 -0500 Subject: [PATCH 033/530] Fix birdseye crash when dynamically adding a camera (#18821) --- frigate/app.py | 1 + frigate/camera/maintainer.py | 6 ++- frigate/output/birdseye.py | 73 ++++++++++++++++++++++++------------ frigate/output/output.py | 8 +++- 4 files changed, 60 insertions(+), 28 deletions(-) diff --git a/frigate/app.py b/frigate/app.py index 687a06be4..48d36988f 100644 --- a/frigate/app.py +++ b/frigate/app.py @@ -425,6 +425,7 @@ class FrigateApp: self.camera_metrics, self.ptz_metrics, self.stop_event, + self.metrics_manager, ) self.camera_maintainer.start() diff --git a/frigate/camera/maintainer.py b/frigate/camera/maintainer.py index dd978bbfc..dd122d4fd 100644 --- a/frigate/camera/maintainer.py +++ b/frigate/camera/maintainer.py @@ -6,7 +6,7 @@ import os import shutil import threading from multiprocessing import Queue -from multiprocessing.managers import DictProxy +from multiprocessing.managers import DictProxy, SyncManager from multiprocessing.synchronize import Event as MpEvent from frigate.camera import CameraMetrics, PTZMetrics @@ -35,6 +35,7 @@ class CameraMaintainer(threading.Thread): camera_metrics: DictProxy, ptz_metrics: dict[str, PTZMetrics], stop_event: MpEvent, + metrics_manager: SyncManager, ): super().__init__(name="camera_processor") self.config = config @@ -56,6 +57,7 @@ class CameraMaintainer(threading.Thread): self.shm_count = self.__calculate_shm_frame_count() self.camera_processes: dict[str, mp.Process] = {} self.capture_processes: dict[str, mp.Process] = {} + self.metrics_manager = metrics_manager def __init_historical_regions(self) -> None: # delete region grids for removed or renamed cameras @@ -128,7 +130,7 @@ class CameraMaintainer(threading.Thread): return if runtime: - self.camera_metrics[name] = CameraMetrics() + self.camera_metrics[name] = CameraMetrics(self.metrics_manager) self.ptz_metrics[name] = PTZMetrics(autotracker_enabled=False) self.region_grids[name] = get_camera_regions_grid( name, diff --git a/frigate/output/birdseye.py b/frigate/output/birdseye.py index a19436d5e..0939b5ce4 100644 --- a/frigate/output/birdseye.py +++ b/frigate/output/birdseye.py @@ -319,35 +319,48 @@ class BirdsEyeFrameManager: self.frame[:] = self.blank_frame self.cameras = {} - for camera, settings in self.config.cameras.items(): - # precalculate the coordinates for all the channels - y, u1, u2, v1, v2 = get_yuv_crop( - settings.frame_shape_yuv, - ( - 0, - 0, - settings.frame_shape[1], - settings.frame_shape[0], - ), - ) - self.cameras[camera] = { - "dimensions": [settings.detect.width, settings.detect.height], - "last_active_frame": 0.0, - "current_frame": 0.0, - "layout_frame": 0.0, - "channel_dims": { - "y": y, - "u1": u1, - 
"u2": u2, - "v1": v1, - "v2": v2, - }, - } + for camera in self.config.cameras.keys(): + self.add_camera(camera) self.camera_layout = [] self.active_cameras = set() self.last_output_time = 0.0 + def add_camera(self, cam: str): + """Add a camera to self.cameras with the correct structure.""" + settings = self.config.cameras[cam] + # precalculate the coordinates for all the channels + y, u1, u2, v1, v2 = get_yuv_crop( + settings.frame_shape_yuv, + ( + 0, + 0, + settings.frame_shape[1], + settings.frame_shape[0], + ), + ) + self.cameras[cam] = { + "dimensions": [ + settings.detect.width, + settings.detect.height, + ], + "last_active_frame": 0.0, + "current_frame": 0.0, + "layout_frame": 0.0, + "channel_dims": { + "y": y, + "u1": u1, + "u2": u2, + "v1": v1, + "v2": v2, + }, + } + + def remove_camera(self, cam: str): + """Remove a camera from self.cameras.""" + if cam in self.cameras: + del self.cameras[cam] + def clear_frame(self): logger.debug("Clearing the birdseye frame") self.frame[:] = self.blank_frame @@ -774,7 +787,7 @@ class Birdseye: self.broadcaster = BroadcastThread( "birdseye", self.converter, websocket_server, stop_event ) - self.birdseye_manager = BirdsEyeFrameManager(config, stop_event) + self.birdseye_manager = BirdsEyeFrameManager(self.config, stop_event) self.frame_manager = SharedMemoryFrameManager() self.stop_event = stop_event self.requestor = InterProcessRequestor() @@ -804,6 +817,16 @@ class Birdseye: self.birdseye_manager.clear_frame() self.__send_new_frame() + def add_camera(self, camera: str) -> None: + """Add a camera to the birdseye manager.""" + self.birdseye_manager.add_camera(camera) + logger.debug(f"Added camera {camera} to birdseye") + + def remove_camera(self, camera: str) -> None: + """Remove a camera from the birdseye manager.""" + self.birdseye_manager.remove_camera(camera) + logger.debug(f"Removed camera {camera} from birdseye") + def write_data( self, camera: str, diff --git a/frigate/output/output.py b/frigate/output/output.py index da5906e78..f176b2e4c 100644 --- a/frigate/output/output.py +++ b/frigate/output/output.py @@ -133,7 +133,7 @@ class OutputProcess(FrigateProcess): # check if there is an updated config updates = config_subscriber.check_for_updates() - if "add" in updates: + if CameraConfigUpdateEnum.add in updates: for camera in updates["add"]: jsmpeg_cameras[camera] = JsmpegCamera( cam_config, self.stop_event, websocket_server @@ -141,6 +141,12 @@ class OutputProcess(FrigateProcess): preview_recorders[camera] = PreviewRecorder(cam_config) preview_write_times[camera] = 0 + if ( + self.config.birdseye.enabled + and self.config.cameras[camera].birdseye.enabled + ): + birdseye.add_camera(camera) + (topic, data) = detection_subscriber.check_for_update(timeout=1) now = datetime.datetime.now().timestamp() From 9c2ba152e1f4b7ef40853f2452cb289ad2181684 Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Sat, 21 Jun 2025 21:44:37 -0500 Subject: [PATCH 034/530] Catch invalid character index in lpr CTC decoder (#18825) --- frigate/data_processing/common/license_plate/mixin.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/frigate/data_processing/common/license_plate/mixin.py b/frigate/data_processing/common/license_plate/mixin.py index 2d63c1c69..7f6a27c62 100644 --- a/frigate/data_processing/common/license_plate/mixin.py +++ b/frigate/data_processing/common/license_plate/mixin.py @@ -1610,9 +1610,9 @@ class CTCDecoder: self.characters = [] if character_dict_path and 
os.path.exists(character_dict_path): with open(character_dict_path, "r", encoding="utf-8") as f: - self.characters = ["blank"] + [ - line.strip() for line in f if line.strip() - ] + self.characters = ( + ["blank"] + [line.strip() for line in f if line.strip()] + [" "] + ) else: self.characters = [ "blank", @@ -1747,7 +1747,7 @@ class CTCDecoder: merged_path.append(char_index) merged_probs.append(seq_log_probs[t, char_index]) - result = "".join(self.char_map[idx] for idx in merged_path) + result = "".join(self.char_map.get(idx, "") for idx in merged_path) results.append(result) confidence = np.exp(merged_probs).tolist() From 3327be05ea98a587e7faceeb09a3244cb2a140ef Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Mon, 23 Jun 2025 08:40:28 -0600 Subject: [PATCH 035/530] Classification model cover images (#18843) * Move to separate component * Add cover images for clssification models --- .../classification/ModelSelectionView.tsx | 74 ++++++++++++++----- 1 file changed, 54 insertions(+), 20 deletions(-) diff --git a/web/src/views/classification/ModelSelectionView.tsx b/web/src/views/classification/ModelSelectionView.tsx index 63133842a..aa2f94c6a 100644 --- a/web/src/views/classification/ModelSelectionView.tsx +++ b/web/src/views/classification/ModelSelectionView.tsx @@ -1,3 +1,4 @@ +import { baseUrl } from "@/api/baseUrl"; import ActivityIndicator from "@/components/indicators/activity-indicator"; import { cn } from "@/lib/utils"; import { @@ -37,27 +38,60 @@ export default function ModelSelectionView({ return (
{classificationConfigs.map((config) => ( -
onClick(config)} - onContextMenu={() => { - // e.stopPropagation(); - // e.preventDefault(); - // handleClickEvent(true); - }} - > -
-
- {config.name} ({config.state_config != null ? "State" : "Object"}{" "} - Classification) -
-
+ onClick(config)} /> ))}
); } + +type ModelCardProps = { + config: CustomClassificationModelConfig; + onClick: () => void; +}; +function ModelCard({ config, onClick }: ModelCardProps) { + const { data: dataset } = useSWR<{ + [id: string]: string[]; + }>(`classification/${config.name}/dataset`, { revalidateOnFocus: false }); + + const coverImages = useMemo(() => { + if (!dataset) { + return {}; + } + + const imageMap: { [key: string]: string } = {}; + + for (const [key, imageList] of Object.entries(dataset)) { + if (imageList.length > 0) { + imageMap[key] = imageList[0]; + } + } + + return imageMap; + }, [dataset]); + + return ( +
onClick()} + > +
+ {Object.entries(coverImages).map(([key, image]) => ( + + ))} +
+
+ {config.name} ({config.state_config != null ? "State" : "Object"}{" "} + Classification) +
+
+ ); +} From e1ee6f010fdcad2d3277eb9782f885f17d41bf1d Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Mon, 23 Jun 2025 15:55:57 -0600 Subject: [PATCH 036/530] Fix process name --- frigate/video.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/frigate/video.py b/frigate/video.py index 98f3c7a8b..e82faf268 100755 --- a/frigate/video.py +++ b/frigate/video.py @@ -55,7 +55,7 @@ from frigate.util.process import FrigateProcess logger = logging.getLogger(__name__) -def stop_ffmpeg(ffmpeg_process, logger): +def stop_ffmpeg(ffmpeg_process: sp.Popen[Any], logger: logging.Logger): logger.info("Terminating the existing ffmpeg process...") ffmpeg_process.terminate() try: @@ -451,7 +451,7 @@ class CameraCapture(FrigateProcess): def __init__( self, config: CameraConfig, shm_frame_count: int, camera_metrics: CameraMetrics ) -> None: - super().__init__(name=f"camera_capture:{config.name}", daemon=True) + super().__init__(name=f"frigate.capture:{config.name}", daemon=True) self.config = config self.shm_frame_count = shm_frame_count self.camera_metrics = camera_metrics @@ -483,7 +483,7 @@ class CameraTracker(FrigateProcess): ptz_metrics: PTZMetrics, region_grid: list[list[dict[str, Any]]], ) -> None: - super().__init__(name=f"camera_processor:{config.name}", daemon=True) + super().__init__(name=f"frigate.process:{config.name}", daemon=True) self.config = config self.model_config = model_config self.labelmap = labelmap From 542bf05bb8fa05b22df89785b8b5312a3fe79845 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Tue, 24 Jun 2025 11:41:11 -0600 Subject: [PATCH 037/530] Handle SIGINT with forkserver (#18860) * Pass stopevent from main start * Share stop event across processes * preload modules * remove explicit os._exit call --------- Co-authored-by: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> --- frigate/__main__.py | 20 +++++++++++++++++++- frigate/app.py | 19 ++++++++++--------- frigate/camera/maintainer.py | 5 ++++- frigate/embeddings/__init__.py | 8 ++++++-- frigate/events/audio.py | 4 +++- frigate/object_detection/base.py | 6 +++++- frigate/output/output.py | 5 +++-- frigate/record/record.py | 5 +++-- frigate/review/review.py | 5 +++-- frigate/util/process.py | 26 ++++---------------------- frigate/video.py | 11 ++++++++--- 11 files changed, 68 insertions(+), 46 deletions(-) diff --git a/frigate/__main__.py b/frigate/__main__.py index 6dd5d130e..f3181e494 100644 --- a/frigate/__main__.py +++ b/frigate/__main__.py @@ -23,6 +23,10 @@ def main() -> None: setup_logging(manager) threading.current_thread().name = "frigate" + stop_event = mp.Event() + + # send stop event on SIGINT + signal.signal(signal.SIGINT, lambda sig, frame: stop_event.set()) # Make sure we exit cleanly on SIGTERM. signal.signal(signal.SIGTERM, lambda sig, frame: sys.exit()) @@ -110,9 +114,23 @@ def main() -> None: sys.exit(0) # Run the main application. 
- FrigateApp(config, manager).start() + FrigateApp(config, manager, stop_event).start() if __name__ == "__main__": + mp.set_forkserver_preload( + [ + # Standard library and core dependencies + "sqlite3", + # Third-party libraries commonly used in Frigate + "numpy", + "cv2", + "peewee", + "zmq", + "ruamel.yaml", + # Frigate core modules + "frigate.camera.maintainer", + ] + ) mp.set_start_method("forkserver", force=True) main() diff --git a/frigate/app.py b/frigate/app.py index 48d36988f..9a662dd18 100644 --- a/frigate/app.py +++ b/frigate/app.py @@ -81,10 +81,12 @@ logger = logging.getLogger(__name__) class FrigateApp: - def __init__(self, config: FrigateConfig, manager: SyncManager) -> None: + def __init__( + self, config: FrigateConfig, manager: SyncManager, stop_event: MpEvent + ) -> None: self.metrics_manager = manager self.audio_process: Optional[mp.Process] = None - self.stop_event: MpEvent = mp.Event() + self.stop_event = stop_event self.detection_queue: Queue = mp.Queue() self.detectors: dict[str, ObjectDetectProcess] = {} self.detection_shms: list[mp.shared_memory.SharedMemory] = [] @@ -225,14 +227,14 @@ class FrigateApp: self.processes["go2rtc"] = proc.info["pid"] def init_recording_manager(self) -> None: - recording_process = RecordProcess(self.config) + recording_process = RecordProcess(self.config, self.stop_event) self.recording_process = recording_process recording_process.start() self.processes["recording"] = recording_process.pid or 0 logger.info(f"Recording process started: {recording_process.pid}") def init_review_segment_manager(self) -> None: - review_segment_process = ReviewProcess(self.config) + review_segment_process = ReviewProcess(self.config, self.stop_event) self.review_segment_process = review_segment_process review_segment_process.start() self.processes["review_segment"] = review_segment_process.pid or 0 @@ -252,8 +254,7 @@ class FrigateApp: return embedding_process = EmbeddingProcess( - self.config, - self.embeddings_metrics, + self.config, self.embeddings_metrics, self.stop_event ) self.embedding_process = embedding_process embedding_process.start() @@ -389,6 +390,7 @@ class FrigateApp: list(self.config.cameras.keys()), self.config, detector_config, + self.stop_event, ) def start_ptz_autotracker(self) -> None: @@ -412,7 +414,7 @@ class FrigateApp: self.detected_frames_processor.start() def start_video_output_processor(self) -> None: - output_processor = OutputProcess(self.config) + output_processor = OutputProcess(self.config, self.stop_event) self.output_processor = output_processor output_processor.start() logger.info(f"Output process started: {output_processor.pid}") @@ -438,7 +440,7 @@ class FrigateApp: if audio_cameras: self.audio_process = AudioProcessor( - self.config, audio_cameras, self.camera_metrics + self.config, audio_cameras, self.camera_metrics, self.stop_event ) self.audio_process.start() self.processes["audio_detector"] = self.audio_process.pid or 0 @@ -666,4 +668,3 @@ class FrigateApp: _stop_logging() self.metrics_manager.shutdown() - os._exit(os.EX_OK) diff --git a/frigate/camera/maintainer.py b/frigate/camera/maintainer.py index dd122d4fd..5bd97136c 100644 --- a/frigate/camera/maintainer.py +++ b/frigate/camera/maintainer.py @@ -165,6 +165,7 @@ class CameraMaintainer(threading.Thread): self.camera_metrics[name], self.ptz_metrics[name], self.region_grids[name], + self.stop_event, ) self.camera_processes[config.name] = camera_process camera_process.start() @@ -184,7 +185,9 @@ class CameraMaintainer(threading.Thread): frame_size = 
config.frame_shape_yuv[0] * config.frame_shape_yuv[1] self.frame_manager.create(f"{config.name}_frame{i}", frame_size) - capture_process = CameraCapture(config, count, self.camera_metrics[name]) + capture_process = CameraCapture( + config, count, self.camera_metrics[name], self.stop_event + ) capture_process.daemon = True self.capture_processes[name] = capture_process capture_process.start() diff --git a/frigate/embeddings/__init__.py b/frigate/embeddings/__init__.py index cb897ed4b..d4887e0d2 100644 --- a/frigate/embeddings/__init__.py +++ b/frigate/embeddings/__init__.py @@ -6,6 +6,7 @@ import logging import os import threading from json.decoder import JSONDecodeError +from multiprocessing.synchronize import Event as MpEvent from typing import Any, Union import regex @@ -29,9 +30,12 @@ logger = logging.getLogger(__name__) class EmbeddingProcess(FrigateProcess): def __init__( - self, config: FrigateConfig, metrics: DataProcessorMetrics | None + self, + config: FrigateConfig, + metrics: DataProcessorMetrics | None, + stop_event: MpEvent, ) -> None: - super().__init__(name="frigate.embeddings_manager", daemon=True) + super().__init__(stop_event, name="frigate.embeddings_manager", daemon=True) self.config = config self.metrics = metrics diff --git a/frigate/events/audio.py b/frigate/events/audio.py index 7f94c2a00..03c750a06 100644 --- a/frigate/events/audio.py +++ b/frigate/events/audio.py @@ -7,6 +7,7 @@ import string import threading import time from multiprocessing.managers import DictProxy +from multiprocessing.synchronize import Event as MpEvent from typing import Any, Tuple import numpy as np @@ -84,8 +85,9 @@ class AudioProcessor(FrigateProcess): config: FrigateConfig, cameras: list[CameraConfig], camera_metrics: DictProxy, + stop_event: MpEvent, ): - super().__init__(name="frigate.audio_manager", daemon=True) + super().__init__(stop_event, name="frigate.audio_manager", daemon=True) self.camera_metrics = camera_metrics self.cameras = cameras diff --git a/frigate/object_detection/base.py b/frigate/object_detection/base.py index 2953f86eb..32f33ffa5 100644 --- a/frigate/object_detection/base.py +++ b/frigate/object_detection/base.py @@ -95,8 +95,9 @@ class DetectorRunner(FrigateProcess): start_time: Value, config: FrigateConfig, detector_config: BaseDetectorConfig, + stop_event: MpEvent, ) -> None: - super().__init__(name=name, daemon=True) + super().__init__(stop_event, name=name, daemon=True) self.detection_queue = detection_queue self.cameras = cameras self.avg_speed = avg_speed @@ -166,6 +167,7 @@ class ObjectDetectProcess: cameras: list[str], config: FrigateConfig, detector_config: BaseDetectorConfig, + stop_event: MpEvent, ): self.name = name self.cameras = cameras @@ -175,6 +177,7 @@ class ObjectDetectProcess: self.detect_process: FrigateProcess | None = None self.config = config self.detector_config = detector_config + self.stop_event = stop_event self.start_or_restart() def stop(self): @@ -202,6 +205,7 @@ class ObjectDetectProcess: self.detection_start, self.config, self.detector_config, + self.stop_event, ) self.detect_process.start() diff --git a/frigate/output/output.py b/frigate/output/output.py index f176b2e4c..34c9e33c0 100644 --- a/frigate/output/output.py +++ b/frigate/output/output.py @@ -5,6 +5,7 @@ import logging import os import shutil import threading +from multiprocessing.synchronize import Event as MpEvent from wsgiref.simple_server import make_server from ws4py.server.wsgirefserver import ( @@ -72,8 +73,8 @@ def check_disabled_camera_update( class 
OutputProcess(FrigateProcess): - def __init__(self, config: FrigateConfig) -> None: - super().__init__(name="frigate.output", daemon=True) + def __init__(self, config: FrigateConfig, stop_event: MpEvent) -> None: + super().__init__(stop_event, name="frigate.output", daemon=True) self.config = config def run(self) -> None: diff --git a/frigate/record/record.py b/frigate/record/record.py index c52260745..b04a68e8c 100644 --- a/frigate/record/record.py +++ b/frigate/record/record.py @@ -1,6 +1,7 @@ """Run recording maintainer and cleanup.""" import logging +from multiprocessing.synchronize import Event as MpEvent from playhouse.sqliteq import SqliteQueueDatabase @@ -13,8 +14,8 @@ logger = logging.getLogger(__name__) class RecordProcess(FrigateProcess): - def __init__(self, config: FrigateConfig) -> None: - super().__init__(name="frigate.recording_manager", daemon=True) + def __init__(self, config: FrigateConfig, stop_event: MpEvent) -> None: + super().__init__(stop_event, name="frigate.recording_manager", daemon=True) self.config = config def run(self) -> None: diff --git a/frigate/review/review.py b/frigate/review/review.py index 677d07776..917a53a4b 100644 --- a/frigate/review/review.py +++ b/frigate/review/review.py @@ -1,6 +1,7 @@ """Run recording maintainer and cleanup.""" import logging +from multiprocessing.synchronize import Event as MpEvent from frigate.config import FrigateConfig from frigate.review.maintainer import ReviewSegmentMaintainer @@ -10,8 +11,8 @@ logger = logging.getLogger(__name__) class ReviewProcess(FrigateProcess): - def __init__(self, config: FrigateConfig) -> None: - super().__init__(name="frigate.review_segment_manager", daemon=True) + def __init__(self, config: FrigateConfig, stop_event: MpEvent) -> None: + super().__init__(stop_event, name="frigate.review_segment_manager", daemon=True) self.config = config def run(self) -> None: diff --git a/frigate/util/process.py b/frigate/util/process.py index 9234a0ea6..830818d4d 100644 --- a/frigate/util/process.py +++ b/frigate/util/process.py @@ -1,10 +1,9 @@ import faulthandler import logging import multiprocessing as mp -import signal -import sys import threading from logging.handlers import QueueHandler +from multiprocessing.synchronize import Event as MpEvent from typing import Callable, Optional from setproctitle import setproctitle @@ -16,6 +15,7 @@ from frigate.config.logger import LoggerConfig class BaseProcess(mp.Process): def __init__( self, + stop_event: MpEvent, *, name: Optional[str] = None, target: Optional[Callable] = None, @@ -23,6 +23,7 @@ class BaseProcess(mp.Process): kwargs: dict = {}, daemon: Optional[bool] = None, ): + self.stop_event = stop_event super().__init__( name=name, target=target, args=args, kwargs=kwargs, daemon=daemon ) @@ -42,14 +43,6 @@ class BaseProcess(mp.Process): class FrigateProcess(BaseProcess): logger: logging.Logger - @property - def stop_event(self) -> threading.Event: - # Lazily create the stop_event. This allows the signal handler to tell if anyone is - # monitoring the stop event, and to raise a SystemExit if not. - if "stop_event" not in self.__dict__: - self.__dict__["stop_event"] = threading.Event() - return self.__dict__["stop_event"] - def before_start(self) -> None: self.__log_queue = frigate.log.log_listener.queue @@ -58,18 +51,7 @@ class FrigateProcess(BaseProcess): threading.current_thread().name = f"process:{self.name}" faulthandler.enable() - def receiveSignal(signalNumber, frame): - # Get the stop_event through the dict to bypass lazy initialization. 
- stop_event = self.__dict__.get("stop_event") - if stop_event is not None: - # Someone is monitoring stop_event. We should set it. - stop_event.set() - else: - # Nobody is monitoring stop_event. We should raise SystemExit. - sys.exit() - - signal.signal(signal.SIGTERM, receiveSignal) - signal.signal(signal.SIGINT, receiveSignal) + # setup logging self.logger = logging.getLogger(self.name) logging.basicConfig(handlers=[], force=True) logging.getLogger().addHandler(QueueHandler(self.__log_queue)) diff --git a/frigate/video.py b/frigate/video.py index e82faf268..3bc2702a5 100755 --- a/frigate/video.py +++ b/frigate/video.py @@ -449,9 +449,13 @@ class CameraCaptureRunner(threading.Thread): class CameraCapture(FrigateProcess): def __init__( - self, config: CameraConfig, shm_frame_count: int, camera_metrics: CameraMetrics + self, + config: CameraConfig, + shm_frame_count: int, + camera_metrics: CameraMetrics, + stop_event: MpEvent, ) -> None: - super().__init__(name=f"frigate.capture:{config.name}", daemon=True) + super().__init__(stop_event, name=f"frigate.capture:{config.name}", daemon=True) self.config = config self.shm_frame_count = shm_frame_count self.camera_metrics = camera_metrics @@ -482,8 +486,9 @@ class CameraTracker(FrigateProcess): camera_metrics: CameraMetrics, ptz_metrics: PTZMetrics, region_grid: list[list[dict[str, Any]]], + stop_event: MpEvent, ) -> None: - super().__init__(name=f"frigate.process:{config.name}", daemon=True) + super().__init__(stop_event, name=f"frigate.process:{config.name}", daemon=True) self.config = config self.model_config = model_config self.labelmap = labelmap From da0248db15b4d539d606564260ddd30f4566a61a Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Tue, 24 Jun 2025 16:19:09 -0500 Subject: [PATCH 038/530] Don't try to close or join mp manager queues (#18866) Multiprocessing Manager queues don't have a close() or join_thread() method, and the Manager will clean it up appropriately after we empty it. This prevents an infinite loop when an AttributeError exception fires for Manager AutoProxy queue objects. 
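The pattern, as a rough sketch (the helper name below is illustrative, not
the exact function in this diff):

    import multiprocessing.queues
    import queue

    def drain(q):
        # Pull items until the queue reports empty or the feeder hits EOF.
        while True:
            try:
                q.get(block=True, timeout=0.5)
            except (queue.Empty, EOFError):
                break
        # Only real multiprocessing queues expose close()/join_thread();
        # Manager AutoProxy queues are cleaned up by the Manager itself.
        if isinstance(q, multiprocessing.queues.Queue):
            q.close()
            q.join_thread()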
--- frigate/util/builtin.py | 25 ++++++++++++++++--------- 1 file changed, 16 insertions(+), 9 deletions(-) diff --git a/frigate/util/builtin.py b/frigate/util/builtin.py index 90c0f9227..d4f8d7e37 100644 --- a/frigate/util/builtin.py +++ b/frigate/util/builtin.py @@ -5,7 +5,7 @@ import copy import datetime import logging import math -import multiprocessing as mp +import multiprocessing.queues import queue import re import shlex @@ -338,16 +338,23 @@ def clear_and_unlink(file: Path, missing_ok: bool = True) -> None: file.unlink(missing_ok=missing_ok) -def empty_and_close_queue(q: mp.Queue): +def empty_and_close_queue(q): while True: try: - try: - q.get(block=True, timeout=0.5) - except (queue.Empty, EOFError): - q.close() - q.join_thread() - return - except AttributeError: + q.get(block=True, timeout=0.5) + except (queue.Empty, EOFError): + break + except Exception as e: + logger.debug(f"Error while emptying queue: {e}") + break + + # close the queue if it is a multiprocessing queue + # manager proxy queues do not have close or join_thread method + if isinstance(q, multiprocessing.queues.Queue): + try: + q.close() + q.join_thread() + except Exception: pass From ec6c04e49aaff4c1913c8b7fd3fdccace5978940 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Wed, 25 Jun 2025 07:24:45 -0600 Subject: [PATCH 039/530] Improve logging (#18867) * Ignore numpy get limits warning * Add function wrapper to redirect stdout and stderr to logpipe * Save stderr too * Add more to catch * run logpipe * Use other logging redirect class * Use other logging redirect class * add decorator for redirecting c/c++ level output to logger * fix typing --------- Co-authored-by: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> --- docker/main/Dockerfile | 3 + frigate/data_processing/common/face/model.py | 2 + frigate/detectors/plugins/cpu_tfl.py | 2 + frigate/embeddings/onnx/face_embedding.py | 2 + frigate/events/audio.py | 6 +- frigate/log.py | 190 ++++++++++++++++++- frigate/util/classification.py | 19 +- 7 files changed, 206 insertions(+), 18 deletions(-) diff --git a/docker/main/Dockerfile b/docker/main/Dockerfile index 90e174d10..2f5db433b 100644 --- a/docker/main/Dockerfile +++ b/docker/main/Dockerfile @@ -224,6 +224,9 @@ ENV TRANSFORMERS_NO_ADVISORY_WARNINGS=1 # Set OpenCV ffmpeg loglevel to fatal: https://ffmpeg.org/doxygen/trunk/log_8h.html ENV OPENCV_FFMPEG_LOGLEVEL=8 +# Set NumPy to ignore getlimits warning +ENV PYTHONWARNINGS="ignore:::numpy.core.getlimits" + # Set HailoRT to disable logging ENV HAILORT_LOGGER_PATH=NONE diff --git a/frigate/data_processing/common/face/model.py b/frigate/data_processing/common/face/model.py index aea6751a0..f230a1b2c 100644 --- a/frigate/data_processing/common/face/model.py +++ b/frigate/data_processing/common/face/model.py @@ -11,6 +11,7 @@ from scipy import stats from frigate.config import FrigateConfig from frigate.const import MODEL_CACHE_DIR from frigate.embeddings.onnx.face_embedding import ArcfaceEmbedding, FaceNetEmbedding +from frigate.log import redirect_output_to_logger logger = logging.getLogger(__name__) @@ -37,6 +38,7 @@ class FaceRecognizer(ABC): def classify(self, face_image: np.ndarray) -> tuple[str, float] | None: pass + @redirect_output_to_logger(logger, logging.DEBUG) def init_landmark_detector(self) -> None: landmark_model = os.path.join(MODEL_CACHE_DIR, "facedet/landmarkdet.yaml") diff --git a/frigate/detectors/plugins/cpu_tfl.py b/frigate/detectors/plugins/cpu_tfl.py index fc8db0f4b..37cc10777 100644 --- a/frigate/detectors/plugins/cpu_tfl.py 
+++ b/frigate/detectors/plugins/cpu_tfl.py @@ -5,6 +5,7 @@ from typing_extensions import Literal from frigate.detectors.detection_api import DetectionApi from frigate.detectors.detector_config import BaseDetectorConfig +from frigate.log import redirect_output_to_logger from ..detector_utils import tflite_detect_raw, tflite_init @@ -27,6 +28,7 @@ class CpuDetectorConfig(BaseDetectorConfig): class CpuTfl(DetectionApi): type_key = DETECTOR_KEY + @redirect_output_to_logger(logger, logging.DEBUG) def __init__(self, detector_config: CpuDetectorConfig): interpreter = Interpreter( model_path=detector_config.model.path, diff --git a/frigate/embeddings/onnx/face_embedding.py b/frigate/embeddings/onnx/face_embedding.py index c0f35a581..acb4507a2 100644 --- a/frigate/embeddings/onnx/face_embedding.py +++ b/frigate/embeddings/onnx/face_embedding.py @@ -6,6 +6,7 @@ import os import numpy as np from frigate.const import MODEL_CACHE_DIR +from frigate.log import redirect_output_to_logger from frigate.util.downloader import ModelDownloader from .base_embedding import BaseEmbedding @@ -53,6 +54,7 @@ class FaceNetEmbedding(BaseEmbedding): self._load_model_and_utils() logger.debug(f"models are already downloaded for {self.model_name}") + @redirect_output_to_logger(logger, logging.DEBUG) def _load_model_and_utils(self): if self.runner is None: if self.downloader: diff --git a/frigate/events/audio.py b/frigate/events/audio.py index 03c750a06..f99e6fe41 100644 --- a/frigate/events/audio.py +++ b/frigate/events/audio.py @@ -37,7 +37,7 @@ from frigate.data_processing.real_time.audio_transcription import ( AudioTranscriptionRealTimeProcessor, ) from frigate.ffmpeg_presets import parse_preset_input -from frigate.log import LogPipe +from frigate.log import LogPipe, redirect_output_to_logger from frigate.object_detection.base import load_labels from frigate.util.builtin import get_ffmpeg_arg_list from frigate.util.process import FrigateProcess @@ -49,6 +49,9 @@ except ModuleNotFoundError: from tensorflow.lite.python.interpreter import Interpreter +logger = logging.getLogger(__name__) + + def get_ffmpeg_command(ffmpeg: FfmpegConfig) -> list[str]: ffmpeg_input: CameraInput = [i for i in ffmpeg.inputs if "audio" in i.roles][0] input_args = get_ffmpeg_arg_list(ffmpeg.global_args) + ( @@ -423,6 +426,7 @@ class AudioEventMaintainer(threading.Thread): class AudioTfl: + @redirect_output_to_logger(logger, logging.DEBUG) def __init__(self, stop_event: threading.Event, num_threads=2): self.stop_event = stop_event self.num_threads = num_threads diff --git a/frigate/log.py b/frigate/log.py index 2e9c781f1..11f2da254 100644 --- a/frigate/log.py +++ b/frigate/log.py @@ -1,15 +1,18 @@ # In log.py import atexit +import io import logging import os import sys import threading from collections import deque +from contextlib import contextmanager from enum import Enum +from functools import wraps from logging.handlers import QueueHandler, QueueListener from multiprocessing.managers import SyncManager -from queue import Queue -from typing import Deque, Optional +from queue import Empty, Queue +from typing import Any, Callable, Deque, Generator, Optional from frigate.util.builtin import clean_camera_user_pass @@ -102,11 +105,11 @@ os.register_at_fork(after_in_child=reopen_std_streams) # based on https://codereview.stackexchange.com/a/17959 class LogPipe(threading.Thread): - def __init__(self, log_name: str): + def __init__(self, log_name: str, level: int = logging.ERROR): """Setup the object with a logger and start the thread""" 
super().__init__(daemon=False) self.logger = logging.getLogger(log_name) - self.level = logging.ERROR + self.level = level self.deque: Deque[str] = deque(maxlen=100) self.fdRead, self.fdWrite = os.pipe() self.pipeReader = os.fdopen(self.fdRead) @@ -135,3 +138,182 @@ class LogPipe(threading.Thread): def close(self) -> None: """Close the write end of the pipe.""" os.close(self.fdWrite) + + +class LogRedirect(io.StringIO): + """ + A custom file-like object to capture stdout and process it. + It extends io.StringIO to capture output and then processes it + line by line. + """ + + def __init__(self, logger_instance: logging.Logger, level: int): + super().__init__() + self.logger = logger_instance + self.log_level = level + self._line_buffer: list[str] = [] + + def write(self, s: Any) -> int: + if not isinstance(s, str): + s = str(s) + + self._line_buffer.append(s) + + # Process output line by line if a newline is present + if "\n" in s: + full_output = "".join(self._line_buffer) + lines = full_output.splitlines(keepends=True) + self._line_buffer = [] + + for line in lines: + if line.endswith("\n"): + self._process_line(line.rstrip("\n")) + else: + self._line_buffer.append(line) + + return len(s) + + def _process_line(self, line: str) -> None: + self.logger.log(self.log_level, line) + + def flush(self) -> None: + if self._line_buffer: + full_output = "".join(self._line_buffer) + self._line_buffer = [] + if full_output: # Only process if there's content + self._process_line(full_output) + + def __enter__(self) -> "LogRedirect": + """Context manager entry point.""" + return self + + def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: + """Context manager exit point. Ensures buffered content is flushed.""" + self.flush() + + +@contextmanager +def redirect_fd_to_queue(queue: Queue[str]) -> Generator[None, None, None]: + """Redirect file descriptor 1 (stdout) to a pipe and capture output in a queue.""" + stdout_fd = os.dup(1) + read_fd, write_fd = os.pipe() + os.dup2(write_fd, 1) + os.close(write_fd) + + stop_event = threading.Event() + + def reader() -> None: + """Read from pipe and put lines in queue until stop_event is set.""" + try: + with os.fdopen(read_fd, "r") as pipe: + while not stop_event.is_set(): + line = pipe.readline() + if not line: # EOF + break + queue.put(line.strip()) + except OSError as e: + queue.put(f"Reader error: {e}") + finally: + if not stop_event.is_set(): + stop_event.set() + + reader_thread = threading.Thread(target=reader, daemon=False) + reader_thread.start() + + try: + yield + finally: + os.dup2(stdout_fd, 1) + os.close(stdout_fd) + stop_event.set() + reader_thread.join(timeout=1.0) + try: + os.close(read_fd) + except OSError: + pass + + +def redirect_output_to_logger(logger: logging.Logger, level: int) -> Any: + """Decorator to redirect both Python sys.stdout/stderr and C-level stdout to logger.""" + + def decorator(func: Callable) -> Callable: + @wraps(func) + def wrapper(*args: Any, **kwargs: Any) -> Any: + queue: Queue[str] = Queue() + + log_redirect = LogRedirect(logger, level) + old_stdout = sys.stdout + old_stderr = sys.stderr + sys.stdout = log_redirect + sys.stderr = log_redirect + + try: + # Redirect C-level stdout + with redirect_fd_to_queue(queue): + result = func(*args, **kwargs) + finally: + # Restore Python stdout/stderr + sys.stdout = old_stdout + sys.stderr = old_stderr + log_redirect.flush() + + # Log C-level output from queue + while True: + try: + logger.log(level, queue.get_nowait()) + except Empty: + break + + return result + + 
return wrapper + + return decorator + + +def suppress_os_output(func: Callable) -> Callable: + """ + A decorator that suppresses all output (stdout and stderr) + at the operating system file descriptor level for the decorated function. + This is useful for silencing noisy C/C++ libraries. + Note: This is a Unix-specific solution using os.dup2 and os.pipe. + It temporarily redirects file descriptors 1 (stdout) and 2 (stderr) + to a non-read pipe, effectively discarding their output. + """ + + @wraps(func) + def wrapper(*args: tuple, **kwargs: dict[str, Any]) -> Any: + # Save the original file descriptors for stdout (1) and stderr (2) + original_stdout_fd = os.dup(1) + original_stderr_fd = os.dup(2) + + # Create dummy pipes. We only need the write ends to redirect to. + # The data written to these pipes will be discarded as nothing + # will read from the read ends. + devnull_read_fd, devnull_write_fd = os.pipe() + + try: + # Redirect stdout (FD 1) and stderr (FD 2) to the write end of our dummy pipe + os.dup2(devnull_write_fd, 1) # Redirect stdout to devnull pipe + os.dup2(devnull_write_fd, 2) # Redirect stderr to devnull pipe + + # Execute the original function + result = func(*args, **kwargs) + + finally: + # Restore original stdout and stderr file descriptors (1 and 2) + # This is crucial to ensure normal printing resumes after the decorated function. + os.dup2(original_stdout_fd, 1) + os.dup2(original_stderr_fd, 2) + + # Close all duplicated and pipe file descriptors to prevent resource leaks. + # It's important to close the read end of the dummy pipe too, + # as nothing is explicitly reading from it. + os.close(original_stdout_fd) + os.close(original_stderr_fd) + os.close(devnull_read_fd) + os.close(devnull_write_fd) + + return result + + return wrapper diff --git a/frigate/util/classification.py b/frigate/util/classification.py index a2ba1bf26..3c030a986 100644 --- a/frigate/util/classification.py +++ b/frigate/util/classification.py @@ -1,7 +1,7 @@ """Util for classification models.""" +import logging import os -import sys import cv2 import numpy as np @@ -9,6 +9,7 @@ import numpy as np from frigate.comms.embeddings_updater import EmbeddingsRequestEnum, EmbeddingsRequestor from frigate.comms.inter_process import InterProcessRequestor from frigate.const import CLIPS_DIR, MODEL_CACHE_DIR, UPDATE_MODEL_STATE +from frigate.log import redirect_output_to_logger from frigate.types import ModelStatusTypesEnum from frigate.util.process import FrigateProcess @@ -16,6 +17,8 @@ BATCH_SIZE = 16 EPOCHS = 50 LEARNING_RATE = 0.001 +logger = logging.getLogger(__name__) + def __generate_representative_dataset_factory(dataset_dir: str): def generate_representative_dataset(): @@ -36,6 +39,7 @@ def __generate_representative_dataset_factory(dataset_dir: str): return generate_representative_dataset +@redirect_output_to_logger(logger, logging.DEBUG) def __train_classification_model(model_name: str) -> bool: """Train a classification model.""" @@ -55,14 +59,6 @@ def __train_classification_model(model_name: str) -> bool: ] ) - # TF and Keras are very loud with logging - # we want to avoid these logs so we - # temporarily redirect stdout / stderr - original_stdout = sys.stdout - original_stderr = sys.stderr - sys.stdout = open(os.devnull, "w") - sys.stderr = open(os.devnull, "w") - # Start with imagenet base model with 35% of channels in each layer base_model = MobileNetV2( input_shape=(224, 224, 3), @@ -124,10 +120,6 @@ def __train_classification_model(model_name: str) -> bool: with 
open(os.path.join(model_dir, "model.tflite"), "wb") as f: f.write(tflite_model) - # restore original stdout / stderr - sys.stdout = original_stdout - sys.stderr = original_stderr - @staticmethod def kickoff_model_training( @@ -146,6 +138,7 @@ def kickoff_model_training( # tensorflow will free CPU / GPU memory # upon training completion training_process = FrigateProcess( + None, target=__train_classification_model, name=f"model_training:{model_name}", args=(model_name,), From cf62bee1703495d324b30525acdcd8204179ff58 Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Wed, 25 Jun 2025 16:45:36 -0500 Subject: [PATCH 040/530] Add ONVIF focus support (#18883) * backend * frontend and i18n --- frigate/ptz/onvif.py | 75 ++++++++++++++++++++++++--- web/public/locales/en/views/live.json | 8 +++ web/src/types/ptz.ts | 9 +++- web/src/views/live/LiveCameraView.tsx | 40 +++++++++++++- 4 files changed, 123 insertions(+), 9 deletions(-) diff --git a/frigate/ptz/onvif.py b/frigate/ptz/onvif.py index 81c8b9852..bd5bef0b0 100644 --- a/frigate/ptz/onvif.py +++ b/frigate/ptz/onvif.py @@ -33,6 +33,8 @@ class OnvifCommandEnum(str, Enum): stop = "stop" zoom_in = "zoom_in" zoom_out = "zoom_out" + focus_in = "focus_in" + focus_out = "focus_out" class OnvifController: @@ -185,6 +187,16 @@ class OnvifController: ptz: ONVIFService = await onvif.create_ptz_service() self.cams[camera_name]["ptz"] = ptz + imaging: ONVIFService = await onvif.create_imaging_service() + self.cams[camera_name]["imaging"] = imaging + try: + video_sources = await media.GetVideoSources() + if video_sources and len(video_sources) > 0: + self.cams[camera_name]["video_source_token"] = video_sources[0].token + except (Fault, ONVIFError, TransportError, Exception) as e: + logger.debug(f"Unable to get video sources for {camera_name}: {e}") + self.cams[camera_name]["video_source_token"] = None + # setup continuous moving request move_request = ptz.create_type("ContinuousMove") move_request.ProfileToken = profile.token @@ -366,7 +378,19 @@ class OnvifController: f"Disabling autotracking zooming for {camera_name}: Absolute zoom not supported. 
Exception: {e}" ) - # set relative pan/tilt space for autotracker + if self.cams[camera_name]["video_source_token"] is not None: + try: + imaging_capabilities = await imaging.GetImagingSettings( + {"VideoSourceToken": self.cams[camera_name]["video_source_token"]} + ) + if ( + hasattr(imaging_capabilities, "Focus") + and imaging_capabilities.Focus + ): + supported_features.append("focus") + except (Fault, ONVIFError, TransportError, Exception) as e: + logger.debug(f"Focus not supported for {camera_name}: {e}") + if ( self.config.cameras[camera_name].onvif.autotracking.enabled_in_config and self.config.cameras[camera_name].onvif.autotracking.enabled @@ -391,6 +415,18 @@ class OnvifController: "Zoom": True, } ) + if ( + "focus" in self.cams[camera_name]["features"] + and self.cams[camera_name]["video_source_token"] + ): + try: + stop_request = self.cams[camera_name]["imaging"].create_type("Stop") + stop_request.VideoSourceToken = self.cams[camera_name][ + "video_source_token" + ] + await self.cams[camera_name]["imaging"].Stop(stop_request) + except (Fault, ONVIFError, TransportError, Exception) as e: + logger.warning(f"Failed to stop focus for {camera_name}: {e}") self.cams[camera_name]["active"] = False async def _move(self, camera_name: str, command: OnvifCommandEnum) -> None: @@ -599,6 +635,35 @@ class OnvifController: self.cams[camera_name]["active"] = False + async def _focus(self, camera_name: str, command: OnvifCommandEnum) -> None: + if self.cams[camera_name]["active"]: + logger.warning( + f"{camera_name} is already performing an action, not moving..." + ) + await self._stop(camera_name) + + if ( + "focus" not in self.cams[camera_name]["features"] + or not self.cams[camera_name]["video_source_token"] + ): + logger.error(f"{camera_name} does not support ONVIF continuous focus.") + return + + self.cams[camera_name]["active"] = True + move_request = self.cams[camera_name]["imaging"].create_type("Move") + move_request.VideoSourceToken = self.cams[camera_name]["video_source_token"] + move_request.Focus = { + "Continuous": { + "Speed": 0.5 if command == OnvifCommandEnum.focus_in else -0.5 + } + } + + try: + await self.cams[camera_name]["imaging"].Move(move_request) + except (Fault, ONVIFError, TransportError, Exception) as e: + logger.warning(f"Onvif sending focus request to {camera_name} failed: {e}") + self.cams[camera_name]["active"] = False + async def handle_command_async( self, camera_name: str, command: OnvifCommandEnum, param: str = "" ) -> None: @@ -622,11 +687,10 @@ class OnvifController: elif command == OnvifCommandEnum.move_relative: _, pan, tilt = param.split("_") await self._move_relative(camera_name, float(pan), float(tilt), 0, 1) - elif ( - command == OnvifCommandEnum.zoom_in - or command == OnvifCommandEnum.zoom_out - ): + elif command in (OnvifCommandEnum.zoom_in, OnvifCommandEnum.zoom_out): await self._zoom(camera_name, command) + elif command in (OnvifCommandEnum.focus_in, OnvifCommandEnum.focus_out): + await self._focus(camera_name, command) else: await self._move(camera_name, command) except (Fault, ONVIFError, TransportError, Exception) as e: @@ -637,7 +701,6 @@ class OnvifController: ) -> None: """ Handle ONVIF commands by scheduling them in the event loop. - This is the synchronous interface that schedules async work. 
""" future = asyncio.run_coroutine_threadsafe( self.handle_command_async(camera_name, command, param), self.loop diff --git a/web/public/locales/en/views/live.json b/web/public/locales/en/views/live.json index fea120601..2af399296 100644 --- a/web/public/locales/en/views/live.json +++ b/web/public/locales/en/views/live.json @@ -38,6 +38,14 @@ "label": "Zoom PTZ camera out" } }, + "focus": { + "in": { + "label": "Focus PTZ camera in" + }, + "out": { + "label": "Focus PTZ camera out" + } + }, "frame": { "center": { "label": "Click in the frame to center the PTZ camera" diff --git a/web/src/types/ptz.ts b/web/src/types/ptz.ts index 1a626972e..21a300b3d 100644 --- a/web/src/types/ptz.ts +++ b/web/src/types/ptz.ts @@ -1,4 +1,11 @@ -type PtzFeature = "pt" | "zoom" | "pt-r" | "zoom-r" | "zoom-a" | "pt-r-fov"; +type PtzFeature = + | "pt" + | "zoom" + | "pt-r" + | "zoom-r" + | "zoom-a" + | "pt-r-fov" + | "focus"; export type CameraPtzInfo = { name: string; diff --git a/web/src/views/live/LiveCameraView.tsx b/web/src/views/live/LiveCameraView.tsx index 9e9e0e974..69d4a26f4 100644 --- a/web/src/views/live/LiveCameraView.tsx +++ b/web/src/views/live/LiveCameraView.tsx @@ -92,6 +92,8 @@ import { LuX, } from "react-icons/lu"; import { + MdCenterFocusStrong, + MdCenterFocusWeak, MdClosedCaption, MdClosedCaptionDisabled, MdNoPhotography, @@ -809,10 +811,10 @@ function PtzControlPanel({ sendPtz("MOVE_DOWN"); break; case "+": - sendPtz("ZOOM_IN"); + sendPtz(modifiers.shift ? "FOCUS_IN" : "ZOOM_IN"); break; case "-": - sendPtz("ZOOM_OUT"); + sendPtz(modifiers.shift ? "FOCUS_OUT" : "ZOOM_OUT"); break; } }, @@ -923,6 +925,40 @@ function PtzControlPanel({ )} + {ptz?.features?.includes("focus") && ( + <> + { + e.preventDefault(); + sendPtz("FOCUS_IN"); + }} + onTouchStart={(e) => { + e.preventDefault(); + sendPtz("FOCUS_IN"); + }} + onMouseUp={onStop} + onTouchEnd={onStop} + > + + + { + e.preventDefault(); + sendPtz("FOCUS_OUT"); + }} + onTouchStart={(e) => { + e.preventDefault(); + sendPtz("FOCUS_OUT"); + }} + onMouseUp={onStop} + onTouchEnd={onStop} + > + + + + )} {ptz?.features?.includes("pt-r-fov") && ( From ceeb6543f53d735a8c890ba58e3e7a47ca3d054e Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Thu, 26 Jun 2025 07:32:48 -0600 Subject: [PATCH 041/530] 0.17 tweaks (#18892) * Set version * Cleanup more logs * Don't log matplotlib --- Makefile | 2 +- frigate/data_processing/real_time/bird.py | 2 ++ frigate/log.py | 5 +++-- 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/Makefile b/Makefile index e414ed65c..1c4e137a1 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ default_target: local COMMIT_HASH := $(shell git log -1 --pretty=format:"%h"|tail -1) -VERSION = 0.16.1 +VERSION = 0.17.0 IMAGE_REPO ?= ghcr.io/blakeblackshear/frigate GITHUB_REF_NAME ?= $(shell git rev-parse --abbrev-ref HEAD) BOARDS= #Initialized empty diff --git a/frigate/data_processing/real_time/bird.py b/frigate/data_processing/real_time/bird.py index 8d2c598fc..a51c7a7e8 100644 --- a/frigate/data_processing/real_time/bird.py +++ b/frigate/data_processing/real_time/bird.py @@ -13,6 +13,7 @@ from frigate.comms.event_metadata_updater import ( ) from frigate.config import FrigateConfig from frigate.const import MODEL_CACHE_DIR +from frigate.log import redirect_output_to_logger from frigate.util.object import calculate_region from ..types import DataProcessorMetrics @@ -76,6 +77,7 @@ class BirdRealTimeProcessor(RealTimeProcessorApi): except Exception as e: logger.error(f"Failed to download {path}: {e}") + 
@redirect_output_to_logger(logger, logging.DEBUG) def __build_detector(self) -> None: self.interpreter = Interpreter( model_path=os.path.join(MODEL_CACHE_DIR, "bird/bird.tflite"), diff --git a/frigate/log.py b/frigate/log.py index 11f2da254..f2171ffe0 100644 --- a/frigate/log.py +++ b/frigate/log.py @@ -80,6 +80,7 @@ def apply_log_levels(default: str, log_levels: dict[str, LogLevel]) -> None: log_levels = { "absl": LogLevel.error, "httpx": LogLevel.error, + "matplotlib": LogLevel.error, "tensorflow": LogLevel.error, "werkzeug": LogLevel.error, "ws4py": LogLevel.error, @@ -193,7 +194,7 @@ class LogRedirect(io.StringIO): @contextmanager -def redirect_fd_to_queue(queue: Queue[str]) -> Generator[None, None, None]: +def __redirect_fd_to_queue(queue: Queue[str]) -> Generator[None, None, None]: """Redirect file descriptor 1 (stdout) to a pipe and capture output in a queue.""" stdout_fd = os.dup(1) read_fd, write_fd = os.pipe() @@ -249,7 +250,7 @@ def redirect_output_to_logger(logger: logging.Logger, level: int) -> Any: try: # Redirect C-level stdout - with redirect_fd_to_queue(queue): + with __redirect_fd_to_queue(queue): result = func(*args, **kwargs) finally: # Restore Python stdout/stderr From 13fb7bc26092ddc726a64944f24dc662f3423d71 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Fri, 27 Jun 2025 06:28:40 -0600 Subject: [PATCH 042/530] Improve object classification (#18908) * Ui improvements * Improve image cropping and model saving * Improve naming * Add logs for training * Improve model labeling * Don't set sub label for none object classification * Cleanup --- .../real_time/custom_classification.py | 28 +++++++++++-------- frigate/util/classification.py | 1 + .../overlay/ClassificationSelectionDialog.tsx | 10 +++---- .../classification/ModelTrainingView.tsx | 22 ++++++++------- 4 files changed, 35 insertions(+), 26 deletions(-) diff --git a/frigate/data_processing/real_time/custom_classification.py b/frigate/data_processing/real_time/custom_classification.py index f153b5b92..fb1d31e89 100644 --- a/frigate/data_processing/real_time/custom_classification.py +++ b/frigate/data_processing/real_time/custom_classification.py @@ -187,7 +187,7 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi): super().__init__(config, metrics) self.model_config = model_config self.model_dir = os.path.join(MODEL_CACHE_DIR, self.model_config.name) - self.train_dir = os.path.join(self.model_dir, "train") + self.train_dir = os.path.join(CLIPS_DIR, self.model_config.name, "train") self.interpreter: Interpreter = None self.sub_label_publisher = sub_label_publisher self.tensor_input_details: dict[str, Any] = None @@ -232,20 +232,23 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi): obj_data["box"][1], obj_data["box"][2], obj_data["box"][3], - 224, + max( + obj_data["box"][1] - obj_data["box"][0], + obj_data["box"][3] - obj_data["box"][2], + ), 1.0, ) rgb = cv2.cvtColor(frame, cv2.COLOR_YUV2RGB_I420) - input = rgb[ + crop = rgb[ y:y2, x:x2, ] - if input.shape != (224, 224): - input = cv2.resize(input, (224, 224)) + if crop.shape != (224, 224): + crop = cv2.resize(crop, (224, 224)) - input = np.expand_dims(input, axis=0) + input = np.expand_dims(crop, axis=0) self.interpreter.set_tensor(self.tensor_input_details[0]["index"], input) self.interpreter.invoke() res: np.ndarray = self.interpreter.get_tensor( @@ -259,7 +262,7 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi): write_classification_attempt( self.train_dir, - cv2.cvtColor(frame, cv2.COLOR_RGB2BGR), + 
cv2.cvtColor(crop, cv2.COLOR_RGB2BGR), now, self.labelmap[best_id], score, @@ -269,12 +272,15 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi): logger.debug(f"Score {score} is worse than previous score {previous_score}") return - self.sub_label_publisher.publish( - EventMetadataTypeEnum.sub_label, - (obj_data["id"], self.labelmap[best_id], score), - ) + sub_label = self.labelmap[best_id] self.detected_objects[obj_data["id"]] = score + if sub_label != "none": + self.sub_label_publisher.publish( + EventMetadataTypeEnum.sub_label, + (obj_data["id"], sub_label, score), + ) + def handle_request(self, topic, request_data): if topic == EmbeddingsRequestEnum.reload_classification_model.value: if request_data.get("model_name") == self.model_config.name: diff --git a/frigate/util/classification.py b/frigate/util/classification.py index 3c030a986..6eab829f2 100644 --- a/frigate/util/classification.py +++ b/frigate/util/classification.py @@ -49,6 +49,7 @@ def __train_classification_model(model_name: str) -> bool: from tensorflow.keras.applications import MobileNetV2 from tensorflow.keras.preprocessing.image import ImageDataGenerator + logger.info(f"Kicking off classification training for {model_name}.") dataset_dir = os.path.join(CLIPS_DIR, model_name, "dataset") model_dir = os.path.join(MODEL_CACHE_DIR, model_name) num_classes = len( diff --git a/web/src/components/overlay/ClassificationSelectionDialog.tsx b/web/src/components/overlay/ClassificationSelectionDialog.tsx index 7cb8ca156..f86ced19a 100644 --- a/web/src/components/overlay/ClassificationSelectionDialog.tsx +++ b/web/src/components/overlay/ClassificationSelectionDialog.tsx @@ -82,7 +82,7 @@ export default function ClassificationSelectionDialog({ ); // control - const [newFace, setNewFace] = useState(false); + const [newClass, setNewClass] = useState(false); // components const Selector = isDesktop ? DropdownMenu : Drawer; @@ -98,10 +98,10 @@ export default function ClassificationSelectionDialog({ return (
- {newFace && ( + {newClass && ( onCategorizeImage(newCat)} /> @@ -130,7 +130,7 @@ export default function ClassificationSelectionDialog({ > setNewFace(true)} + onClick={() => setNewClass(true)} > {t("createCategory.new")} @@ -142,7 +142,7 @@ export default function ClassificationSelectionDialog({ onClick={() => onCategorizeImage(category)} > - {category} + {category.replaceAll("_", " ")} ))}
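A note on the cropping change in custom_classification.py earlier in this
patch: the new code expands the detection box to a square sized by its longer
edge before resizing to the classifier's 224x224 input, rather than cropping a
fixed 224px region. A minimal sketch of that idea, assuming a plain
(x1, y1, x2, y2) box convention; the helper below is illustrative and is not
Frigate's calculate_region API:

    import cv2

    def square_crop(frame, box, size=224):
        x1, y1, x2, y2 = box
        side = max(x2 - x1, y2 - y1)
        cx, cy = (x1 + x2) // 2, (y1 + y2) // 2
        h, w = frame.shape[:2]
        # clamp the square region to the frame bounds
        sx = max(0, min(cx - side // 2, w - side))
        sy = max(0, min(cy - side // 2, h - side))
        crop = frame[sy : sy + side, sx : sx + side]
        # resize whatever survived the clamp to the model input size
        return cv2.resize(crop, (size, size))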
diff --git a/web/src/views/classification/ModelTrainingView.tsx b/web/src/views/classification/ModelTrainingView.tsx index 1f62a4f53..ea265bd51 100644 --- a/web/src/views/classification/ModelTrainingView.tsx +++ b/web/src/views/classification/ModelTrainingView.tsx @@ -375,7 +375,7 @@ function LibrarySelector({ }: LibrarySelectorProps) { const { t } = useTranslation(["views/classificationModel"]); const [confirmDelete, setConfirmDelete] = useState(null); - const [renameFace, setRenameFace] = useState(null); + const [renameClass, setRenameFace] = useState(null); const handleDeleteFace = useCallback( (name: string) => { @@ -390,9 +390,9 @@ function LibrarySelector({ const handleSetOpen = useCallback( (open: boolean) => { - setRenameFace(open ? renameFace : null); + setRenameFace(open ? renameClass : null); }, - [renameFace], + [renameClass], ); return ( @@ -428,15 +428,15 @@ function LibrarySelector({ { - onRename(renameFace!, newName); + onRename(renameClass!, newName); setRenameFace(null); }} - defaultValue={renameFace || ""} + defaultValue={renameClass || ""} regexPattern={/^[\p{L}\p{N}\s'_-]{1,50}$/u} regexErrorMessage={t("description.invalidName")} /> @@ -484,10 +484,10 @@ function LibrarySelector({ className="group flex items-center justify-between" >
setPageToggle(id)} > - {id} + {id.replaceAll("_", " ")} ({dataset?.[id].length}) @@ -681,7 +681,9 @@ function TrainGrid({
-
{data.label}
+
+ {data.label.replaceAll("_", " ")} +
{data.score}%
From f925154b8a63a7fda1d828594c7db0e79e9f8e89 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Fri, 27 Jun 2025 06:54:02 -0600 Subject: [PATCH 043/530] Remove TFLite init logs --- frigate/data_processing/real_time/custom_classification.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/frigate/data_processing/real_time/custom_classification.py b/frigate/data_processing/real_time/custom_classification.py index fb1d31e89..1e2b91a2d 100644 --- a/frigate/data_processing/real_time/custom_classification.py +++ b/frigate/data_processing/real_time/custom_classification.py @@ -17,6 +17,7 @@ from frigate.comms.inter_process import InterProcessRequestor from frigate.config import FrigateConfig from frigate.config.classification import CustomClassificationConfig from frigate.const import CLIPS_DIR, MODEL_CACHE_DIR +from frigate.log import redirect_output_to_logger from frigate.util.builtin import EventsPerSecond, InferenceSpeed, load_labels from frigate.util.object import box_overlaps, calculate_region @@ -55,6 +56,7 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi): self.last_run = datetime.datetime.now().timestamp() self.__build_detector() + @redirect_output_to_logger(logger, logging.DEBUG) def __build_detector(self) -> None: self.interpreter = Interpreter( model_path=os.path.join(self.model_dir, "model.tflite"), @@ -200,6 +202,7 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi): ) self.__build_detector() + @redirect_output_to_logger(logger, logging.DEBUG) def __build_detector(self) -> None: self.interpreter = Interpreter( model_path=os.path.join(self.model_dir, "model.tflite"), From 528f0d2b1f61a80700537a56e739b11a8506ac25 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Fri, 27 Jun 2025 08:35:02 -0600 Subject: [PATCH 044/530] Improve classification UI (#18910) * Move threhsold to base model config * Improve score handling * Add back button --- frigate/config/classification.py | 6 +-- .../real_time/custom_classification.py | 6 ++- web/src/types/frigateConfig.ts | 2 +- .../classification/ModelTrainingView.tsx | 48 ++++++++++++++----- 4 files changed, 46 insertions(+), 16 deletions(-) diff --git a/frigate/config/classification.py b/frigate/config/classification.py index 40a1183cd..c48ca489c 100644 --- a/frigate/config/classification.py +++ b/frigate/config/classification.py @@ -59,9 +59,6 @@ class CustomClassificationStateCameraConfig(FrigateBaseModel): crop: list[int, int, int, int] = Field( title="Crop of image frame on this camera to run classification on." ) - threshold: float = Field( - default=0.8, title="Classification score threshold to change the state." - ) class CustomClassificationStateConfig(FrigateBaseModel): @@ -86,6 +83,9 @@ class CustomClassificationObjectConfig(FrigateBaseModel): class CustomClassificationConfig(FrigateBaseModel): enabled: bool = Field(default=True, title="Enable running the model.") name: str | None = Field(default=None, title="Name of classification model.") + threshold: float = Field( + default=0.8, title="Classification score threshold to change the state." 
+ ) object_config: CustomClassificationObjectConfig | None = Field(default=None) state_config: CustomClassificationStateConfig | None = Field(default=None) diff --git a/frigate/data_processing/real_time/custom_classification.py b/frigate/data_processing/real_time/custom_classification.py index 1e2b91a2d..05a555701 100644 --- a/frigate/data_processing/real_time/custom_classification.py +++ b/frigate/data_processing/real_time/custom_classification.py @@ -152,7 +152,7 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi): score, ) - if score >= camera_config.threshold: + if score >= self.model_config.threshold: self.requestor.send_data( f"{camera}/classification/{self.model_config.name}", self.labelmap[best_id], @@ -271,6 +271,10 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi): score, ) + if score < self.model_config.threshold: + logger.debug(f"Score {score} is less than threshold.") + return + if score <= previous_score: logger.debug(f"Score {score} is worse than previous score {previous_score}") return diff --git a/web/src/types/frigateConfig.ts b/web/src/types/frigateConfig.ts index 3ccc5b06d..7d4c27794 100644 --- a/web/src/types/frigateConfig.ts +++ b/web/src/types/frigateConfig.ts @@ -282,6 +282,7 @@ export type CameraStreamingSettings = { export type CustomClassificationModelConfig = { enabled: boolean; name: string; + threshold: number; object_config: null | { objects: string[]; }; @@ -289,7 +290,6 @@ export type CustomClassificationModelConfig = { cameras: { [cameraName: string]: { crop: [number, number, number, number]; - threshold: number; }; }; motion: boolean; diff --git a/web/src/views/classification/ModelTrainingView.tsx b/web/src/views/classification/ModelTrainingView.tsx index ea265bd51..14de1a118 100644 --- a/web/src/views/classification/ModelTrainingView.tsx +++ b/web/src/views/classification/ModelTrainingView.tsx @@ -48,12 +48,15 @@ import { TbCategoryPlus } from "react-icons/tb"; import { useModelState } from "@/api/ws"; import { ModelState } from "@/types/ws"; import ActivityIndicator from "@/components/indicators/activity-indicator"; +import { useNavigate } from "react-router-dom"; +import { IoMdArrowRoundBack } from "react-icons/io"; type ModelTrainingViewProps = { model: CustomClassificationModelConfig; }; export default function ModelTrainingView({ model }: ModelTrainingViewProps) { const { t } = useTranslation(["views/classificationModel"]); + const navigate = useNavigate(); const [page, setPage] = useState("train"); const [pageToggle, setPageToggle] = useOptimisticState(page, setPage, 100); @@ -294,14 +297,28 @@ export default function ModelTrainingView({ model }: ModelTrainingViewProps) {
- {}} - /> +
+ + {}} + /> +
{selectedImages?.length > 0 ? (
@@ -640,15 +657,17 @@ function TrainGrid({
     trainImages
       .map((raw) => {
         const parts = raw.replaceAll(".webp", "").split("-");
+        const rawScore = Number.parseFloat(parts[2]);
         return {
           raw,
           timestamp: parts[0],
           label: parts[1],
-          score: Number.parseFloat(parts[2]) * 100,
+          score: rawScore * 100,
+          truePositive: rawScore >= model.threshold,
         };
       })
       .sort((a, b) => b.timestamp.localeCompare(a.timestamp)),
-    [trainImages],
+    [model, trainImages],
   );

   return (
@@ -684,7 +703,14 @@
{data.label.replaceAll("_", " ")}
-
{data.score}%
+
+ {data.score}% +
Date: Mon, 7 Jul 2025 07:36:06 -0600 Subject: [PATCH 045/530] Classification improvements (#19020) * Move classification training to full process * Sort class images --- frigate/util/classification.py | 190 +++++++++--------- .../classification/ModelTrainingView.tsx | 7 +- 2 files changed, 104 insertions(+), 93 deletions(-) diff --git a/frigate/util/classification.py b/frigate/util/classification.py index 6eab829f2..533c1345a 100644 --- a/frigate/util/classification.py +++ b/frigate/util/classification.py @@ -20,106 +20,117 @@ LEARNING_RATE = 0.001 logger = logging.getLogger(__name__) -def __generate_representative_dataset_factory(dataset_dir: str): - def generate_representative_dataset(): - image_paths = [] - for root, dirs, files in os.walk(dataset_dir): - for file in files: - if file.lower().endswith((".jpg", ".jpeg", ".png")): - image_paths.append(os.path.join(root, file)) +class ClassificationTrainingProcess(FrigateProcess): + def __init__(self, model_name: str) -> None: + super().__init__( + stop_event=None, + name=f"model_training:{model_name}", + ) + self.model_name = model_name - for path in image_paths[:300]: - img = cv2.imread(path) - img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) - img = cv2.resize(img, (224, 224)) - img_array = np.array(img, dtype=np.float32) / 255.0 - img_array = img_array[None, ...] - yield [img_array] + def run(self) -> None: + self.pre_run_setup() + self.__train_classification_model() - return generate_representative_dataset + def __generate_representative_dataset_factory(self, dataset_dir: str): + def generate_representative_dataset(): + image_paths = [] + for root, dirs, files in os.walk(dataset_dir): + for file in files: + if file.lower().endswith((".jpg", ".jpeg", ".png")): + image_paths.append(os.path.join(root, file)) + for path in image_paths[:300]: + img = cv2.imread(path) + img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) + img = cv2.resize(img, (224, 224)) + img_array = np.array(img, dtype=np.float32) / 255.0 + img_array = img_array[None, ...] 
+ yield [img_array] -@redirect_output_to_logger(logger, logging.DEBUG) -def __train_classification_model(model_name: str) -> bool: - """Train a classification model.""" + return generate_representative_dataset - # import in the function so that tensorflow is not initialized multiple times - import tensorflow as tf - from tensorflow.keras import layers, models, optimizers - from tensorflow.keras.applications import MobileNetV2 - from tensorflow.keras.preprocessing.image import ImageDataGenerator + @redirect_output_to_logger(logger, logging.DEBUG) + def __train_classification_model(self) -> bool: + """Train a classification model.""" - logger.info(f"Kicking off classification training for {model_name}.") - dataset_dir = os.path.join(CLIPS_DIR, model_name, "dataset") - model_dir = os.path.join(MODEL_CACHE_DIR, model_name) - num_classes = len( - [ - d - for d in os.listdir(dataset_dir) - if os.path.isdir(os.path.join(dataset_dir, d)) - ] - ) + # import in the function so that tensorflow is not initialized multiple times + import tensorflow as tf + from tensorflow.keras import layers, models, optimizers + from tensorflow.keras.applications import MobileNetV2 + from tensorflow.keras.preprocessing.image import ImageDataGenerator - # Start with imagenet base model with 35% of channels in each layer - base_model = MobileNetV2( - input_shape=(224, 224, 3), - include_top=False, - weights="imagenet", - alpha=0.35, - ) - base_model.trainable = False # Freeze pre-trained layers + logger.info(f"Kicking off classification training for {self.model_name}.") + dataset_dir = os.path.join(CLIPS_DIR, self.model_name, "dataset") + model_dir = os.path.join(MODEL_CACHE_DIR, self.model_name) + num_classes = len( + [ + d + for d in os.listdir(dataset_dir) + if os.path.isdir(os.path.join(dataset_dir, d)) + ] + ) - model = models.Sequential( - [ - base_model, - layers.GlobalAveragePooling2D(), - layers.Dense(128, activation="relu"), - layers.Dropout(0.3), - layers.Dense(num_classes, activation="softmax"), - ] - ) + # Start with imagenet base model with 35% of channels in each layer + base_model = MobileNetV2( + input_shape=(224, 224, 3), + include_top=False, + weights="imagenet", + alpha=0.35, + ) + base_model.trainable = False # Freeze pre-trained layers - model.compile( - optimizer=optimizers.Adam(learning_rate=LEARNING_RATE), - loss="categorical_crossentropy", - metrics=["accuracy"], - ) + model = models.Sequential( + [ + base_model, + layers.GlobalAveragePooling2D(), + layers.Dense(128, activation="relu"), + layers.Dropout(0.3), + layers.Dense(num_classes, activation="softmax"), + ] + ) - # create training set - datagen = ImageDataGenerator(rescale=1.0 / 255, validation_split=0.2) - train_gen = datagen.flow_from_directory( - dataset_dir, - target_size=(224, 224), - batch_size=BATCH_SIZE, - class_mode="categorical", - subset="training", - ) + model.compile( + optimizer=optimizers.Adam(learning_rate=LEARNING_RATE), + loss="categorical_crossentropy", + metrics=["accuracy"], + ) - # write labelmap - class_indices = train_gen.class_indices - index_to_class = {v: k for k, v in class_indices.items()} - sorted_classes = [index_to_class[i] for i in range(len(index_to_class))] - with open(os.path.join(model_dir, "labelmap.txt"), "w") as f: - for class_name in sorted_classes: - f.write(f"{class_name}\n") + # create training set + datagen = ImageDataGenerator(rescale=1.0 / 255, validation_split=0.2) + train_gen = datagen.flow_from_directory( + dataset_dir, + target_size=(224, 224), + batch_size=BATCH_SIZE, + 
class_mode="categorical", + subset="training", + ) - # train the model - model.fit(train_gen, epochs=EPOCHS, verbose=0) + # write labelmap + class_indices = train_gen.class_indices + index_to_class = {v: k for k, v in class_indices.items()} + sorted_classes = [index_to_class[i] for i in range(len(index_to_class))] + with open(os.path.join(model_dir, "labelmap.txt"), "w") as f: + for class_name in sorted_classes: + f.write(f"{class_name}\n") - # convert model to tflite - converter = tf.lite.TFLiteConverter.from_keras_model(model) - converter.optimizations = [tf.lite.Optimize.DEFAULT] - converter.representative_dataset = __generate_representative_dataset_factory( - dataset_dir - ) - converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8] - converter.inference_input_type = tf.uint8 - converter.inference_output_type = tf.uint8 - tflite_model = converter.convert() + # train the model + model.fit(train_gen, epochs=EPOCHS, verbose=0) - # write model - with open(os.path.join(model_dir, "model.tflite"), "wb") as f: - f.write(tflite_model) + # convert model to tflite + converter = tf.lite.TFLiteConverter.from_keras_model(model) + converter.optimizations = [tf.lite.Optimize.DEFAULT] + converter.representative_dataset = ( + self.__generate_representative_dataset_factory(dataset_dir) + ) + converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8] + converter.inference_input_type = tf.uint8 + converter.inference_output_type = tf.uint8 + tflite_model = converter.convert() + + # write model + with open(os.path.join(model_dir, "model.tflite"), "wb") as f: + f.write(tflite_model) @staticmethod @@ -138,12 +149,7 @@ def kickoff_model_training( # run training in sub process so that # tensorflow will free CPU / GPU memory # upon training completion - training_process = FrigateProcess( - None, - target=__train_classification_model, - name=f"model_training:{model_name}", - args=(model_name,), - ) + training_process = ClassificationTrainingProcess(model_name) training_process.start() training_process.join() diff --git a/web/src/views/classification/ModelTrainingView.tsx b/web/src/views/classification/ModelTrainingView.tsx index 14de1a118..145004ec3 100644 --- a/web/src/views/classification/ModelTrainingView.tsx +++ b/web/src/views/classification/ModelTrainingView.tsx @@ -577,9 +577,14 @@ function DatasetGrid({ }: DatasetGridProps) { const { t } = useTranslation(["views/classificationModel"]); + const classData = useMemo( + () => images.sort((a, b) => a.localeCompare(b)), + [images], + ); + return (
- {images.map((image) => ( + {classData.map((image) => (
Date: Mon, 7 Jul 2025 09:03:57 -0500 Subject: [PATCH 046/530] Semantic Search Triggers (#18969) * semantic trigger test * database and model * config * embeddings maintainer and trigger post-processor * api to create, edit, delete triggers * frontend and i18n keys * use thumbnail and description for trigger types * image picker tweaks * initial sync * thumbnail file management * clean up logs and use saved thumbnail on frontend * publish mqtt messages * webpush changes to enable trigger notifications * add enabled switch * add triggers from explore * renaming and deletion fixes * fix typing * UI updates and add last triggering event time and link * log exception instead of return in endpoint * highlight entry in UI when triggered * save and delete thumbnails directly * remove alert action for now and add descriptions * tweaks * clean up * fix types * docs * docs tweaks * docs * reuse enum --- docs/docs/configuration/reference.md | 17 + docs/docs/configuration/semantic_search.md | 38 ++ docs/docs/integrations/mqtt.md | 14 + frigate/api/defs/request/events_body.py | 8 + frigate/api/event.py | 434 ++++++++++++- frigate/app.py | 6 + frigate/comms/webpush.py | 100 ++- frigate/config/camera/camera.py | 5 + frigate/config/camera/updater.py | 3 + frigate/config/classification.py | 36 ++ frigate/const.py | 1 + .../data_processing/post/semantic_trigger.py | 233 +++++++ frigate/embeddings/__init__.py | 12 + frigate/embeddings/embeddings.py | 238 ++++++- frigate/embeddings/maintainer.py | 102 ++- frigate/models.py | 17 + frigate/util/builtin.py | 16 + migrations/031_create_trigger_table.py | 50 ++ web/public/locales/en/components/dialog.json | 7 + web/public/locales/en/views/explore.json | 4 + web/public/locales/en/views/settings.json | 95 +++ web/src/api/ws.tsx | 11 + .../components/card/SearchThumbnailFooter.tsx | 3 + .../components/menu/SearchResultActions.tsx | 13 + .../overlay/CreateTriggerDialog.tsx | 416 ++++++++++++ .../overlay/DeleteTriggerDialog.tsx | 80 +++ .../components/overlay/DeleteUserDialog.tsx | 2 +- web/src/components/overlay/ImagePicker.tsx | 172 +++++ web/src/pages/FaceLibrary.tsx | 1 + web/src/pages/Settings.tsx | 17 +- web/src/types/frigateConfig.ts | 12 + web/src/types/trigger.ts | 11 + web/src/types/ws.ts | 8 + .../classification/ModelTrainingView.tsx | 1 + web/src/views/explore/ExploreView.tsx | 8 + web/src/views/search/SearchView.tsx | 12 + web/src/views/settings/TriggerView.tsx | 595 ++++++++++++++++++ 37 files changed, 2736 insertions(+), 62 deletions(-) create mode 100644 frigate/data_processing/post/semantic_trigger.py create mode 100644 migrations/031_create_trigger_table.py create mode 100644 web/src/components/overlay/CreateTriggerDialog.tsx create mode 100644 web/src/components/overlay/DeleteTriggerDialog.tsx create mode 100644 web/src/components/overlay/ImagePicker.tsx create mode 100644 web/src/types/trigger.ts create mode 100644 web/src/views/settings/TriggerView.tsx diff --git a/docs/docs/configuration/reference.md b/docs/docs/configuration/reference.md index 4be10000d..43084db4a 100644 --- a/docs/docs/configuration/reference.md +++ b/docs/docs/configuration/reference.md @@ -840,6 +840,23 @@ cameras: # By default the cameras are sorted alphabetically. order: 0 + # Optional: Configuration for triggers to automate actions based on semantic search results. + triggers: + # Required: Unique identifier for the trigger (generated automatically from nickname if not specified). + trigger_name: + # Required: Enable or disable the trigger. 
(default: shown below)
+      enabled: true
+      # Type of trigger, either `thumbnail` for image-based matching or `description` for text-based matching. (default: none)
+      type: thumbnail
+      # Reference data for matching, either an event ID for `thumbnail` or a text string for `description`. (default: none)
+      data: 1751565549.853251-b69j73
+      # Similarity threshold for triggering. (default: none)
+      threshold: 0.7
+      # List of actions to perform when the trigger fires. (default: none)
+      # Available options: `notification` (send a webpush notification)
+      actions:
+        - notification
+
   # Optional: Configuration for AI generated tracked object descriptions
   genai:
     # Optional: Enable AI description generation (default: shown below)
diff --git a/docs/docs/configuration/semantic_search.md b/docs/docs/configuration/semantic_search.md
index d9fcb5006..4d2a849c4 100644
--- a/docs/docs/configuration/semantic_search.md
+++ b/docs/docs/configuration/semantic_search.md
@@ -102,3 +102,41 @@ See the [Hardware Accelerated Enrichments](/configuration/hardware_acceleration_
 4. Make your search language and tone closely match exactly what you're looking for. If you are using thumbnail search, **phrase your query as an image caption**. Searching for "red car" may not work as well as "red sedan driving down a residential street on a sunny day".
 5. Semantic search on thumbnails tends to return better results when matching large subjects that take up most of the frame. Small things like "cat" tend to not work well.
 6. Experiment! Find a tracked object you want to test and start typing keywords and phrases to see what works for you.
+
+## Triggers
+
+Triggers use semantic search to automate actions: when a tracked object's image or description is similar enough to a predefined image or text (based on a similarity threshold), Frigate executes the configured actions. Triggers are managed per camera and can be configured via the Frigate UI in the Settings page under the Triggers tab.
+
+### Configuration
+
+Triggers are defined within the `semantic_search` configuration for each camera in your Frigate configuration file or through the UI. Each trigger consists of a `type` (either `thumbnail` or `description`), a `data` field (the reference image event ID or text), a `threshold` for similarity matching, and a list of `actions` to perform when the trigger fires.
+
+#### Managing Triggers in the UI
+
+1. Navigate to the **Settings** page and select the **Triggers** tab.
+2. Choose a camera from the dropdown menu to view or manage its triggers.
+3. Click **Add Trigger** to create a new trigger or use the pencil icon to edit an existing one.
+4. In the **Create Trigger** dialog:
+   - Enter a **Name** for the trigger (e.g., "red_car_alert").
+   - Select the **Type** (`Thumbnail` or `Description`).
+   - For `Thumbnail`, select an image to trigger this action when a similar thumbnail image is detected, based on the threshold.
+   - For `Description`, enter text to trigger this action when a similar tracked object description is detected.
+   - Set the **Threshold** for similarity matching.
+   - Select **Actions** to perform when the trigger fires.
+5. Save the trigger to update the configuration and store the embedding in the database.
+
+When a trigger fires, the UI highlights the trigger with a blue outline for 3 seconds for easy identification.
+
+### Usage and Best Practices
+
+1. **Thumbnail Triggers**: Select a representative image (event ID) from the Explore page that closely matches the object you want to detect. For best results, choose images where the object is prominent and fills most of the frame.
+2. **Description Triggers**: Write concise, specific text descriptions (e.g., "Person in a red jacket") that align with the tracked object's description. Avoid vague terms to improve matching accuracy.
+3. **Threshold Tuning**: Adjust the threshold to balance sensitivity and specificity. A higher threshold (e.g., 0.8) requires closer matches, reducing false positives but potentially missing similar objects. A lower threshold (e.g., 0.6) is more inclusive but may trigger more often.
+4. **Using Explore**: Use the context menu or right-click / long-press on a tracked object in the Grid View in Explore to quickly add a trigger based on the tracked object's thumbnail.
+5. **Editing Triggers**: For the best experience, edit triggers via the UI. Frigate keeps triggers edited in the config file in sync with triggers created and edited in the UI.
+
+### Notes
+
+- Triggers rely on the same Jina AI CLIP models (V1 or V2) used for semantic search. Ensure `semantic_search` is enabled and properly configured.
+- Reindexing embeddings (via the UI or `reindex: True`) does not affect trigger configurations but may update the embeddings used for matching.
+- For optimal performance, use a system with sufficient RAM (8GB minimum, 16GB recommended) and a GPU for `large` model configurations, as described in the Semantic Search requirements.
diff --git a/docs/docs/integrations/mqtt.md b/docs/docs/integrations/mqtt.md
index 2710e433d..8fa5b57f6 100644
--- a/docs/docs/integrations/mqtt.md
+++ b/docs/docs/integrations/mqtt.md
@@ -206,6 +206,20 @@ Message published for each changed review item. The first message is published w
 }
 ```
 
+### `frigate/triggers`
+
+Message published when a trigger defined in a camera's `semantic_search` configuration fires.
+
+```json
+{
+  "name": "car_trigger",
+  "camera": "driveway",
+  "event_id": "1751565549.853251-b69j73",
+  "type": "thumbnail",
+  "score": 0.85
+}
+```
+
 ### `frigate/stats`
 
 Same data available at `/api/stats` published at a configurable interval.
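[Editor's note: the `frigate/triggers` payload documented above is straightforward to act on from any MQTT client. Below is a minimal subscriber sketch, assuming a broker at `localhost:1883` and the paho-mqtt 2.x client API; the topic name and payload fields come from the mqtt.md change above, while the broker address and the print-based handler are illustrative assumptions rather than part of the patch.]

```python
import json

import paho.mqtt.client as mqtt


# Hypothetical consumer for the `frigate/triggers` topic; broker address
# and handler body are illustrative assumptions, not part of this patch.
def on_connect(client, userdata, flags, reason_code, properties=None):
    client.subscribe("frigate/triggers")


def on_message(client, userdata, msg):
    # Payload fields per the docs above: name, camera, event_id, type, score
    payload = json.loads(msg.payload)
    print(
        f"trigger {payload['name']} fired on {payload['camera']} "
        f"(event {payload['event_id']}, type {payload['type']}, "
        f"score {payload['score']:.2f})"
    )


client = mqtt.Client(mqtt.CallbackAPIVersion.VERSION2)
client.on_connect = on_connect
client.on_message = on_message
client.connect("localhost", 1883)
client.loop_forever()
```

[Pairing a subscriber like this with the `last_triggered` data exposed by the new `/triggers/status/{camera_name}` endpoint later in this patch gives both push and pull views of trigger activity.]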
diff --git a/frigate/api/defs/request/events_body.py b/frigate/api/defs/request/events_body.py index 0883d066f..dd18ff8f7 100644 --- a/frigate/api/defs/request/events_body.py +++ b/frigate/api/defs/request/events_body.py @@ -2,6 +2,8 @@ from typing import List, Optional, Union from pydantic import BaseModel, Field +from frigate.config.classification import TriggerType + class EventsSubLabelBody(BaseModel): subLabel: str = Field(title="Sub label", max_length=100) @@ -45,3 +47,9 @@ class EventsDeleteBody(BaseModel): class SubmitPlusBody(BaseModel): include_annotation: int = Field(default=1) + + +class TriggerEmbeddingBody(BaseModel): + type: TriggerType + data: str + threshold: float = Field(default=0.5, ge=0.0, le=1.0) diff --git a/frigate/api/event.py b/frigate/api/event.py index 24a6c6f4a..1fe34caec 100644 --- a/frigate/api/event.py +++ b/frigate/api/event.py @@ -1,5 +1,6 @@ """Event apis.""" +import base64 import datetime import logging import os @@ -10,6 +11,7 @@ from pathlib import Path from urllib.parse import unquote import cv2 +import numpy as np from fastapi import APIRouter, Request from fastapi.params import Depends from fastapi.responses import JSONResponse @@ -34,6 +36,7 @@ from frigate.api.defs.request.events_body import ( EventsLPRBody, EventsSubLabelBody, SubmitPlusBody, + TriggerEmbeddingBody, ) from frigate.api.defs.response.event_response import ( EventCreateResponse, @@ -44,11 +47,12 @@ from frigate.api.defs.response.event_response import ( from frigate.api.defs.response.generic_response import GenericResponse from frigate.api.defs.tags import Tags from frigate.comms.event_metadata_updater import EventMetadataTypeEnum -from frigate.const import CLIPS_DIR +from frigate.const import CLIPS_DIR, TRIGGER_DIR from frigate.embeddings import EmbeddingsContext -from frigate.models import Event, ReviewSegment, Timeline +from frigate.models import Event, ReviewSegment, Timeline, Trigger from frigate.track.object_processing import TrackedObject from frigate.util.builtin import get_tz_modifiers +from frigate.util.path import get_event_thumbnail_bytes logger = logging.getLogger(__name__) @@ -1264,6 +1268,38 @@ def regenerate_description( ) +@router.post( + "/description/generate", + response_model=GenericResponse, + # dependencies=[Depends(require_role(["admin"]))], +) +def generate_description_embedding( + request: Request, + body: EventsDescriptionBody, +): + new_description = body.description + + # If semantic search is enabled, update the index + if request.app.frigate_config.semantic_search.enabled: + context: EmbeddingsContext = request.app.embeddings + if len(new_description) > 0: + result = context.generate_description_embedding( + new_description, + ) + + return JSONResponse( + content=( + { + "success": True, + "message": f"Embedding for description is {result}" + if result + else "Failed to generate embedding", + } + ), + status_code=200, + ) + + def delete_single_event(event_id: str, request: Request) -> dict: try: event = Event.get(Event.id == event_id) @@ -1412,3 +1448,397 @@ def end_event(request: Request, event_id: str, body: EventsEndBody): content=({"success": True, "message": "Event successfully ended."}), status_code=200, ) + + +@router.post( + "/trigger/embedding", + response_model=dict, + dependencies=[Depends(require_role(["admin"]))], +) +def create_trigger_embedding( + request: Request, + body: TriggerEmbeddingBody, + camera: str, + name: str, +): + try: + if not request.app.frigate_config.semantic_search.enabled: + return JSONResponse( + content={ + 
"success": False, + "message": "Semantic search is not enabled", + }, + status_code=400, + ) + + # Check if trigger already exists + if ( + Trigger.select() + .where(Trigger.camera == camera, Trigger.name == name) + .exists() + ): + return JSONResponse( + content={ + "success": False, + "message": f"Trigger {camera}:{name} already exists", + }, + status_code=400, + ) + + context: EmbeddingsContext = request.app.embeddings + # Generate embedding based on type + embedding = None + if body.type == "description": + embedding = context.generate_description_embedding(body.data) + elif body.type == "thumbnail": + try: + event: Event = Event.get(Event.id == body.data) + except DoesNotExist: + # TODO: check triggers directory for image + return JSONResponse( + content={ + "success": False, + "message": f"Failed to fetch event for {body.type} trigger", + }, + status_code=400, + ) + + # Skip the event if not an object + if event.data.get("type") != "object": + return + + if thumbnail := get_event_thumbnail_bytes(event): + cursor = context.db.execute_sql( + """ + SELECT thumbnail_embedding FROM vec_thumbnails WHERE id = ? + """, + [body.data], + ) + + row = cursor.fetchone() if cursor else None + + if row: + query_embedding = row[0] + embedding = np.frombuffer(query_embedding, dtype=np.float32) + else: + # Extract valid thumbnail + thumbnail = get_event_thumbnail_bytes(event) + + if thumbnail is None: + return JSONResponse( + content={ + "success": False, + "message": f"Failed to get thumbnail for {body.data} for {body.type} trigger", + }, + status_code=400, + ) + + embedding = context.generate_image_embedding( + body.data, (base64.b64encode(thumbnail).decode("ASCII")) + ) + + if embedding is None: + return JSONResponse( + content={ + "success": False, + "message": f"Failed to generate embedding for {body.type} trigger", + }, + status_code=400, + ) + + if body.type == "thumbnail": + # Save image to the triggers directory + try: + os.makedirs(os.path.join(TRIGGER_DIR, camera), exist_ok=True) + with open( + os.path.join(TRIGGER_DIR, camera, f"{body.data}.webp"), "wb" + ) as f: + f.write(thumbnail) + logger.debug( + f"Writing thumbnail for trigger with data {body.data} in {camera}." 
+ ) + except Exception as e: + logger.error( + f"Failed to write thumbnail for trigger with data {body.data} in {camera}: {e}" + ) + + Trigger.create( + camera=camera, + name=name, + type=body.type, + data=body.data, + threshold=body.threshold, + model=request.app.frigate_config.semantic_search.model, + embedding=np.array(embedding, dtype=np.float32).tobytes(), + triggering_event_id="", + last_triggered=None, + ) + + return JSONResponse( + content={ + "success": True, + "message": f"Trigger created successfully for {camera}:{name}", + }, + status_code=200, + ) + + except Exception as e: + return JSONResponse( + content={ + "success": False, + "message": f"Error creating trigger embedding: {str(e)}", + }, + status_code=500, + ) + + +@router.put( + "/trigger/embedding/{camera}/{name}", + response_model=dict, + dependencies=[Depends(require_role(["admin"]))], +) +def update_trigger_embedding( + request: Request, + camera: str, + name: str, + body: TriggerEmbeddingBody, +): + try: + if not request.app.frigate_config.semantic_search.enabled: + return JSONResponse( + content={ + "success": False, + "message": "Semantic search is not enabled", + }, + status_code=400, + ) + + context: EmbeddingsContext = request.app.embeddings + # Generate embedding based on type + embedding = None + if body.type == "description": + embedding = context.generate_description_embedding(body.data) + elif body.type == "thumbnail": + webp_file = body.data + ".webp" + webp_path = os.path.join(TRIGGER_DIR, camera, webp_file) + + try: + event: Event = Event.get(Event.id == body.data) + # Skip the event if not an object + if event.data.get("type") != "object": + return JSONResponse( + content={ + "success": False, + "message": f"Event {body.data} is not a tracked object for {body.type} trigger", + }, + status_code=400, + ) + # Extract valid thumbnail + thumbnail = get_event_thumbnail_bytes(event) + + with open(webp_path, "wb") as f: + f.write(thumbnail) + except DoesNotExist: + # check triggers directory for image + if not os.path.exists(webp_path): + return JSONResponse( + content={ + "success": False, + "message": f"Failed to fetch event for {body.type} trigger", + }, + status_code=400, + ) + else: + # Load the image from the triggers directory + with open(webp_path, "rb") as f: + thumbnail = f.read() + + embedding = context.generate_image_embedding( + body.data, (base64.b64encode(thumbnail).decode("ASCII")) + ) + + if embedding is None: + return JSONResponse( + content={ + "success": False, + "message": f"Failed to generate embedding for {body.type} trigger", + }, + status_code=400, + ) + + # Check if trigger exists for upsert + trigger = Trigger.get_or_none(Trigger.camera == camera, Trigger.name == name) + + if trigger: + # Update existing trigger + if trigger.data != body.data: # Delete old thumbnail only if data changes + try: + os.remove(os.path.join(TRIGGER_DIR, camera, f"{trigger.data}.webp")) + logger.debug( + f"Deleted thumbnail for trigger with data {trigger.data} in {camera}." 
+ ) + except Exception as e: + logger.error( + f"Failed to delete thumbnail for trigger with data {trigger.data} in {camera}: {e}" + ) + + Trigger.update( + data=body.data, + model=request.app.frigate_config.semantic_search.model, + embedding=np.array(embedding, dtype=np.float32).tobytes(), + threshold=body.threshold, + triggering_event_id="", + last_triggered=None, + ).where(Trigger.camera == camera, Trigger.name == name).execute() + else: + # Create new trigger (for rename case) + Trigger.create( + camera=camera, + name=name, + type=body.type, + data=body.data, + threshold=body.threshold, + model=request.app.frigate_config.semantic_search.model, + embedding=np.array(embedding, dtype=np.float32).tobytes(), + triggering_event_id="", + last_triggered=None, + ) + + if body.type == "thumbnail": + # Save image to the triggers directory + try: + os.makedirs(os.path.join(TRIGGER_DIR, camera), exist_ok=True) + with open( + os.path.join(TRIGGER_DIR, camera, f"{body.data}.webp"), "wb" + ) as f: + f.write(thumbnail) + logger.debug( + f"Writing thumbnail for trigger with data {body.data} in {camera}." + ) + except Exception as e: + logger.error( + f"Failed to write thumbnail for trigger with data {body.data} in {camera}: {e}" + ) + + return JSONResponse( + content={ + "success": True, + "message": f"Trigger updated successfully for {camera}:{name}", + }, + status_code=200, + ) + + except Exception as e: + return JSONResponse( + content={ + "success": False, + "message": f"Error updating trigger embedding: {str(e)}", + }, + status_code=500, + ) + + +@router.delete( + "/trigger/embedding/{camera}/{name}", + response_model=dict, + dependencies=[Depends(require_role(["admin"]))], +) +def delete_trigger_embedding( + request: Request, + camera: str, + name: str, +): + try: + trigger = Trigger.get_or_none(Trigger.camera == camera, Trigger.name == name) + if trigger is None: + return JSONResponse( + content={ + "success": False, + "message": f"Trigger {camera}:{name} not found", + }, + status_code=500, + ) + + deleted = ( + Trigger.delete() + .where(Trigger.camera == camera, Trigger.name == name) + .execute() + ) + if deleted == 0: + return JSONResponse( + content={ + "success": False, + "message": f"Error deleting trigger {camera}:{name}", + }, + status_code=401, + ) + + try: + os.remove(os.path.join(TRIGGER_DIR, camera, f"{trigger.data}.webp")) + logger.debug( + f"Deleted thumbnail for trigger with data {trigger.data} in {camera}." 
+ ) + except Exception as e: + logger.error( + f"Failed to delete thumbnail for trigger with data {trigger.data} in {camera}: {e}" + ) + + return JSONResponse( + content={ + "success": True, + "message": f"Trigger deleted successfully for {camera}:{name}", + }, + status_code=200, + ) + + except Exception as e: + return JSONResponse( + content={ + "success": False, + "message": f"Error deleting trigger embedding: {str(e)}", + }, + status_code=500, + ) + + +@router.get( + "/triggers/status/{camera_name}", + response_model=dict, + dependencies=[Depends(require_role(["admin"]))], +) +def get_triggers_status( + camera_name: str, +): + try: + # Fetch all triggers for the specified camera + triggers = Trigger.select().where(Trigger.camera == camera_name) + + # Prepare the response with trigger status + status = { + trigger.name: { + "last_triggered": trigger.last_triggered.timestamp() + if trigger.last_triggered + else None, + "triggering_event_id": trigger.triggering_event_id + if trigger.triggering_event_id + else None, + } + for trigger in triggers + } + + if not status: + return JSONResponse( + content={ + "success": False, + "message": f"No triggers found for camera {camera_name}", + }, + status_code=404, + ) + + return {"success": True, "triggers": status} + except Exception as ex: + logger.exception(ex) + return JSONResponse( + content=({"success": False, "message": "Error fetching trigger status"}), + status_code=400, + ) diff --git a/frigate/app.py b/frigate/app.py index 9a662dd18..00d620666 100644 --- a/frigate/app.py +++ b/frigate/app.py @@ -39,6 +39,7 @@ from frigate.const import ( MODEL_CACHE_DIR, RECORD_DIR, THUMB_DIR, + TRIGGER_DIR, ) from frigate.data_processing.types import DataProcessorMetrics from frigate.db.sqlitevecq import SqliteVecQueueDatabase @@ -56,6 +57,7 @@ from frigate.models import ( Regions, ReviewSegment, Timeline, + Trigger, User, ) from frigate.object_detection.base import ObjectDetectProcess @@ -123,6 +125,9 @@ class FrigateApp: if self.config.face_recognition.enabled: dirs.append(FACE_DIR) + if self.config.semantic_search.enabled: + dirs.append(TRIGGER_DIR) + for d in dirs: if not os.path.exists(d) and not os.path.islink(d): logger.info(f"Creating directory: {d}") @@ -288,6 +293,7 @@ class FrigateApp: ReviewSegment, Timeline, User, + Trigger, ] self.db.bind(models) diff --git a/frigate/comms/webpush.py b/frigate/comms/webpush.py index 7bc66f3b7..0bc2c1457 100644 --- a/frigate/comms/webpush.py +++ b/frigate/comms/webpush.py @@ -186,6 +186,28 @@ class WebPushClient(Communicator): # type: ignore[misc] logger.debug(f"Notifications for {camera} are currently suspended.") return self.send_alert(decoded) + if topic == "triggers": + decoded = json.loads(payload) + + camera = decoded["camera"] + name = decoded["name"] + + # ensure notifications are enabled and the specific trigger has + # notification action enabled + if ( + not self.config.cameras[camera].notifications.enabled + or name not in self.config.cameras[camera].semantic_search.triggers + or "notification" + not in self.config.cameras[camera] + .semantic_search.triggers[name] + .actions + ): + return + + if self.is_camera_suspended(camera): + logger.debug(f"Notifications for {camera} are currently suspended.") + return + self.send_trigger(decoded) elif topic == "notification_test": if not self.config.notifications.enabled and not any( cam.notifications.enabled for cam in self.config.cameras.values() @@ -267,6 +289,23 @@ class WebPushClient(Communicator): # type: ignore[misc] except Exception as e: 
logger.error(f"Error processing notification: {str(e)}") + def _within_cooldown(self, camera: str) -> bool: + now = datetime.datetime.now().timestamp() + if now - self.last_notification_time < self.config.notifications.cooldown: + logger.debug( + f"Skipping notification for {camera} - in global cooldown period" + ) + return True + if ( + now - self.last_camera_notification_time[camera] + < self.config.cameras[camera].notifications.cooldown + ): + logger.debug( + f"Skipping notification for {camera} - in camera-specific cooldown period" + ) + return True + return False + def send_notification_test(self) -> None: if not self.config.notifications.email: return @@ -295,24 +334,7 @@ class WebPushClient(Communicator): # type: ignore[misc] camera: str = payload["after"]["camera"] current_time = datetime.datetime.now().timestamp() - # Check global cooldown period - if ( - current_time - self.last_notification_time - < self.config.notifications.cooldown - ): - logger.debug( - f"Skipping notification for {camera} - in global cooldown period" - ) - return - - # Check camera-specific cooldown period - if ( - current_time - self.last_camera_notification_time[camera] - < self.config.cameras[camera].notifications.cooldown - ): - logger.debug( - f"Skipping notification for {camera} - in camera-specific cooldown period" - ) + if self._within_cooldown(camera): return self.check_registrations() @@ -367,6 +389,48 @@ class WebPushClient(Communicator): # type: ignore[misc] self.cleanup_registrations() + def send_trigger(self, payload: dict[str, Any]) -> None: + if not self.config.notifications.email: + return + + camera: str = payload["camera"] + current_time = datetime.datetime.now().timestamp() + + if self._within_cooldown(camera): + return + + self.check_registrations() + + self.last_camera_notification_time[camera] = current_time + self.last_notification_time = current_time + + trigger_type = payload["type"] + event_id = payload["event_id"] + name = payload["name"] + score = payload["score"] + + title = f"{name.replace('_', ' ')} triggered on {titlecase(camera.replace('_', ' '))}" + message = f"{titlecase(trigger_type)} trigger fired for {titlecase(camera.replace('_', ' '))} with score {score:.2f}" + image = f"clips/triggers/{camera}/{event_id}.webp" + + direct_url = f"/explore?event_id={event_id}" + ttl = 0 + + logger.debug(f"Sending push notification for {camera}, trigger name {name}") + + for user in self.web_pushers: + self.send_push_notification( + user=user, + payload=payload, + title=title, + message=message, + direct_url=direct_url, + image=image, + ttl=ttl, + ) + + self.cleanup_registrations() + def stop(self) -> None: logger.info("Closing notification queue") self.notification_thread.join() diff --git a/frigate/config/camera/camera.py b/frigate/config/camera/camera.py index 33ad312a2..c356984f3 100644 --- a/frigate/config/camera/camera.py +++ b/frigate/config/camera/camera.py @@ -22,6 +22,7 @@ from ..classification import ( AudioTranscriptionConfig, CameraFaceRecognitionConfig, CameraLicensePlateRecognitionConfig, + CameraSemanticSearchConfig, ) from .audio import AudioConfig from .birdseye import BirdseyeCameraConfig @@ -91,6 +92,10 @@ class CameraConfig(FrigateBaseModel): review: ReviewConfig = Field( default_factory=ReviewConfig, title="Review configuration." 
) + semantic_search: CameraSemanticSearchConfig = Field( + default_factory=CameraSemanticSearchConfig, + title="Semantic search configuration.", + ) snapshots: SnapshotsConfig = Field( default_factory=SnapshotsConfig, title="Snapshot configuration." ) diff --git a/frigate/config/camera/updater.py b/frigate/config/camera/updater.py index 83536fc46..756e370db 100644 --- a/frigate/config/camera/updater.py +++ b/frigate/config/camera/updater.py @@ -23,6 +23,7 @@ class CameraConfigUpdateEnum(str, Enum): record = "record" remove = "remove" # for removing a camera review = "review" + semantic_search = "semantic_search" # for semantic search triggers snapshots = "snapshots" zones = "zones" @@ -106,6 +107,8 @@ class CameraConfigUpdateSubscriber: config.record = updated_config elif update_type == CameraConfigUpdateEnum.review: config.review = updated_config + elif update_type == CameraConfigUpdateEnum.semantic_search: + config.semantic_search = updated_config elif update_type == CameraConfigUpdateEnum.snapshots: config.snapshots = updated_config elif update_type == CameraConfigUpdateEnum.zones: diff --git a/frigate/config/classification.py b/frigate/config/classification.py index c48ca489c..e92f1da78 100644 --- a/frigate/config/classification.py +++ b/frigate/config/classification.py @@ -10,6 +10,7 @@ __all__ = [ "CameraLicensePlateRecognitionConfig", "FaceRecognitionConfig", "SemanticSearchConfig", + "CameraSemanticSearchConfig", "LicensePlateRecognitionConfig", ] @@ -24,6 +25,15 @@ class EnrichmentsDeviceEnum(str, Enum): CPU = "CPU" +class TriggerType(str, Enum): + THUMBNAIL = "thumbnail" + DESCRIPTION = "description" + + +class TriggerAction(str, Enum): + NOTIFICATION = "notification" + + class AudioTranscriptionConfig(FrigateBaseModel): enabled: bool = Field(default=False, title="Enable audio transcription.") language: str = Field( @@ -113,6 +123,32 @@ class SemanticSearchConfig(FrigateBaseModel): ) +class TriggerConfig(FrigateBaseModel): + enabled: bool = Field(default=True, title="Enable this trigger") + type: TriggerType = Field(default=TriggerType.DESCRIPTION, title="Type of trigger") + data: str = Field(title="Trigger content (text phrase or image ID)") + threshold: float = Field( + title="Confidence score required to run the trigger", + default=0.8, + gt=0.0, + le=1.0, + ) + actions: Optional[List[TriggerAction]] = Field( + default=[], title="Actions to perform when trigger is matched" + ) + + model_config = ConfigDict(extra="forbid", protected_namespaces=()) + + +class CameraSemanticSearchConfig(FrigateBaseModel): + triggers: Optional[Dict[str, TriggerConfig]] = Field( + default=None, + title="Trigger actions on tracked objects that match existing thumbnails or descriptions", + ) + + model_config = ConfigDict(extra="forbid", protected_namespaces=()) + + class FaceRecognitionConfig(FrigateBaseModel): enabled: bool = Field(default=False, title="Enable face recognition.") model_size: str = Field( diff --git a/frigate/const.py b/frigate/const.py index 893e6eb52..5a5ee3f24 100644 --- a/frigate/const.py +++ b/frigate/const.py @@ -11,6 +11,7 @@ EXPORT_DIR = f"{BASE_DIR}/exports" FACE_DIR = f"{CLIPS_DIR}/faces" THUMB_DIR = f"{CLIPS_DIR}/thumbs" RECORD_DIR = f"{BASE_DIR}/recordings" +TRIGGER_DIR = f"{CLIPS_DIR}/triggers" BIRDSEYE_PIPE = "/tmp/cache/birdseye" CACHE_DIR = "/tmp/cache" FRIGATE_LOCALHOST = "http://127.0.0.1:5000" diff --git a/frigate/data_processing/post/semantic_trigger.py b/frigate/data_processing/post/semantic_trigger.py new file mode 100644 index 000000000..baa47ba1c --- /dev/null 
+++ b/frigate/data_processing/post/semantic_trigger.py @@ -0,0 +1,233 @@ +"""Post time processor to trigger actions based on similar embeddings.""" + +import datetime +import json +import logging +import os +from typing import Any + +import cv2 +import numpy as np +from peewee import DoesNotExist + +from frigate.comms.inter_process import InterProcessRequestor +from frigate.config import FrigateConfig +from frigate.const import CONFIG_DIR +from frigate.data_processing.types import PostProcessDataEnum +from frigate.db.sqlitevecq import SqliteVecQueueDatabase +from frigate.embeddings.util import ZScoreNormalization +from frigate.models import Event, Trigger +from frigate.util.builtin import cosine_distance +from frigate.util.path import get_event_thumbnail_bytes + +from ..post.api import PostProcessorApi +from ..types import DataProcessorMetrics + +logger = logging.getLogger(__name__) + +WRITE_DEBUG_IMAGES = False + + +class SemanticTriggerProcessor(PostProcessorApi): + def __init__( + self, + db: SqliteVecQueueDatabase, + config: FrigateConfig, + requestor: InterProcessRequestor, + metrics: DataProcessorMetrics, + embeddings, + ): + super().__init__(config, metrics, None) + self.db = db + self.embeddings = embeddings + self.requestor = requestor + self.trigger_embeddings: list[np.ndarray] = [] + + self.thumb_stats = ZScoreNormalization() + self.desc_stats = ZScoreNormalization() + + # load stats from disk + try: + with open(os.path.join(CONFIG_DIR, ".search_stats.json"), "r") as f: + data = json.loads(f.read()) + self.thumb_stats.from_dict(data["thumb_stats"]) + self.desc_stats.from_dict(data["desc_stats"]) + except FileNotFoundError: + pass + + def process_data( + self, data: dict[str, Any], data_type: PostProcessDataEnum + ) -> None: + event_id = data["event_id"] + camera = data["camera"] + process_type = data["type"] + + if self.config.cameras[camera].semantic_search.triggers is None: + return + + triggers = ( + Trigger.select( + Trigger.camera, + Trigger.name, + Trigger.data, + Trigger.type, + Trigger.embedding, + Trigger.threshold, + ) + .where(Trigger.camera == camera) + .dicts() + .iterator() + ) + + for trigger in triggers: + if ( + trigger["name"] + not in self.config.cameras[camera].semantic_search.triggers + or not self.config.cameras[camera] + .semantic_search.triggers[trigger["name"]] + .enabled + ): + logger.debug( + f"Trigger {trigger['name']} is disabled for camera {camera}" + ) + continue + + logger.debug( + f"Processing {trigger['type']} trigger for {event_id} on {trigger['camera']}: {trigger['name']}" + ) + + trigger_embedding = np.frombuffer(trigger["embedding"], dtype=np.float32) + + # Get embeddings based on type + thumbnail_embedding = None + description_embedding = None + + if process_type == "image": + cursor = self.db.execute_sql( + """ + SELECT thumbnail_embedding FROM vec_thumbnails WHERE id = ? + """, + [event_id], + ) + row = cursor.fetchone() if cursor else None + if row: + thumbnail_embedding = np.frombuffer(row[0], dtype=np.float32) + + if process_type == "text": + cursor = self.db.execute_sql( + """ + SELECT description_embedding FROM vec_descriptions WHERE id = ? 
+ """, + [event_id], + ) + row = cursor.fetchone() if cursor else None + if row: + description_embedding = np.frombuffer(row[0], dtype=np.float32) + + # Skip processing if we don't have any embeddings + if thumbnail_embedding is None and description_embedding is None: + logger.debug(f"No embeddings found for {event_id}") + return + + # Determine which embedding to compare based on trigger type + if ( + trigger["type"] in ["text", "thumbnail"] + and thumbnail_embedding is not None + ): + data_embedding = thumbnail_embedding + normalized_distance = self.thumb_stats.normalize( + [cosine_distance(data_embedding, trigger_embedding)], + save_stats=False, + )[0] + elif trigger["type"] == "description" and description_embedding is not None: + data_embedding = description_embedding + normalized_distance = self.desc_stats.normalize( + [cosine_distance(data_embedding, trigger_embedding)], + save_stats=False, + )[0] + + else: + continue + + similarity = 1 - normalized_distance + + logger.debug( + f"Trigger {trigger['name']} ({trigger['data'] if trigger['type'] == 'text' or trigger['type'] == 'description' else 'image'}): " + f"normalized distance: {normalized_distance:.4f}, " + f"similarity: {similarity:.4f}, threshold: {trigger['threshold']}" + ) + + # Check if similarity meets threshold + if similarity >= trigger["threshold"]: + logger.info( + f"Trigger {trigger['name']} activated with similarity {similarity:.4f}" + ) + + # Update the trigger's last_triggered and triggering_event_id + Trigger.update( + last_triggered=datetime.datetime.now(), triggering_event_id=event_id + ).where( + Trigger.camera == camera, Trigger.name == trigger["name"] + ).execute() + + # Always publish MQTT message + self.requestor.send_data( + "triggers", + json.dumps( + { + "name": trigger["name"], + "camera": camera, + "event_id": event_id, + "type": trigger["type"], + "score": similarity, + } + ), + ) + + if ( + self.config.cameras[camera] + .semantic_search.triggers[trigger["name"]] + .actions + ): + # TODO: handle actions for the trigger + # notifications already handled by webpush + pass + + if WRITE_DEBUG_IMAGES: + try: + event: Event = Event.get(Event.id == event_id) + except DoesNotExist: + return + + # Skip the event if not an object + if event.data.get("type") != "object": + return + + thumbnail_bytes = get_event_thumbnail_bytes(event) + + nparr = np.frombuffer(thumbnail_bytes, np.uint8) + thumbnail = cv2.imdecode(nparr, cv2.IMREAD_COLOR) + + font_scale = 0.5 + font = cv2.FONT_HERSHEY_SIMPLEX + cv2.putText( + thumbnail, + f"{similarity:.4f}", + (10, 30), + font, + fontScale=font_scale, + color=(0, 255, 0), + thickness=2, + ) + + current_time = int(datetime.datetime.now().timestamp()) + cv2.imwrite( + f"debug/frames/trigger-{event_id}_{current_time}.jpg", + thumbnail, + ) + + def handle_request(self, topic, request_data): + return None + + def expire_object(self, object_id, camera): + pass diff --git a/frigate/embeddings/__init__.py b/frigate/embeddings/__init__.py index d4887e0d2..ab69adb68 100644 --- a/frigate/embeddings/__init__.py +++ b/frigate/embeddings/__init__.py @@ -296,3 +296,15 @@ class EmbeddingsContext: return self.requestor.send_data( EmbeddingsRequestEnum.transcribe_audio.value, {"event": event} ) + + def generate_description_embedding(self, text: str) -> None: + return self.requestor.send_data( + EmbeddingsRequestEnum.embed_description.value, + {"id": None, "description": text, "upsert": False}, + ) + + def generate_image_embedding(self, event_id: str, thumbnail: bytes) -> None: + return 
self.requestor.send_data( + EmbeddingsRequestEnum.embed_thumbnail.value, + {"id": str(event_id), "thumbnail": str(thumbnail), "upsert": False}, + ) diff --git a/frigate/embeddings/embeddings.py b/frigate/embeddings/embeddings.py index 833ab9ab2..a0981f669 100644 --- a/frigate/embeddings/embeddings.py +++ b/frigate/embeddings/embeddings.py @@ -7,21 +7,26 @@ import os import threading import time -from numpy import ndarray +import numpy as np +from peewee import DoesNotExist, IntegrityError from PIL import Image from playhouse.shortcuts import model_to_dict +from frigate.comms.embeddings_updater import ( + EmbeddingsRequestEnum, +) from frigate.comms.inter_process import InterProcessRequestor from frigate.config import FrigateConfig from frigate.config.classification import SemanticSearchModelEnum from frigate.const import ( CONFIG_DIR, + TRIGGER_DIR, UPDATE_EMBEDDINGS_REINDEX_PROGRESS, UPDATE_MODEL_STATE, ) from frigate.data_processing.types import DataProcessorMetrics from frigate.db.sqlitevecq import SqliteVecQueueDatabase -from frigate.models import Event +from frigate.models import Event, Trigger from frigate.types import ModelStatusTypesEnum from frigate.util.builtin import EventsPerSecond, InferenceSpeed, serialize from frigate.util.path import get_event_thumbnail_bytes @@ -167,7 +172,7 @@ class Embeddings: def embed_thumbnail( self, event_id: str, thumbnail: bytes, upsert: bool = True - ) -> ndarray: + ) -> np.ndarray: """Embed thumbnail and optionally insert into DB. @param: event_id in Events DB @@ -194,7 +199,7 @@ class Embeddings: def batch_embed_thumbnail( self, event_thumbs: dict[str, bytes], upsert: bool = True - ) -> list[ndarray]: + ) -> list[np.ndarray]: """Embed thumbnails and optionally insert into DB. @param: event_thumbs Map of Event IDs in DB to thumbnail bytes in jpg format @@ -244,7 +249,7 @@ class Embeddings: def embed_description( self, event_id: str, description: str, upsert: bool = True - ) -> ndarray: + ) -> np.ndarray: start = datetime.datetime.now().timestamp() embedding = self.text_embedding([description])[0] @@ -264,7 +269,7 @@ class Embeddings: def batch_embed_description( self, event_descriptions: dict[str, str], upsert: bool = True - ) -> ndarray: + ) -> np.ndarray: start = datetime.datetime.now().timestamp() # upsert embeddings one by one to avoid token limit embeddings = [] @@ -417,3 +422,224 @@ class Embeddings: with self.reindex_lock: self.reindex_running = False self.reindex_thread = None + + def sync_triggers(self) -> None: + for camera in self.config.cameras.values(): + # Get all existing triggers for this camera + existing_triggers = { + trigger.name: trigger + for trigger in Trigger.select().where(Trigger.camera == camera.name) + } + + # Get all configured trigger names + configured_trigger_names = set(camera.semantic_search.triggers or {}) + + # Create or update triggers from config + for trigger_name, trigger in ( + camera.semantic_search.triggers or {} + ).items(): + if trigger_name in existing_triggers: + existing_trigger = existing_triggers[trigger_name] + needs_embedding_update = False + thumbnail_missing = False + + # Check if data has changed or thumbnail is missing for thumbnail type + if trigger.type == "thumbnail": + thumbnail_path = os.path.join( + TRIGGER_DIR, camera.name, f"{trigger.data}.webp" + ) + try: + event = Event.get(Event.id == trigger.data) + if event.data.get("type") != "object": + logger.warning( + f"Event {trigger.data} is not a tracked object for {trigger.type} trigger" + ) + continue # Skip if not an object + + # 
Check if thumbnail needs to be updated (data changed or missing) + if ( + existing_trigger.data != trigger.data + or not os.path.exists(thumbnail_path) + ): + thumbnail = get_event_thumbnail_bytes(event) + if not thumbnail: + logger.warning( + f"Unable to retrieve thumbnail for event ID {trigger.data} for {trigger_name}." + ) + continue + self.write_trigger_thumbnail( + camera.name, trigger.data, thumbnail + ) + thumbnail_missing = True + except DoesNotExist: + logger.warning( + f"Event ID {trigger.data} for trigger {trigger_name} does not exist." + ) + continue + + # Update existing trigger if data has changed + if ( + existing_trigger.type != trigger.type + or existing_trigger.data != trigger.data + or existing_trigger.threshold != trigger.threshold + ): + existing_trigger.type = trigger.type + existing_trigger.data = trigger.data + existing_trigger.threshold = trigger.threshold + needs_embedding_update = True + + # Check if embedding is missing or needs update + if ( + not existing_trigger.embedding + or needs_embedding_update + or thumbnail_missing + ): + existing_trigger.embedding = self._calculate_trigger_embedding( + trigger + ) + needs_embedding_update = True + + if needs_embedding_update: + existing_trigger.save() + else: + # Create new trigger + try: + try: + event: Event = Event.get(Event.id == trigger.data) + except DoesNotExist: + logger.warning( + f"Event ID {trigger.data} for trigger {trigger_name} does not exist." + ) + continue + + # Skip the event if not an object + if event.data.get("type") != "object": + logger.warning( + f"Event ID {trigger.data} for trigger {trigger_name} is not a tracked object." + ) + continue + + thumbnail = get_event_thumbnail_bytes(event) + + if not thumbnail: + logger.warning( + f"Unable to retrieve thumbnail for event ID {trigger.data} for {trigger_name}." + ) + continue + + self.write_trigger_thumbnail( + camera.name, trigger.data, thumbnail + ) + + # Calculate embedding for new trigger + embedding = self._calculate_trigger_embedding(trigger) + + Trigger.create( + camera=camera.name, + name=trigger_name, + type=trigger.type, + data=trigger.data, + threshold=trigger.threshold, + model=self.config.semantic_search.model, + embedding=embedding, + triggering_event_id="", + last_triggered=None, + ) + + except IntegrityError: + pass # Handle duplicate creation attempts + + # Remove triggers that are no longer in config + triggers_to_remove = ( + set(existing_triggers.keys()) - configured_trigger_names + ) + if triggers_to_remove: + Trigger.delete().where( + Trigger.camera == camera.name, Trigger.name.in_(triggers_to_remove) + ).execute() + for trigger_name in triggers_to_remove: + self.remove_trigger_thumbnail(camera.name, trigger_name) + + def write_trigger_thumbnail( + self, camera: str, event_id: str, thumbnail: bytes + ) -> None: + """Write the thumbnail to the trigger directory.""" + try: + os.makedirs(os.path.join(TRIGGER_DIR, camera), exist_ok=True) + with open(os.path.join(TRIGGER_DIR, camera, f"{event_id}.webp"), "wb") as f: + f.write(thumbnail) + logger.debug( + f"Writing thumbnail for trigger with data {event_id} in {camera}." + ) + except Exception as e: + logger.error( + f"Failed to write thumbnail for trigger with data {event_id} in {camera}: {e}" + ) + + def remove_trigger_thumbnail(self, camera: str, event_id: str) -> None: + """Write the thumbnail to the trigger directory.""" + try: + os.remove(os.path.join(TRIGGER_DIR, camera, f"{event_id}.webp")) + logger.debug( + f"Deleted thumbnail for trigger with data {event_id} in {camera}." 
+ ) + except Exception as e: + logger.error( + f"Failed to delete thumbnail for trigger with data {event_id} in {camera}: {e}" + ) + + def _calculate_trigger_embedding(self, trigger) -> bytes: + """Calculate embedding for a trigger based on its type and data.""" + if trigger.type == "description": + logger.debug(f"Generating embedding for trigger description {trigger.name}") + embedding = self.requestor.send_data( + EmbeddingsRequestEnum.embed_description.value, + {"id": None, "description": trigger.data, "upsert": False}, + ) + return embedding.astype(np.float32).tobytes() + + elif trigger.type == "thumbnail": + # For image triggers, trigger.data should be an image ID + # Try to get embedding from vec_thumbnails table first + cursor = self.db.execute_sql( + "SELECT thumbnail_embedding FROM vec_thumbnails WHERE id = ?", + [trigger.data], + ) + row = cursor.fetchone() if cursor else None + if row: + return row[0] # Already in bytes format + else: + logger.debug( + f"No thumbnail embedding found for image ID: {trigger.data}, generating from saved trigger thumbnail" + ) + + try: + with open( + os.path.join( + TRIGGER_DIR, trigger.camera, f"{trigger.data}.webp" + ), + "rb", + ) as f: + thumbnail = f.read() + except Exception as e: + logger.error( + f"Failed to read thumbnail for trigger {trigger.name} with ID {trigger.data}: {e}" + ) + return b"" + + logger.debug( + f"Generating embedding for trigger thumbnail {trigger.name} with ID {trigger.data}" + ) + embedding = self.requestor.send_data( + EmbeddingsRequestEnum.embed_thumbnail.value, + { + "id": str(trigger.data), + "thumbnail": str(thumbnail), + "upsert": False, + }, + ) + return embedding.astype(np.float32).tobytes() + + else: + logger.warning(f"Unknown trigger type: {trigger.type}") + return b"" diff --git a/frigate/embeddings/maintainer.py b/frigate/embeddings/maintainer.py index c659d04fe..ec8e20a48 100644 --- a/frigate/embeddings/maintainer.py +++ b/frigate/embeddings/maintainer.py @@ -14,7 +14,10 @@ import numpy as np from peewee import DoesNotExist from frigate.comms.detections_updater import DetectionSubscriber, DetectionTypeEnum -from frigate.comms.embeddings_updater import EmbeddingsRequestEnum, EmbeddingsResponder +from frigate.comms.embeddings_updater import ( + EmbeddingsRequestEnum, + EmbeddingsResponder, +) from frigate.comms.event_metadata_updater import ( EventMetadataPublisher, EventMetadataSubscriber, @@ -46,6 +49,7 @@ from frigate.data_processing.post.audio_transcription import ( from frigate.data_processing.post.license_plate import ( LicensePlatePostProcessor, ) +from frigate.data_processing.post.semantic_trigger import SemanticTriggerProcessor from frigate.data_processing.real_time.api import RealTimeProcessorApi from frigate.data_processing.real_time.bird import BirdRealTimeProcessor from frigate.data_processing.real_time.custom_classification import ( @@ -60,7 +64,7 @@ from frigate.data_processing.types import DataProcessorMetrics, PostProcessDataE from frigate.db.sqlitevecq import SqliteVecQueueDatabase from frigate.events.types import EventTypeEnum, RegenerateDescriptionEnum from frigate.genai import get_genai_client -from frigate.models import Event, Recordings +from frigate.models import Event, Recordings, Trigger from frigate.types import TrackedObjectUpdateTypesEnum from frigate.util.builtin import serialize from frigate.util.image import ( @@ -93,7 +97,11 @@ class EmbeddingMaintainer(threading.Thread): self.config_updater = CameraConfigUpdateSubscriber( self.config, self.config.cameras, - 
diff --git a/frigate/models.py b/frigate/models.py
index 5aa0dc5b2..0ef4650b3 100644
--- a/frigate/models.py
+++ b/frigate/models.py
@@ -1,6 +1,8 @@
 from peewee import (
+    BlobField,
     BooleanField,
     CharField,
+    CompositeKey,
     DateTimeField,
     FloatField,
     ForeignKeyField,
@@ -132,3 +134,18 @@ class User(Model):  # type: ignore[misc]
     )
     password_hash = CharField(null=False, max_length=120)
     notification_tokens = JSONField()
+
+
+class Trigger(Model):  # type: ignore[misc]
+    camera = CharField(max_length=20)
+    name = CharField()
+    type = CharField(max_length=10)
+    data = TextField()
+    threshold = FloatField()
+    model = CharField(max_length=30)
+    embedding = BlobField()
+    triggering_event_id = CharField(max_length=30)
+    last_triggered = DateTimeField()
+
+    class Meta:
+        primary_key = CompositeKey("camera", "name")
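Because `Trigger` uses `CompositeKey("camera", "name")`, rows are addressed by that pair rather than an auto-increment id, which is why `sync_triggers` filters on both columns. A minimal usage sketch (camera and trigger names here are made up, and the database is assumed to be bound as in the maintainer changes above):

```python
from frigate.models import Trigger

# Look up one trigger by its composite key.
trigger = Trigger.get(
    (Trigger.camera == "front_door") & (Trigger.name == "package_drop")
)

# Delete a set of triggers for a camera, mirroring the cleanup in sync_triggers.
Trigger.delete().where(
    Trigger.camera == "front_door",
    Trigger.name.in_(["package_drop", "old_trigger"]),
).execute()
```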
diff --git a/frigate/util/builtin.py b/frigate/util/builtin.py
index d4f8d7e37..5ab29a6ea 100644
--- a/frigate/util/builtin.py
+++ b/frigate/util/builtin.py
@@ -428,3 +428,19 @@ def sanitize_float(value):
     if isinstance(value, (int, float)) and not math.isfinite(value):
         return 0.0
     return value
+
+
+def cosine_similarity(a: np.ndarray, b: np.ndarray) -> float:
+    return 1 - cosine_distance(a, b)
+
+
+def cosine_distance(a: np.ndarray, b: np.ndarray) -> float:
+    """Returns cosine distance to match sqlite-vec's calculation."""
+    dot = np.dot(a, b)
+    a_mag = np.dot(a, a)  # ||a||^2
+    b_mag = np.dot(b, b)  # ||b||^2
+
+    if a_mag == 0 or b_mag == 0:
+        return 1.0
+
+    return 1.0 - (dot / (np.sqrt(a_mag) * np.sqrt(b_mag)))
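A few quick sanity checks on these helpers: identical vectors give distance 0, orthogonal vectors give 1, and the zero-magnitude guard returns 1.0 instead of dividing by zero:

```python
import numpy as np

from frigate.util.builtin import cosine_distance, cosine_similarity

a = np.array([1.0, 0.0])
b = np.array([0.0, 1.0])

assert cosine_distance(a, a) == 0.0  # identical vectors
assert cosine_distance(a, b) == 1.0  # orthogonal vectors
assert cosine_distance(a, np.zeros(2)) == 1.0  # zero-magnitude guard
assert cosine_similarity(a, a) == 1.0
```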
diff --git a/migrations/031_create_trigger_table.py b/migrations/031_create_trigger_table.py
new file mode 100644
index 000000000..7c8c289cc
--- /dev/null
+++ b/migrations/031_create_trigger_table.py
@@ -0,0 +1,50 @@
+"""Peewee migrations -- 031_create_trigger_table.py.
+
+This migration creates the Trigger table to track semantic search triggers for cameras.
+
+Some examples (model - class or model_name)::
+
+    > Model = migrator.orm['model_name']            # Return model in current state by name
+    > migrator.sql(sql)                             # Run custom SQL
+    > migrator.python(func, *args, **kwargs)        # Run python code
+    > migrator.create_model(Model)                  # Create a model (could be used as decorator)
+    > migrator.remove_model(model, cascade=True)    # Remove a model
+    > migrator.add_fields(model, **fields)          # Add fields to a model
+    > migrator.change_fields(model, **fields)       # Change fields
+    > migrator.remove_fields(model, *field_names, cascade=True)
+    > migrator.rename_field(model, old_field_name, new_field_name)
+    > migrator.rename_table(model, new_table_name)
+    > migrator.add_index(model, *col_names, unique=False)
+    > migrator.drop_index(model, *col_names)
+    > migrator.add_not_null(model, *field_names)
+    > migrator.drop_not_null(model, *field_names)
+    > migrator.add_default(model, field_name, default)
+
+"""
+
+import peewee as pw
+
+SQL = pw.SQL
+
+
+def migrate(migrator, database, fake=False, **kwargs):
+    migrator.sql(
+        """
+        CREATE TABLE IF NOT EXISTS trigger (
+            camera VARCHAR(20) NOT NULL,
+            name VARCHAR NOT NULL,
+            type VARCHAR(10) NOT NULL,
+            model VARCHAR(30) NOT NULL,
+            data TEXT NOT NULL,
+            threshold REAL,
+            embedding BLOB,
+            triggering_event_id VARCHAR(30),
+            last_triggered DATETIME,
+            PRIMARY KEY (camera, name)
+        )
+        """
+    )
+
+
+def rollback(migrator, database, fake=False, **kwargs):
+    migrator.sql("DROP TABLE IF EXISTS trigger")
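To confirm the migration produced the expected schema, the table definition can be read back from sqlite_master. The database path below is an assumption; adjust it for your install:

```python
import sqlite3

con = sqlite3.connect("/config/frigate.db")  # assumed default path
row = con.execute(
    "SELECT sql FROM sqlite_master WHERE type = 'table' AND name = 'trigger'"
).fetchone()
print(row[0] if row else "trigger table not found")
con.close()
```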
diff --git a/web/public/locales/en/components/dialog.json b/web/public/locales/en/components/dialog.json
index 8b2dc0b88..02ab43c4c 100644
--- a/web/public/locales/en/components/dialog.json
+++ b/web/public/locales/en/components/dialog.json
@@ -109,5 +109,12 @@
       "markAsReviewed": "Mark as reviewed",
       "deleteNow": "Delete Now"
     }
+  },
+  "imagePicker": {
+    "selectImage": "Select a tracked object's thumbnail",
+    "search": {
+      "placeholder": "Search by label or sub label..."
+    },
+    "noImages": "No thumbnails found for this camera"
   }
 }
diff --git a/web/public/locales/en/views/explore.json b/web/public/locales/en/views/explore.json
index 8a61dcf58..d754fee77 100644
--- a/web/public/locales/en/views/explore.json
+++ b/web/public/locales/en/views/explore.json
@@ -175,6 +175,10 @@
       "label": "Find similar",
       "aria": "Find similar tracked objects"
     },
+    "addTrigger": {
+      "label": "Add trigger",
+      "aria": "Add a trigger for this tracked object"
+    },
     "audioTranscription": {
       "label": "Transcribe",
       "aria": "Request audio transcription"
diff --git a/web/public/locales/en/views/settings.json b/web/public/locales/en/views/settings.json
index 14dc809bc..b396babde 100644
--- a/web/public/locales/en/views/settings.json
+++ b/web/public/locales/en/views/settings.json
@@ -644,5 +644,100 @@
       "success": "Frigate+ settings have been saved. Restart Frigate to apply changes.",
       "error": "Failed to save config changes: {{errorMessage}}"
     }
+  },
+  "triggers": {
+    "documentTitle": "Triggers",
+    "management": {
+      "title": "Trigger Management",
+      "desc": "Manage triggers for {{camera}}. Use the thumbnail type to trigger on similar thumbnails to your selected tracked object, and the description type to trigger on similar descriptions to text you specify."
+    },
+    "addTrigger": "Add Trigger",
+    "table": {
+      "name": "Name",
+      "type": "Type",
+      "content": "Content",
+      "threshold": "Threshold",
+      "actions": "Actions",
+      "noTriggers": "No triggers configured for this camera.",
+      "edit": "Edit",
+      "deleteTrigger": "Delete Trigger",
+      "lastTriggered": "Last triggered"
+    },
+    "type": {
+      "thumbnail": "Thumbnail",
+      "description": "Description"
+    },
+    "actions": {
+      "alert": "Mark as Alert",
+      "notification": "Send Notification"
+    },
+    "dialog": {
+      "createTrigger": {
+        "title": "Create Trigger",
+        "desc": "Create a trigger for camera {{camera}}"
+      },
+      "editTrigger": {
+        "title": "Edit Trigger",
+        "desc": "Edit the settings for trigger on camera {{camera}}"
+      },
+      "deleteTrigger": {
+        "title": "Delete Trigger",
+        "desc": "Are you sure you want to delete the trigger {{triggerName}}? This action cannot be undone."
+      },
+      "form": {
+        "name": {
+          "title": "Name",
+          "placeholder": "Enter trigger name",
+          "error": {
+            "minLength": "Name must be at least 2 characters long.",
+            "invalidCharacters": "Name can only contain letters, numbers, underscores, and hyphens.",
+            "alreadyExists": "A trigger with this name already exists for this camera."
+          }
+        },
+        "enabled": {
+          "description": "Enable or disable this trigger"
+        },
+        "type": {
+          "title": "Type",
+          "placeholder": "Select trigger type"
+        },
+        "content": {
+          "title": "Content",
+          "imagePlaceholder": "Select an image",
+          "textPlaceholder": "Enter text content",
+          "imageDesc": "Select an image to trigger this action when a similar image is detected.",
+          "textDesc": "Enter text to trigger this action when a similar tracked object description is detected.",
+          "error": {
+            "required": "Content is required."
+          }
+        },
+        "threshold": {
+          "title": "Threshold",
+          "error": {
+            "min": "Threshold must be at least 0",
+            "max": "Threshold must be at most 1"
+          }
+        },
+        "actions": {
+          "title": "Actions",
+          "desc": "By default, Frigate fires an MQTT message for all triggers. Choose an additional action to perform when this trigger fires.",
+          "error": {
+            "min": "At least one action must be selected."
+          }
+        }
+      }
+    },
+    "toast": {
+      "success": {
+        "createTrigger": "Trigger {{name}} created successfully.",
+        "updateTrigger": "Trigger {{name}} updated successfully.",
+        "deleteTrigger": "Trigger {{name}} deleted successfully."
+      },
+      "error": {
+        "createTriggerFailed": "Failed to create trigger: {{errorMessage}}",
+        "updateTriggerFailed": "Failed to update trigger: {{errorMessage}}",
+        "deleteTriggerFailed": "Failed to delete trigger: {{errorMessage}}"
+      }
+    }
   }
 }
diff --git a/web/src/api/ws.tsx b/web/src/api/ws.tsx
index 78c596e13..cc3ea05bf 100644
--- a/web/src/api/ws.tsx
+++ b/web/src/api/ws.tsx
@@ -9,6 +9,7 @@ import {
   ModelState,
   ToggleableSetting,
   TrackedObjectUpdateReturnType,
+  TriggerStatus,
 } from "@/types/ws";
 import { FrigateStats } from "@/types/stats";
 import { createContainer } from "react-tracked";
@@ -572,3 +573,13 @@ export function useNotificationTest(): {
   } = useWs("notification_test", "notification_test");
   return { payload: payload as string, send };
 }
+
+export function useTriggers(): { payload: TriggerStatus } {
+  const {
+    value: { payload },
+  } = useWs("triggers", "");
+  const parsed = payload
+    ? JSON.parse(payload as string)
+    : { name: "", camera: "", event_id: "", type: "", score: 0 };
+  return { payload: useDeepMemo(parsed) };
+}
diff --git a/web/src/components/card/SearchThumbnailFooter.tsx b/web/src/components/card/SearchThumbnailFooter.tsx
index c86e9c3c6..e23d1c3f6 100644
--- a/web/src/components/card/SearchThumbnailFooter.tsx
+++ b/web/src/components/card/SearchThumbnailFooter.tsx
@@ -15,6 +15,7 @@ type SearchThumbnailProps = {
   refreshResults: () => void;
   showObjectLifecycle: () => void;
   showSnapshot: () => void;
+  addTrigger: () => void;
 };
 
 export default function SearchThumbnailFooter({
@@ -24,6 +25,7 @@ export default function SearchThumbnailFooter({
   refreshResults,
   showObjectLifecycle,
   showSnapshot,
+  addTrigger,
 }: SearchThumbnailProps) {
   const { t } = useTranslation(["views/search"]);
   const { data: config } = useSWR<FrigateConfig>("config");
@@ -61,6 +63,7 @@ export default function SearchThumbnailFooter({
         refreshResults={refreshResults}
         showObjectLifecycle={showObjectLifecycle}
         showSnapshot={showSnapshot}
+        addTrigger={addTrigger}
       />
diff --git a/web/src/components/menu/SearchResultActions.tsx b/web/src/components/menu/SearchResultActions.tsx
index 1779430f0..2c928becf 100644
--- a/web/src/components/menu/SearchResultActions.tsx
+++ b/web/src/components/menu/SearchResultActions.tsx
@@ -41,6 +41,7 @@ import {
 
 import useSWR from "swr";
 import { Trans, useTranslation } from "react-i18next";
+import { BsFillLightningFill } from "react-icons/bs";
 
 type SearchResultActionsProps = {
   searchResult: SearchResult;
@@ -48,6 +49,7 @@ type SearchResultActionsProps = {
   refreshResults: () => void;
   showObjectLifecycle: () => void;
   showSnapshot: () => void;
+  addTrigger: () => void;
   isContextMenu?: boolean;
   children?: ReactNode;
 };
@@ -58,6 +60,7 @@ export default function SearchResultActions({
   refreshResults,
   showObjectLifecycle,
   showSnapshot,
+  addTrigger,
   isContextMenu = false,
   children,
 }: SearchResultActionsProps) {
@@ -138,6 +141,16 @@ export default function SearchResultActions({
             {t("itemMenu.findSimilar.label")}
           </MenuItem>
         )}
+      {config?.semantic_search?.enabled &&
+        searchResult.data.type == "object" && (
+          <MenuItem
+            aria-label={t("itemMenu.addTrigger.aria")}
+            onClick={addTrigger}
+          >
+            <BsFillLightningFill className="mr-2 size-4" />
+            {t("itemMenu.addTrigger.label")}
+          </MenuItem>
+        )}
       {isMobileOnly &&
         config?.plus?.enabled &&
         searchResult.has_snapshot &&
diff --git a/web/src/components/overlay/CreateTriggerDialog.tsx b/web/src/components/overlay/CreateTriggerDialog.tsx
new file mode 100644
index 000000000..5672c4802
--- /dev/null
+++ b/web/src/components/overlay/CreateTriggerDialog.tsx
@@ -0,0 +1,416 @@
+import { useEffect, useMemo } from "react";
+import { useTranslation } from "react-i18next";
+import { useForm } from "react-hook-form";
+import { zodResolver } from "@hookform/resolvers/zod";
+import { z } from "zod";
+import useSWR from "swr";
+import {
+  Dialog,
+  DialogContent,
+  DialogDescription,
+  DialogFooter,
+  DialogHeader,
+  DialogTitle,
+} from "@/components/ui/dialog";
+import {
+  Form,
+  FormControl,
+  FormDescription,
+  FormField,
+  FormItem,
+  FormLabel,
+  FormMessage,
+} from "@/components/ui/form";
+import { Input } from "@/components/ui/input";
+import {
+  Select,
+  SelectContent,
+  SelectItem,
+  SelectTrigger,
+  SelectValue,
+} from "@/components/ui/select";
+import { Checkbox } from "@/components/ui/checkbox";
+import { Button } from "@/components/ui/button";
+import ActivityIndicator from "@/components/indicators/activity-indicator";
+import { FrigateConfig } from "@/types/frigateConfig";
+import ImagePicker from "@/components/overlay/ImagePicker";
+import { Trigger, TriggerAction, TriggerType } from "@/types/trigger";
+import { Switch } from "@/components/ui/switch";
+import { Textarea } from "../ui/textarea";
+
+type CreateTriggerDialogProps = {
+  show: boolean;
+  trigger: Trigger | null;
+  selectedCamera: string;
+  isLoading: boolean;
+  onCreate: (
+    enabled: boolean,
+    name: string,
+    type: TriggerType,
+    data: string,
+    threshold: number,
+    actions: TriggerAction[],
+  ) => void;
+  onEdit: (trigger: Trigger) => void;
+  onCancel: () => void;
+};
+
+export default function CreateTriggerDialog({
+  show,
+  trigger,
+  selectedCamera,
+  isLoading,
+  onCreate,
+  onEdit,
+  onCancel,
+}: CreateTriggerDialogProps) {
+  const { t } = useTranslation("views/settings");
+  const { data: config } = useSWR<FrigateConfig>("config");
+
+  const existingTriggerNames = useMemo(() => {
+    if (
+      !config ||
+      !selectedCamera ||
+      !config.cameras[selectedCamera]?.semantic_search?.triggers
+    ) {
+      return [];
+    }
+    return Object.keys(config.cameras[selectedCamera].semantic_search.triggers);
+  }, [config, selectedCamera]);
+
+  const formSchema = z.object({
+    enabled: z.boolean(),
+    name: z
+      .string()
+      .min(2, t("triggers.dialog.form.name.error.minLength"))
+      .regex(
+        /^[a-zA-Z0-9_-]+$/,
+        t("triggers.dialog.form.name.error.invalidCharacters"),
+      )
+      .refine(
+        (value) =>
+          !existingTriggerNames.includes(value) || value === trigger?.name,
+        t("triggers.dialog.form.name.error.alreadyExists"),
+      ),
+    type: z.enum(["thumbnail", "description"]),
+    data: z.string().min(1, t("triggers.dialog.form.content.error.required")),
+    threshold: z
+      .number()
+      .min(0, t("triggers.dialog.form.threshold.error.min"))
+      .max(1, t("triggers.dialog.form.threshold.error.max")),
+    actions: z.array(z.enum(["notification"])),
+  });
+
+  const form = useForm<z.infer<typeof formSchema>>({
+    resolver: zodResolver(formSchema),
+    mode: "onChange",
+    defaultValues: {
+      enabled: trigger?.enabled ?? true,
+      name: trigger?.name ?? "",
+      type: trigger?.type ?? "description",
+      data: trigger?.data ?? "",
+      threshold: trigger?.threshold ?? 0.5,
+      actions: trigger?.actions ?? [],
+    },
+  });
+
+  const onSubmit = async (values: z.infer<typeof formSchema>) => {
+    if (trigger) {
+      onEdit({ ...values });
+    } else {
+      onCreate(
+        values.enabled,
+        values.name,
+        values.type,
+        values.data,
+        values.threshold,
+        values.actions,
+      );
+    }
+  };
+
+  useEffect(() => {
+    if (!show) {
+      form.reset({
+        enabled: true,
+        name: "",
+        type: "description",
+        data: "",
+        threshold: 0.5,
+        actions: [],
+      });
+    } else if (trigger) {
+      form.reset(
+        {
+          enabled: trigger.enabled,
+          name: trigger.name,
+          type: trigger.type,
+          data: trigger.data,
+          threshold: trigger.threshold,
+          actions: trigger.actions,
+        },
+        { keepDirty: false, keepTouched: false }, // Reset validation state
+      );
+      // Trigger validation to ensure isValid updates
+      // form.trigger();
+    }
+  }, [show, trigger, form]);
+
+  const handleCancel = () => {
+    form.reset();
+    onCancel();
+  };
+
+  return (
+    <Dialog open={show} onOpenChange={handleCancel}>
+      <DialogContent>
+        <DialogHeader>
+          <DialogTitle>
+            {t(
+              trigger
+                ? "triggers.dialog.editTrigger.title"
+                : "triggers.dialog.createTrigger.title",
+            )}
+          </DialogTitle>
+          <DialogDescription>
+            {t(
+              trigger
+                ? "triggers.dialog.editTrigger.desc"
+                : "triggers.dialog.createTrigger.desc",
+              { camera: selectedCamera },
+            )}
+          </DialogDescription>
+        </DialogHeader>
+
+        <Form {...form}>
+          <form
+            onSubmit={form.handleSubmit(onSubmit)}
+            className="space-y-6"
+          >
+            <FormField
+              control={form.control}
+              name="name"
+              render={({ field }) => (
+                <FormItem>
+                  <FormLabel>{t("triggers.dialog.form.name.title")}</FormLabel>
+                  <FormControl>
+                    <Input
+                      placeholder={t("triggers.dialog.form.name.placeholder")}
+                      {...field}
+                    />
+                  </FormControl>
+                  <FormMessage />
+                </FormItem>
+              )}
+            />
+
+            <FormField
+              control={form.control}
+              name="enabled"
+              render={({ field }) => (
+                <FormItem className="flex flex-row items-center justify-between">
+                  <div className="space-y-0.5">
+                    <FormLabel>
+                      {t("enabled", { ns: "common" })}
+                    </FormLabel>
+                    <FormDescription>
+                      {t("triggers.dialog.form.enabled.description")}
+                    </FormDescription>
+                  </div>
+                  <FormControl>
+                    <Switch
+                      checked={field.value}
+                      onCheckedChange={field.onChange}
+                    />
+                  </FormControl>
+                </FormItem>
+              )}
+            />
+
+            <FormField
+              control={form.control}
+              name="type"
+              render={({ field }) => (
+                <FormItem>
+                  <FormLabel>{t("triggers.dialog.form.type.title")}</FormLabel>
+                  <Select onValueChange={field.onChange} value={field.value}>
+                    <FormControl>
+                      <SelectTrigger>
+                        <SelectValue
+                          placeholder={t("triggers.dialog.form.type.placeholder")}
+                        />
+                      </SelectTrigger>
+                    </FormControl>
+                    <SelectContent>
+                      <SelectItem value="thumbnail">
+                        {t("triggers.type.thumbnail")}
+                      </SelectItem>
+                      <SelectItem value="description">
+                        {t("triggers.type.description")}
+                      </SelectItem>
+                    </SelectContent>
+                  </Select>
+                  <FormMessage />
+                </FormItem>
+              )}
+            />
+
+            <FormField
+              control={form.control}
+              name="data"
+              render={({ field }) => (
+                <FormItem>
+                  <FormLabel>
+                    {t("triggers.dialog.form.content.title")}
+                  </FormLabel>
+                  {form.watch("type") === "thumbnail" ? (
+                    <>
+                      <FormControl>
+                        <ImagePicker
+                          camera={selectedCamera}
+                          onSelectImage={field.onChange}
+                        />
+                      </FormControl>
+                      <FormDescription>
+                        {t("triggers.dialog.form.content.imageDesc")}
+                      </FormDescription>
+                      <FormMessage />
+                    </>
+                  ) : (
+                    <>
+                      <FormControl>
+                        <Textarea