From c54259ecc6d94a44e56c32f5749a2adda3d6065c Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Tue, 11 Feb 2025 07:56:15 -0600 Subject: [PATCH 01/21] use persistence for hls player muting (#16481) --- web/src/components/player/HlsVideoPlayer.tsx | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/web/src/components/player/HlsVideoPlayer.tsx b/web/src/components/player/HlsVideoPlayer.tsx index 0661fb0c9..6c04bb6dd 100644 --- a/web/src/components/player/HlsVideoPlayer.tsx +++ b/web/src/components/player/HlsVideoPlayer.tsx @@ -144,7 +144,7 @@ export default function HlsVideoPlayer({ const [tallCamera, setTallCamera] = useState(false); const [isPlaying, setIsPlaying] = useState(true); - const [muted, setMuted] = useOverlayState("playerMuted", true); + const [muted, setMuted] = usePersistence("hlsPlayerMuted", true); const [volume, setVolume] = useOverlayState("playerVolume", 1.0); const [defaultPlaybackRate] = usePersistence("playbackRate", 1); const [playbackRate, setPlaybackRate] = useOverlayState( @@ -211,7 +211,7 @@ export default function HlsVideoPlayer({ fullscreen: supportsFullscreen, }} setControlsOpen={setControlsOpen} - setMuted={(muted) => setMuted(muted, true)} + setMuted={(muted) => setMuted(muted)} playbackRate={playbackRate ?? 1} hotKeys={hotKeys} onPlayPause={onPlayPause} @@ -280,9 +280,12 @@ export default function HlsVideoPlayer({ } : undefined } - onVolumeChange={() => - setVolume(videoRef.current?.volume ?? 1.0, true) - } + onVolumeChange={() => { + setVolume(videoRef.current?.volume ?? 1.0, true); + if (!frigateControls) { + setMuted(videoRef.current?.muted); + } + }} onPlay={() => { setIsPlaying(true); From 82f86944642be22eb9d90f92f2100e258ace1633 Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Tue, 11 Feb 2025 08:46:25 -0600 Subject: [PATCH 02/21] Toggle review alerts and detections (#16482) * backend * frontend * docs * fix topic name and initial websocket state * update reference config * fix mqtt docs * fix initial topics * don't apply max severity when alerts/detections are disabled * fix ws merge * tweaks --- docs/docs/configuration/reference.md | 4 + docs/docs/integrations/mqtt.md | 16 ++ frigate/comms/dispatcher.py | 48 ++++++ frigate/comms/mqtt.py | 10 ++ frigate/config/camera/review.py | 12 ++ frigate/config/config.py | 6 + frigate/events/maintainer.py | 2 +- frigate/review/maintainer.py | 137 ++++++++++++------ frigate/track/tracked_object.py | 25 +++- web/src/api/ws.tsx | 31 ++++ web/src/types/frigateConfig.ts | 2 + web/src/views/settings/CameraSettingsView.tsx | 49 +++++++ 12 files changed, 290 insertions(+), 52 deletions(-) diff --git a/docs/docs/configuration/reference.md b/docs/docs/configuration/reference.md index 65a0f4825..ce16042b4 100644 --- a/docs/docs/configuration/reference.md +++ b/docs/docs/configuration/reference.md @@ -340,6 +340,8 @@ objects: review: # Optional: alerts configuration alerts: + # Optional: enables alerts for the camera (default: shown below) + enabled: True # Optional: labels that qualify as an alert (default: shown below) labels: - car @@ -352,6 +354,8 @@ review: - driveway # Optional: detections configuration detections: + # Optional: enables detections for the camera (default: shown below) + enabled: True # Optional: labels that qualify as a detection (default: all labels that are tracked / listened to) labels: - car diff --git a/docs/docs/integrations/mqtt.md b/docs/docs/integrations/mqtt.md index 
c344a5aaa..4eaf61919 100644 --- a/docs/docs/integrations/mqtt.md +++ b/docs/docs/integrations/mqtt.md @@ -316,6 +316,22 @@ Topic with current state of the PTZ autotracker for a camera. Published values a Topic to determine if PTZ autotracker is actively tracking an object. Published values are `ON` and `OFF`. +### `frigate//review_alerts/set` + +Topic to turn review alerts for a camera on or off. Expected values are `ON` and `OFF`. + +### `frigate//review_alerts/state` + +Topic with current state of review alerts for a camera. Published values are `ON` and `OFF`. + +### `frigate//review_detections/set` + +Topic to turn review detections for a camera on or off. Expected values are `ON` and `OFF`. + +### `frigate//review_detections/state` + +Topic with current state of review detections for a camera. Published values are `ON` and `OFF`. + ### `frigate//birdseye/set` Topic to turn Birdseye for a camera on and off. Expected values are `ON` and `OFF`. Birdseye mode diff --git a/frigate/comms/dispatcher.py b/frigate/comms/dispatcher.py index e0c2d96e3..61530d086 100644 --- a/frigate/comms/dispatcher.py +++ b/frigate/comms/dispatcher.py @@ -65,6 +65,8 @@ class Dispatcher: "snapshots": self._on_snapshots_command, "birdseye": self._on_birdseye_command, "birdseye_mode": self._on_birdseye_mode_command, + "review_alerts": self._on_alerts_command, + "review_detections": self._on_detections_command, } self._global_settings_handlers: dict[str, Callable] = { "notifications": self._on_global_notification_command, @@ -178,6 +180,8 @@ class Dispatcher: "autotracking": self.config.cameras[ camera ].onvif.autotracking.enabled, + "alerts": self.config.cameras[camera].review.alerts.enabled, + "detections": self.config.cameras[camera].review.detections.enabled, } self.publish("camera_activity", json.dumps(camera_status)) @@ -565,3 +569,47 @@ class Dispatcher: ), retain=True, ) + + def _on_alerts_command(self, camera_name: str, payload: str) -> None: + """Callback for alerts topic.""" + review_settings = self.config.cameras[camera_name].review + + if payload == "ON": + if not self.config.cameras[camera_name].review.alerts.enabled_in_config: + logger.error( + "Alerts must be enabled in the config to be turned on via MQTT." + ) + return + + if not review_settings.alerts.enabled: + logger.info(f"Turning on alerts for {camera_name}") + review_settings.alerts.enabled = True + elif payload == "OFF": + if review_settings.alerts.enabled: + logger.info(f"Turning off alerts for {camera_name}") + review_settings.alerts.enabled = False + + self.config_updater.publish(f"config/review/{camera_name}", review_settings) + self.publish(f"{camera_name}/review_alerts/state", payload, retain=True) + + def _on_detections_command(self, camera_name: str, payload: str) -> None: + """Callback for detections topic.""" + review_settings = self.config.cameras[camera_name].review + + if payload == "ON": + if not self.config.cameras[camera_name].review.detections.enabled_in_config: + logger.error( + "Detections must be enabled in the config to be turned on via MQTT." 
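+                    # note: `enabled_in_config` holds the value loaded from the
+                    # config file at startup, so detections disabled in the config
+                    # file cannot be re-enabled over MQTT, only toggled at runtime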
+ ) + return + + if not review_settings.detections.enabled: + logger.info(f"Turning on detections for {camera_name}") + review_settings.detections.enabled = True + elif payload == "OFF": + if review_settings.detections.enabled: + logger.info(f"Turning off detections for {camera_name}") + review_settings.detections.enabled = False + + self.config_updater.publish(f"config/review/{camera_name}", review_settings) + self.publish(f"{camera_name}/review_detections/state", payload, retain=True) diff --git a/frigate/comms/mqtt.py b/frigate/comms/mqtt.py index 57460b29b..9e11a0af1 100644 --- a/frigate/comms/mqtt.py +++ b/frigate/comms/mqtt.py @@ -107,6 +107,16 @@ class MqttClient(Communicator): # type: ignore[misc] ), retain=True, ) + self.publish( + f"{camera_name}/review_alerts/state", + "ON" if camera.review.alerts.enabled_in_config else "OFF", + retain=True, + ) + self.publish( + f"{camera_name}/review_detections/state", + "ON" if camera.review.detections.enabled_in_config else "OFF", + retain=True, + ) if self.config.notifications.enabled_in_config: self.publish( diff --git a/frigate/config/camera/review.py b/frigate/config/camera/review.py index 549c37db4..d8d26edb9 100644 --- a/frigate/config/camera/review.py +++ b/frigate/config/camera/review.py @@ -13,6 +13,8 @@ DEFAULT_ALERT_OBJECTS = ["person", "car"] class AlertsConfig(FrigateBaseModel): """Configure alerts""" + enabled: bool = Field(default=True, title="Enable alerts.") + labels: list[str] = Field( default=DEFAULT_ALERT_OBJECTS, title="Labels to create alerts for." ) @@ -21,6 +23,10 @@ class AlertsConfig(FrigateBaseModel): title="List of required zones to be entered in order to save the event as an alert.", ) + enabled_in_config: Optional[bool] = Field( + default=None, title="Keep track of original state of alerts." + ) + @field_validator("required_zones", mode="before") @classmethod def validate_required_zones(cls, v): @@ -33,6 +39,8 @@ class AlertsConfig(FrigateBaseModel): class DetectionsConfig(FrigateBaseModel): """Configure detections""" + enabled: bool = Field(default=True, title="Enable detections.") + labels: Optional[list[str]] = Field( default=None, title="Labels to create detections for." ) @@ -41,6 +49,10 @@ class DetectionsConfig(FrigateBaseModel): title="List of required zones to be entered in order to save the event as a detection.", ) + enabled_in_config: Optional[bool] = Field( + default=None, title="Keep track of original state of detections." 
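+        # left as None here; FrigateConfig copies the file value of `enabled`
+        # into this field at startup (see the config.py change below)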
+ ) + @field_validator("required_zones", mode="before") @classmethod def validate_required_zones(cls, v): diff --git a/frigate/config/config.py b/frigate/config/config.py index aea41a7bc..39ee31411 100644 --- a/frigate/config/config.py +++ b/frigate/config/config.py @@ -534,6 +534,12 @@ class FrigateConfig(FrigateBaseModel): camera_config.onvif.autotracking.enabled_in_config = ( camera_config.onvif.autotracking.enabled ) + camera_config.review.alerts.enabled_in_config = ( + camera_config.review.alerts.enabled + ) + camera_config.review.detections.enabled_in_config = ( + camera_config.review.detections.enabled + ) # Add default filters object_keys = camera_config.objects.track diff --git a/frigate/events/maintainer.py b/frigate/events/maintainer.py index ebc506c73..d49da5a97 100644 --- a/frigate/events/maintainer.py +++ b/frigate/events/maintainer.py @@ -187,7 +187,7 @@ class EventProcessor(threading.Thread): ) # keep these from being set back to false because the event - # may have started while recordings and snapshots were enabled + # may have started while recordings/snapshots/alerts/detections were enabled # this would be an issue for long running events if self.events_in_process[event_data["id"]]["has_clip"]: event_data["has_clip"] = True diff --git a/frigate/review/maintainer.py b/frigate/review/maintainer.py index c99479a67..158bc3ac4 100644 --- a/frigate/review/maintainer.py +++ b/frigate/review/maintainer.py @@ -148,7 +148,8 @@ class ReviewSegmentMaintainer(threading.Thread): # create communication for review segments self.requestor = InterProcessRequestor() - self.config_subscriber = ConfigSubscriber("config/record/") + self.record_config_subscriber = ConfigSubscriber("config/record/") + self.review_config_subscriber = ConfigSubscriber("config/review/") self.detection_subscriber = DetectionSubscriber(DetectionTypeEnum.all) # manual events @@ -226,6 +227,13 @@ class ReviewSegmentMaintainer(threading.Thread): ) self.active_review_segments[segment.camera] = None + def end_segment(self, camera: str) -> None: + """End the pending segment for a camera.""" + segment = self.active_review_segments.get(camera) + if segment: + prev_data = segment.get_data(False) + self._publish_segment_end(segment, prev_data) + def update_existing_segment( self, segment: PendingReviewSegment, @@ -273,6 +281,7 @@ class ReviewSegmentMaintainer(threading.Thread): & set(camera_config.review.alerts.required_zones) ) ) + and camera_config.review.alerts.enabled ): segment.severity = SeverityEnum.alert should_update = True @@ -369,13 +378,14 @@ class ReviewSegmentMaintainer(threading.Thread): & set(camera_config.review.alerts.required_zones) ) ) + and camera_config.review.alerts.enabled ): severity = SeverityEnum.alert # if object is detection label # and review is not already a detection or alert # and has entered required zones or required zones is not set - # mark this review as alert + # mark this review as detection if ( not severity and ( @@ -390,6 +400,7 @@ class ReviewSegmentMaintainer(threading.Thread): & set(camera_config.review.detections.required_zones) ) ) + and camera_config.review.detections.enabled ): severity = SeverityEnum.detection @@ -430,15 +441,25 @@ class ReviewSegmentMaintainer(threading.Thread): # check if there is an updated config while True: ( - updated_topic, + updated_record_topic, updated_record_config, - ) = self.config_subscriber.check_for_update() + ) = self.record_config_subscriber.check_for_update() - if not updated_topic: + ( + updated_review_topic, + updated_review_config, + ) 
= self.review_config_subscriber.check_for_update() + + if not updated_record_topic and not updated_review_topic: break - camera_name = updated_topic.rpartition("/")[-1] - self.config.cameras[camera_name].record = updated_record_config + if updated_record_topic: + camera_name = updated_record_topic.rpartition("/")[-1] + self.config.cameras[camera_name].record = updated_record_config + + if updated_review_topic: + camera_name = updated_review_topic.rpartition("/")[-1] + self.config.cameras[camera_name].review = updated_review_config (topic, data) = self.detection_subscriber.check_for_update(timeout=1) @@ -475,12 +496,22 @@ class ReviewSegmentMaintainer(threading.Thread): if not self.config.cameras[camera].record.enabled: if current_segment: - self.update_existing_segment( - current_segment, frame_name, frame_time, [] - ) - + self.end_segment(camera) continue + # Check if the current segment should be processed based on enabled settings + if current_segment: + if ( + current_segment.severity == SeverityEnum.alert + and not self.config.cameras[camera].review.alerts.enabled + ) or ( + current_segment.severity == SeverityEnum.detection + and not self.config.cameras[camera].review.detections.enabled + ): + self.end_segment(camera) + continue + + # If we reach here, the segment can be processed (if it exists) if current_segment is not None: if topic == DetectionTypeEnum.video: self.update_existing_segment( @@ -496,20 +527,24 @@ class ReviewSegmentMaintainer(threading.Thread): current_segment.last_update = frame_time for audio in audio_detections: - if audio in camera_config.review.alerts.labels: + if ( + audio in camera_config.review.alerts.labels + and camera_config.review.alerts.enabled + ): current_segment.audio.add(audio) current_segment.severity = SeverityEnum.alert elif ( camera_config.review.detections.labels is None or audio in camera_config.review.detections.labels - ): + ) and camera_config.review.detections.enabled: current_segment.audio.add(audio) elif topic == DetectionTypeEnum.api: if manual_info["state"] == ManualEventState.complete: current_segment.detections[manual_info["event_id"]] = ( manual_info["label"] ) - current_segment.severity = SeverityEnum.alert + if self.config.cameras[camera].review.alerts.enabled: + current_segment.severity = SeverityEnum.alert current_segment.last_update = manual_info["end_time"] elif manual_info["state"] == ManualEventState.start: self.indefinite_events[camera][manual_info["event_id"]] = ( @@ -518,7 +553,8 @@ class ReviewSegmentMaintainer(threading.Thread): current_segment.detections[manual_info["event_id"]] = ( manual_info["label"] ) - current_segment.severity = SeverityEnum.alert + if self.config.cameras[camera].review.alerts.enabled: + current_segment.severity = SeverityEnum.alert # temporarily make it so this event can not end current_segment.last_update = sys.maxsize @@ -536,12 +572,16 @@ class ReviewSegmentMaintainer(threading.Thread): ) else: if topic == DetectionTypeEnum.video: - self.check_if_new_segment( - camera, - frame_name, - frame_time, - current_tracked_objects, - ) + if ( + self.config.cameras[camera].review.alerts.enabled + or self.config.cameras[camera].review.detections.enabled + ): + self.check_if_new_segment( + camera, + frame_name, + frame_time, + current_tracked_objects, + ) elif topic == DetectionTypeEnum.audio and len(audio_detections) > 0: severity = None @@ -549,13 +589,16 @@ class ReviewSegmentMaintainer(threading.Thread): detections = set() for audio in audio_detections: - if audio in 
camera_config.review.alerts.labels: + if ( + audio in camera_config.review.alerts.labels + and camera_config.review.alerts.enabled + ): detections.add(audio) severity = SeverityEnum.alert elif ( camera_config.review.detections.labels is None or audio in camera_config.review.detections.labels - ): + ) and camera_config.review.detections.enabled: detections.add(audio) if not severity: @@ -572,28 +615,36 @@ class ReviewSegmentMaintainer(threading.Thread): detections, ) elif topic == DetectionTypeEnum.api: - self.active_review_segments[camera] = PendingReviewSegment( - camera, - frame_time, - SeverityEnum.alert, - {manual_info["event_id"]: manual_info["label"]}, - {}, - [], - set(), - ) - - if manual_info["state"] == ManualEventState.start: - self.indefinite_events[camera][manual_info["event_id"]] = ( - manual_info["label"] + if self.config.cameras[camera].review.alerts.enabled: + self.active_review_segments[camera] = PendingReviewSegment( + camera, + frame_time, + SeverityEnum.alert, + {manual_info["event_id"]: manual_info["label"]}, + {}, + [], + set(), ) - # temporarily make it so this event can not end - self.active_review_segments[camera].last_update = sys.maxsize - elif manual_info["state"] == ManualEventState.complete: - self.active_review_segments[camera].last_update = manual_info[ - "end_time" - ] - self.config_subscriber.stop() + if manual_info["state"] == ManualEventState.start: + self.indefinite_events[camera][manual_info["event_id"]] = ( + manual_info["label"] + ) + # temporarily make it so this event can not end + self.active_review_segments[ + camera + ].last_update = sys.maxsize + elif manual_info["state"] == ManualEventState.complete: + self.active_review_segments[ + camera + ].last_update = manual_info["end_time"] + else: + logger.warning( + f"Manual event API has been called for {camera}, but alerts are disabled. This manual event will not appear as an alert." 
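+                            # no review segment is created in this case; the
+                            # manual event still ends normally, it just never
+                            # surfaces as an alert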
+ ) + + self.record_config_subscriber.stop() + self.review_config_subscriber.stop() self.requestor.stop() self.detection_subscriber.stop() logger.info("Exiting review maintainer...") diff --git a/frigate/track/tracked_object.py b/frigate/track/tracked_object.py index ea1aeedcb..ac57083df 100644 --- a/frigate/track/tracked_object.py +++ b/frigate/track/tracked_object.py @@ -72,18 +72,27 @@ class TrackedObject: def max_severity(self) -> Optional[str]: review_config = self.camera_config.review - if self.obj_data["label"] in review_config.alerts.labels and ( - not review_config.alerts.required_zones - or set(self.entered_zones) & set(review_config.alerts.required_zones) + if ( + self.camera_config.review.alerts.enabled + and self.obj_data["label"] in review_config.alerts.labels + and ( + not review_config.alerts.required_zones + or set(self.entered_zones) & set(review_config.alerts.required_zones) + ) ): return SeverityEnum.alert if ( - not review_config.detections.labels - or self.obj_data["label"] in review_config.detections.labels - ) and ( - not review_config.detections.required_zones - or set(self.entered_zones) & set(review_config.detections.required_zones) + self.camera_config.review.detections.enabled + and ( + not review_config.detections.labels + or self.obj_data["label"] in review_config.detections.labels + ) + and ( + not review_config.detections.required_zones + or set(self.entered_zones) + & set(review_config.detections.required_zones) + ) ): return SeverityEnum.detection diff --git a/web/src/api/ws.tsx b/web/src/api/ws.tsx index 3ac7c9fee..a8cedf953 100644 --- a/web/src/api/ws.tsx +++ b/web/src/api/ws.tsx @@ -61,6 +61,8 @@ function useValue(): useValueReturn { notifications, notifications_suspended, autotracking, + alerts, + detections, } = // @ts-expect-error we know this is correct state["config"]; @@ -76,6 +78,10 @@ function useValue(): useValueReturn { cameraStates[`${name}/ptz_autotracker/state`] = autotracking ? "ON" : "OFF"; + cameraStates[`${name}/review_alerts/state`] = alerts ? "ON" : "OFF"; + cameraStates[`${name}/review_detections/state`] = detections + ? 
"ON" + : "OFF"; }); setWsState((prevState) => ({ @@ -213,6 +219,31 @@ export function useAutotrackingState(camera: string): { return { payload: payload as ToggleableSetting, send }; } +export function useAlertsState(camera: string): { + payload: ToggleableSetting; + send: (payload: ToggleableSetting, retain?: boolean) => void; +} { + const { + value: { payload }, + send, + } = useWs(`${camera}/review_alerts/state`, `${camera}/review_alerts/set`); + return { payload: payload as ToggleableSetting, send }; +} + +export function useDetectionsState(camera: string): { + payload: ToggleableSetting; + send: (payload: ToggleableSetting, retain?: boolean) => void; +} { + const { + value: { payload }, + send, + } = useWs( + `${camera}/review_detections/state`, + `${camera}/review_detections/set`, + ); + return { payload: payload as ToggleableSetting, send }; +} + export function usePtzCommand(camera: string): { payload: string; send: (payload: string, retain?: boolean) => void; diff --git a/web/src/types/frigateConfig.ts b/web/src/types/frigateConfig.ts index 073308d58..263883976 100644 --- a/web/src/types/frigateConfig.ts +++ b/web/src/types/frigateConfig.ts @@ -179,6 +179,7 @@ export interface CameraConfig { }; review: { alerts: { + enabled: boolean; required_zones: string[]; labels: string[]; retain: { @@ -187,6 +188,7 @@ export interface CameraConfig { }; }; detections: { + enabled: boolean; required_zones: string[]; labels: string[]; retain: { diff --git a/web/src/views/settings/CameraSettingsView.tsx b/web/src/views/settings/CameraSettingsView.tsx index 30c6229e1..fa9d0ba58 100644 --- a/web/src/views/settings/CameraSettingsView.tsx +++ b/web/src/views/settings/CameraSettingsView.tsx @@ -27,6 +27,9 @@ import { LuExternalLink } from "react-icons/lu"; import { capitalizeFirstLetter } from "@/utils/stringUtil"; import { MdCircle } from "react-icons/md"; import { cn } from "@/lib/utils"; +import { Switch } from "@/components/ui/switch"; +import { Label } from "@/components/ui/label"; +import { useAlertsState, useDetectionsState } from "@/api/ws"; type CameraSettingsViewProps = { selectedCamera: string; @@ -105,6 +108,11 @@ export default function CameraSettingsView({ const watchedAlertsZones = form.watch("alerts_zones"); const watchedDetectionsZones = form.watch("detections_zones"); + const { payload: alertsState, send: sendAlerts } = + useAlertsState(selectedCamera); + const { payload: detectionsState, send: sendDetections } = + useDetectionsState(selectedCamera); + const handleCheckedChange = useCallback( (isChecked: boolean) => { if (!isChecked) { @@ -244,6 +252,47 @@ export default function CameraSettingsView({ + + Review + + +
+          <div className="flex flex-col gap-2">
+            <div className="flex flex-row items-center gap-2">
+              <Switch
+                checked={alertsState == "ON"}
+                onCheckedChange={(isChecked) => {
+                  sendAlerts(isChecked ? "ON" : "OFF");
+                }}
+              />
+              <Label>Alerts</Label>
+            </div>
+            <div className="flex flex-row items-center gap-2">
+              <Switch
+                checked={detectionsState == "ON"}
+                onCheckedChange={(isChecked) => {
+                  sendDetections(isChecked ? "ON" : "OFF");
+                }}
+              />
+              <Label>Detections</Label>
+            </div>
+            <div className="mt-3 text-sm text-muted-foreground">
+              Enable/disable alerts and detections for this camera. When
+              disabled, no new review items will be generated.
+            </div>
+          </div>
+ + + Review Classification From 4ef621402915d70920c085253b82e587cdf44124 Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Tue, 11 Feb 2025 09:37:58 -0600 Subject: [PATCH 03/21] Tracking improvements (#16484) * norfair tracker config per object type * change default R back to 3.4 * separate trackers for static and autotracking cameras * tweak params and fix debug draw * ensure all trackers are correctly updated even when there are no detections * basic reid with histograms * check mp value * check mp value again * stationary objects won't have embeddings * don't switch trackers when autotracking is toggled after startup * improve motion detection during autotracking * use helper function * get histogram in tracker instead of detect --- frigate/motion/improved_motion.py | 38 ++++ frigate/ptz/onvif.py | 1 - frigate/track/norfair_tracker.py | 316 ++++++++++++++++++++++++++---- frigate/util/image.py | 10 + frigate/video.py | 15 +- 5 files changed, 327 insertions(+), 53 deletions(-) diff --git a/frigate/motion/improved_motion.py b/frigate/motion/improved_motion.py index 297337560..d865cc92d 100644 --- a/frigate/motion/improved_motion.py +++ b/frigate/motion/improved_motion.py @@ -5,6 +5,7 @@ import imutils import numpy as np from scipy.ndimage import gaussian_filter +from frigate.camera import PTZMetrics from frigate.comms.config_updater import ConfigSubscriber from frigate.config import MotionConfig from frigate.motion import MotionDetector @@ -18,6 +19,7 @@ class ImprovedMotionDetector(MotionDetector): frame_shape, config: MotionConfig, fps: int, + ptz_metrics: PTZMetrics = None, name="improved", blur_radius=1, interpolation=cv2.INTER_NEAREST, @@ -48,6 +50,8 @@ class ImprovedMotionDetector(MotionDetector): self.contrast_values[:, 1:2] = 255 self.contrast_values_index = 0 self.config_subscriber = ConfigSubscriber(f"config/motion/{name}") + self.ptz_metrics = ptz_metrics + self.last_stop_time = None def is_calibrating(self): return self.calibrating @@ -64,6 +68,21 @@ class ImprovedMotionDetector(MotionDetector): if not self.config.enabled: return motion_boxes + # if ptz motor is moving from autotracking, quickly return + # a single box that is 80% of the frame + if ( + self.ptz_metrics.autotracker_enabled.value + and not self.ptz_metrics.motor_stopped.is_set() + ): + return [ + ( + int(self.frame_shape[1] * 0.1), + int(self.frame_shape[0] * 0.1), + int(self.frame_shape[1] * 0.9), + int(self.frame_shape[0] * 0.9), + ) + ] + gray = frame[0 : self.frame_shape[0], 0 : self.frame_shape[1]] # resize frame @@ -151,6 +170,25 @@ class ImprovedMotionDetector(MotionDetector): self.motion_frame_size[0] * self.motion_frame_size[1] ) + # check if the motor has just stopped from autotracking + # if so, reassign the average to the current frame so we begin with a new baseline + if ( + # ensure we only do this for cameras with autotracking enabled + self.ptz_metrics.autotracker_enabled.value + and self.ptz_metrics.motor_stopped.is_set() + and ( + self.last_stop_time is None + or self.ptz_metrics.stop_time.value != self.last_stop_time + ) + # value is 0 on startup or when motor is moving + and self.ptz_metrics.stop_time.value != 0 + ): + self.last_stop_time = self.ptz_metrics.stop_time.value + + self.avg_frame = resized_frame.astype(np.float32) + motion_boxes = [] + pct_motion = 0 + # once the motion is less than 5% and the number of contours is < 4, assume its calibrated if pct_motion < 0.05 and len(motion_boxes) <= 4: self.calibrating = False diff --git 
a/frigate/ptz/onvif.py b/frigate/ptz/onvif.py index 7969a169b..1a813c799 100644 --- a/frigate/ptz/onvif.py +++ b/frigate/ptz/onvif.py @@ -465,7 +465,6 @@ class OnvifController: return self.cams[camera_name]["active"] = True - self.ptz_metrics[camera_name].motor_stopped.clear() self.ptz_metrics[camera_name].start_time.value = 0 self.ptz_metrics[camera_name].stop_time.value = 0 move_request = self.cams[camera_name]["move_request"] diff --git a/frigate/track/norfair_tracker.py b/frigate/track/norfair_tracker.py index 67950bd0c..d168bfe94 100644 --- a/frigate/track/norfair_tracker.py +++ b/frigate/track/norfair_tracker.py @@ -1,7 +1,9 @@ import logging import random import string +from typing import Sequence +import cv2 import numpy as np from norfair import ( Detection, @@ -11,12 +13,19 @@ from norfair import ( draw_boxes, ) from norfair.drawing.drawer import Drawer +from rich import print +from rich.console import Console +from rich.table import Table from frigate.camera import PTZMetrics from frigate.config import CameraConfig from frigate.ptz.autotrack import PtzMotionEstimator from frigate.track import ObjectTracker -from frigate.util.image import intersection_over_union +from frigate.util.image import ( + SharedMemoryFrameManager, + get_histogram, + intersection_over_union, +) from frigate.util.object import average_boxes, median_of_boxes logger = logging.getLogger(__name__) @@ -71,12 +80,36 @@ def frigate_distance(detection: Detection, tracked_object) -> float: return distance(detection.points, tracked_object.estimate) +def histogram_distance(matched_not_init_trackers, unmatched_trackers): + snd_embedding = unmatched_trackers.last_detection.embedding + + if snd_embedding is None: + for detection in reversed(unmatched_trackers.past_detections): + if detection.embedding is not None: + snd_embedding = detection.embedding + break + else: + return 1 + + for detection_fst in matched_not_init_trackers.past_detections: + if detection_fst.embedding is None: + continue + + distance = 1 - cv2.compareHist( + snd_embedding, detection_fst.embedding, cv2.HISTCMP_CORREL + ) + if distance < 0.5: + return distance + return 1 + + class NorfairTracker(ObjectTracker): def __init__( self, config: CameraConfig, ptz_metrics: PTZMetrics, ): + self.frame_manager = SharedMemoryFrameManager() self.tracked_objects = {} self.untracked_object_boxes: list[list[int]] = [] self.disappeared = {} @@ -88,26 +121,137 @@ class NorfairTracker(ObjectTracker): self.ptz_motion_estimator = {} self.camera_name = config.name self.track_id_map = {} - # TODO: could also initialize a tracker per object class if there - # was a good reason to have different distance calculations - self.tracker = Tracker( - distance_function=frigate_distance, - distance_threshold=2.5, - initialization_delay=self.detect_config.min_initialized, - hit_counter_max=self.detect_config.max_disappeared, - # use default filter factory with custom values - # R is the multiplier for the sensor measurement noise matrix, default of 4.0 - # lowering R means that we trust the position of the bounding boxes more - # testing shows that the prediction was being relied on a bit too much - # TODO: could use different kalman filter values along with - # the different tracker per object class - filter_factory=OptimizedKalmanFilterFactory(R=3.4), - ) + + # Define tracker configurations for static camera + self.object_type_configs = { + "car": { + "filter_factory": OptimizedKalmanFilterFactory(R=3.4, Q=0.03), + "distance_function": frigate_distance, + "distance_threshold": 
2.5, + }, + } + + # Define autotracking PTZ-specific configurations + self.ptz_object_type_configs = { + "person": { + "filter_factory": OptimizedKalmanFilterFactory( + R=4.5, + Q=0.25, + ), + "distance_function": frigate_distance, + "distance_threshold": 2, + "past_detections_length": 5, + "reid_distance_function": histogram_distance, + "reid_distance_threshold": 0.5, + "reid_hit_counter_max": 10, + }, + } + + # Default tracker configuration + # use default filter factory with custom values + # R is the multiplier for the sensor measurement noise matrix, default of 4.0 + # lowering R means that we trust the position of the bounding boxes more + # testing shows that the prediction was being relied on a bit too much + self.default_tracker_config = { + "filter_factory": OptimizedKalmanFilterFactory(R=3.4), + "distance_function": frigate_distance, + "distance_threshold": 2.5, + } + + self.default_ptz_tracker_config = { + "filter_factory": OptimizedKalmanFilterFactory(R=4, Q=0.2), + "distance_function": frigate_distance, + "distance_threshold": 3, + } + + self.trackers = {} + # Handle static trackers + for obj_type, tracker_config in self.object_type_configs.items(): + if obj_type in self.camera_config.objects.track: + if obj_type not in self.trackers: + self.trackers[obj_type] = {} + self.trackers[obj_type]["static"] = self._create_tracker( + obj_type, tracker_config + ) + + # Handle PTZ trackers + for obj_type, tracker_config in self.ptz_object_type_configs.items(): + if ( + obj_type in self.camera_config.onvif.autotracking.track + and self.camera_config.onvif.autotracking.enabled_in_config + ): + if obj_type not in self.trackers: + self.trackers[obj_type] = {} + self.trackers[obj_type]["ptz"] = self._create_tracker( + obj_type, tracker_config + ) + + # Initialize default trackers + self.default_tracker = { + "static": Tracker( + distance_function=frigate_distance, + distance_threshold=self.default_tracker_config["distance_threshold"], + initialization_delay=self.detect_config.min_initialized, + hit_counter_max=self.detect_config.max_disappeared, + filter_factory=self.default_tracker_config["filter_factory"], + ), + "ptz": Tracker( + distance_function=frigate_distance, + distance_threshold=self.default_ptz_tracker_config[ + "distance_threshold" + ], + initialization_delay=self.detect_config.min_initialized, + hit_counter_max=self.detect_config.max_disappeared, + filter_factory=self.default_ptz_tracker_config["filter_factory"], + ), + } + if self.ptz_metrics.autotracker_enabled.value: self.ptz_motion_estimator = PtzMotionEstimator( self.camera_config, self.ptz_metrics ) + def _create_tracker(self, obj_type, tracker_config): + """Helper function to create a tracker with given configuration.""" + tracker_params = { + "distance_function": tracker_config["distance_function"], + "distance_threshold": tracker_config["distance_threshold"], + "initialization_delay": self.detect_config.min_initialized, + "hit_counter_max": self.detect_config.max_disappeared, + "filter_factory": tracker_config["filter_factory"], + } + + # Add reid parameters if max_frames is None + if ( + self.detect_config.stationary.max_frames.objects.get( + obj_type, self.detect_config.stationary.max_frames.default + ) + is None + ): + reid_keys = [ + "past_detections_length", + "reid_distance_function", + "reid_distance_threshold", + "reid_hit_counter_max", + ] + tracker_params.update( + {key: tracker_config[key] for key in reid_keys if key in tracker_config} + ) + + return Tracker(**tracker_params) + + def get_tracker(self, 
object_type: str) -> Tracker: + """Get the appropriate tracker based on object type and camera mode.""" + mode = ( + "ptz" + if self.camera_config.onvif.autotracking.enabled_in_config + and object_type in self.camera_config.onvif.autotracking.track + else "static" + ) + if object_type in self.trackers: + return self.trackers[object_type][mode] + return self.default_tracker[mode] + def register(self, track_id, obj): rand_id = "".join(random.choices(string.ascii_lowercase + string.digits, k=6)) id = f"{obj['frame_time']}-{rand_id}" @@ -116,10 +260,13 @@ class NorfairTracker(ObjectTracker): obj["start_time"] = obj["frame_time"] obj["motionless_count"] = 0 obj["position_changes"] = 0 + + # Get the correct tracker for this object's label + tracker = self.get_tracker(obj["label"]) obj["score_history"] = [ p.data["score"] for p in next( - (o for o in self.tracker.tracked_objects if o.global_id == track_id) + (o for o in tracker.tracked_objects if o.global_id == track_id) ).past_detections ] self.tracked_objects[id] = obj @@ -137,11 +284,25 @@ class NorfairTracker(ObjectTracker): self.stationary_box_history[id] = [] def deregister(self, id, track_id): + obj = self.tracked_objects[id] + del self.tracked_objects[id] del self.disappeared[id] - self.tracker.tracked_objects = [ - o for o in self.tracker.tracked_objects if o.global_id != track_id - ] + + # only manually deregister objects from norfair's list if max_frames is defined + if ( + self.detect_config.stationary.max_frames.objects.get( + obj["label"], self.detect_config.stationary.max_frames.default + ) + is not None + ): + tracker = self.get_tracker(obj["label"]) + tracker.tracked_objects = [ + o + for o in tracker.tracked_objects + if o.global_id != track_id and o.hit_counter < 0 + ] + del self.track_id_map[track_id] # tracks the current position of the object based on the last N bounding boxes @@ -287,9 +448,13 @@ class NorfairTracker(ObjectTracker): def match_and_update( self, frame_name: str, frame_time: float, detections: list[dict[str, any]] ): - norfair_detections = [] - + # Group detections by object type + detections_by_type = {} for obj in detections: + label = obj[0] + if label not in detections_by_type: + detections_by_type[label] = [] + # centroid is used for other things downstream centroid_x = int((obj[2][0] + obj[2][2]) / 2.0) centroid_y = int((obj[2][1] + obj[2][3]) / 2.0) @@ -297,22 +462,32 @@ class NorfairTracker(ObjectTracker): # track based on top,left and bottom,right corners instead of centroid points = np.array([[obj[2][0], obj[2][1]], [obj[2][2], obj[2][3]]]) - norfair_detections.append( - Detection( - points=points, - label=obj[0], - data={ - "label": obj[0], - "score": obj[1], - "box": obj[2], - "area": obj[3], - "ratio": obj[4], - "region": obj[5], - "frame_time": frame_time, - "centroid": (centroid_x, centroid_y), - }, + embedding = None + if self.ptz_metrics.autotracker_enabled.value: + yuv_frame = self.frame_manager.get( + frame_name, self.camera_config.frame_shape_yuv ) + embedding = get_histogram( + yuv_frame, obj[2][0], obj[2][1], obj[2][2], obj[2][3] + ) + + detection = Detection( + points=points, + label=label, + # TODO: stationary objects won't have embeddings + embedding=embedding, + data={ + "label": label, + "score": obj[1], + "box": obj[2], + "area": obj[3], + "ratio": obj[4], + "region": obj[5], + "frame_time": frame_time, + "centroid": (centroid_x, centroid_y), + }, ) + detections_by_type[label].append(detection) coord_transformations = None @@ -327,13 +502,32 @@ class NorfairTracker(ObjectTracker): 
detections, frame_name, frame_time, self.camera_name ) - tracked_objects = self.tracker.update( - detections=norfair_detections, coord_transformations=coord_transformations + # Update all configured trackers + all_tracked_objects = [] + for label in self.trackers: + tracker = self.get_tracker(label) + tracked_objects = tracker.update( + detections=detections_by_type.get(label, []), + coord_transformations=coord_transformations, + ) + all_tracked_objects.extend(tracked_objects) + + # Collect detections for objects without specific trackers + default_detections = [] + for label, dets in detections_by_type.items(): + if label not in self.trackers: + default_detections.extend(dets) + + # Update default tracker with untracked detections + mode = "ptz" if self.ptz_metrics.autotracker_enabled.value else "static" + tracked_objects = self.default_tracker[mode].update( + detections=default_detections, coord_transformations=coord_transformations ) + all_tracked_objects.extend(tracked_objects) # update or create new tracks active_ids = [] - for t in tracked_objects: + for t in all_tracked_objects: estimate = tuple(t.estimate.flatten().astype(int)) # keep the estimate within the bounds of the image estimate = ( @@ -373,19 +567,55 @@ class NorfairTracker(ObjectTracker): o[2] for o in detections if o[2] not in tracked_object_boxes ] + def print_objects_as_table(self, tracked_objects: Sequence): + """Used for helping in debugging""" + print() + console = Console() + table = Table(show_header=True, header_style="bold magenta") + table.add_column("Id", style="yellow", justify="center") + table.add_column("Age", justify="right") + table.add_column("Hit Counter", justify="right") + table.add_column("Last distance", justify="right") + table.add_column("Init Id", justify="center") + for obj in tracked_objects: + table.add_row( + str(obj.id), + str(obj.age), + str(obj.hit_counter), + f"{obj.last_distance:.4f}" if obj.last_distance is not None else "N/A", + str(obj.initializing_id), + ) + console.print(table) + def debug_draw(self, frame, frame_time): + # Collect all tracked objects from each tracker + all_tracked_objects = [] + + # print a table to the console with norfair tracked object info + if False: + self.print_objects_as_table(self.trackers["person"]["ptz"].tracked_objects) + + # Get tracked objects from type-specific trackers + for object_trackers in self.trackers.values(): + for tracker in object_trackers.values(): + all_tracked_objects.extend(tracker.tracked_objects) + + # Get tracked objects from default trackers + for tracker in self.default_tracker.values(): + all_tracked_objects.extend(tracker.tracked_objects) + active_detections = [ Drawable(id=obj.id, points=obj.last_detection.points, label=obj.label) - for obj in self.tracker.tracked_objects + for obj in all_tracked_objects if obj.last_detection.data["frame_time"] == frame_time ] missing_detections = [ Drawable(id=obj.id, points=obj.last_detection.points, label=obj.label) - for obj in self.tracker.tracked_objects + for obj in all_tracked_objects if obj.last_detection.data["frame_time"] != frame_time ] # draw the estimated bounding box - draw_boxes(frame, self.tracker.tracked_objects, color="green", draw_ids=True) + draw_boxes(frame, all_tracked_objects, color="green", draw_ids=True) # draw the detections that were detected in the current frame draw_boxes(frame, active_detections, color="blue", draw_ids=True) # draw the detections that are missing in the current frame @@ -393,7 +623,7 @@ class NorfairTracker(ObjectTracker): # draw the distance 
calculation for the last detection # estimate vs detection - for obj in self.tracker.tracked_objects: + for obj in all_tracked_objects: ld = obj.last_detection # bottom right text_anchor = ( diff --git a/frigate/util/image.py b/frigate/util/image.py index 24f523f1e..7e4915821 100644 --- a/frigate/util/image.py +++ b/frigate/util/image.py @@ -949,3 +949,13 @@ def get_image_from_recording( return process.stdout else: return None + + +def get_histogram(image, x_min, y_min, x_max, y_max): + image_bgr = cv2.cvtColor(image, cv2.COLOR_YUV2BGR_I420) + image_bgr = image_bgr[y_min:y_max, x_min:x_max] + + hist = cv2.calcHist( + [image_bgr], [0, 1, 2], None, [8, 8, 8], [0, 256, 0, 256, 0, 256] + ) + return cv2.normalize(hist, hist).flatten() diff --git a/frigate/video.py b/frigate/video.py index cb922500d..f82d86648 100755 --- a/frigate/video.py +++ b/frigate/video.py @@ -435,7 +435,11 @@ def track_camera( object_filters = config.objects.filters motion_detector = ImprovedMotionDetector( - frame_shape, config.motion, config.detect.fps, name=config.name + frame_shape, + config.motion, + config.detect.fps, + name=config.name, + ptz_metrics=ptz_metrics, ) object_detector = RemoteObjectDetector( name, labelmap, detection_queue, result_connection, model_config, stop_event @@ -506,14 +510,7 @@ def detect( height = y_max - y_min area = width * height ratio = width / max(1, height) - det = ( - d[0], - d[1], - (x_min, y_min, x_max, y_max), - area, - ratio, - region, - ) + det = (d[0], d[1], (x_min, y_min, x_max, y_max), area, ratio, region) # apply object filters if is_object_filtered(det, objects_to_track, object_filters): continue From b594f198a95f85b1bf68c7df95179f05e6eef9c2 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Tue, 11 Feb 2025 09:08:13 -0700 Subject: [PATCH 04/21] Consolidate HailoRT into the main Docker Image (#16487) * Simplify main build to include hailo * Update docs * Remove hailo docker build --- .github/workflows/ci.yml | 27 -------------- docker/hailo8l/Dockerfile | 42 ---------------------- docker/hailo8l/h8l.hcl | 34 ------------------ docker/hailo8l/h8l.mk | 15 -------- docker/hailo8l/install_hailort.sh | 19 ---------- docker/hailo8l/requirements-wheels-h8l.txt | 12 ------- docker/main/Dockerfile | 4 +++ docker/main/install_hailort.sh | 14 ++++++++ docker/main/requirements-wheels.txt | 13 +++++++ docs/docs/frigate/installation.md | 2 +- 10 files changed, 32 insertions(+), 150 deletions(-) delete mode 100644 docker/hailo8l/Dockerfile delete mode 100644 docker/hailo8l/h8l.hcl delete mode 100644 docker/hailo8l/h8l.mk delete mode 100755 docker/hailo8l/install_hailort.sh delete mode 100644 docker/hailo8l/requirements-wheels-h8l.txt create mode 100755 docker/main/install_hailort.sh diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index bf5763c00..2046ed100 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -202,33 +202,6 @@ jobs: set: | rk.tags=${{ steps.setup.outputs.image-name }}-rk *.cache-from=type=gha - combined_extra_builds: - runs-on: ubuntu-22.04 - name: Combined Extra Builds - needs: - - amd64_build - - arm64_build - steps: - - name: Check out code - uses: actions/checkout@v4 - with: - persist-credentials: false - - name: Set up QEMU and Buildx - id: setup - uses: ./.github/actions/setup - with: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - - name: Build and push Hailo-8l build - uses: docker/bake-action@v6 - with: - source: . 
- push: true - targets: h8l - files: docker/hailo8l/h8l.hcl - set: | - h8l.tags=${{ steps.setup.outputs.image-name }}-h8l - *.cache-from=type=registry,ref=${{ steps.setup.outputs.cache-name }}-h8l - *.cache-to=type=registry,ref=${{ steps.setup.outputs.cache-name }}-h8l,mode=max # The majority of users running arm64 are rpi users, so the rpi # build should be the primary arm64 image assemble_default_build: diff --git a/docker/hailo8l/Dockerfile b/docker/hailo8l/Dockerfile deleted file mode 100644 index 33b5b08f0..000000000 --- a/docker/hailo8l/Dockerfile +++ /dev/null @@ -1,42 +0,0 @@ -# syntax=docker/dockerfile:1.6 - -ARG DEBIAN_FRONTEND=noninteractive - -# Build Python wheels -FROM wheels AS h8l-wheels - -RUN python3 -m pip config set global.break-system-packages true -COPY docker/main/requirements-wheels.txt /requirements-wheels.txt -COPY docker/hailo8l/requirements-wheels-h8l.txt /requirements-wheels-h8l.txt - -RUN sed -i "/https:\/\//d" /requirements-wheels.txt - -# Create a directory to store the built wheels -RUN mkdir /h8l-wheels - -# Build the wheels -RUN pip3 wheel --wheel-dir=/h8l-wheels -c /requirements-wheels.txt -r /requirements-wheels-h8l.txt - -FROM wget AS hailort -ARG TARGETARCH -RUN --mount=type=bind,source=docker/hailo8l/install_hailort.sh,target=/deps/install_hailort.sh \ - /deps/install_hailort.sh - -# Use deps as the base image -FROM deps AS h8l-frigate - -# Copy the wheels from the wheels stage -COPY --from=h8l-wheels /h8l-wheels /deps/h8l-wheels -COPY --from=hailort /hailo-wheels /deps/hailo-wheels -COPY --from=hailort /rootfs/ / - -# Install the wheels -RUN python3 -m pip config set global.break-system-packages true -RUN pip3 install -U /deps/h8l-wheels/*.whl -RUN pip3 install -U /deps/hailo-wheels/*.whl - -# Copy base files from the rootfs stage -COPY --from=rootfs / / - -# Set workdir -WORKDIR /opt/frigate/ diff --git a/docker/hailo8l/h8l.hcl b/docker/hailo8l/h8l.hcl deleted file mode 100644 index 91f6d13c6..000000000 --- a/docker/hailo8l/h8l.hcl +++ /dev/null @@ -1,34 +0,0 @@ -target wget { - dockerfile = "docker/main/Dockerfile" - platforms = ["linux/arm64","linux/amd64"] - target = "wget" -} - -target wheels { - dockerfile = "docker/main/Dockerfile" - platforms = ["linux/arm64","linux/amd64"] - target = "wheels" -} - -target deps { - dockerfile = "docker/main/Dockerfile" - platforms = ["linux/arm64","linux/amd64"] - target = "deps" -} - -target rootfs { - dockerfile = "docker/main/Dockerfile" - platforms = ["linux/arm64","linux/amd64"] - target = "rootfs" -} - -target h8l { - dockerfile = "docker/hailo8l/Dockerfile" - contexts = { - wget = "target:wget" - wheels = "target:wheels" - deps = "target:deps" - rootfs = "target:rootfs" - } - platforms = ["linux/arm64","linux/amd64"] -} diff --git a/docker/hailo8l/h8l.mk b/docker/hailo8l/h8l.mk deleted file mode 100644 index 318771802..000000000 --- a/docker/hailo8l/h8l.mk +++ /dev/null @@ -1,15 +0,0 @@ -BOARDS += h8l - -local-h8l: version - docker buildx bake --file=docker/hailo8l/h8l.hcl h8l \ - --set h8l.tags=frigate:latest-h8l \ - --load - -build-h8l: version - docker buildx bake --file=docker/hailo8l/h8l.hcl h8l \ - --set h8l.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-h8l - -push-h8l: build-h8l - docker buildx bake --file=docker/hailo8l/h8l.hcl h8l \ - --set h8l.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-h8l \ - --push \ No newline at end of file diff --git a/docker/hailo8l/install_hailort.sh b/docker/hailo8l/install_hailort.sh deleted file mode 100755 index c0198b34c..000000000 --- 
a/docker/hailo8l/install_hailort.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash - -set -euxo pipefail - -hailo_version="4.20.0" - -if [[ "${TARGETARCH}" == "amd64" ]]; then - arch="x86_64" -elif [[ "${TARGETARCH}" == "arm64" ]]; then - arch="aarch64" -fi - -wget -qO- "https://github.com/frigate-nvr/hailort/releases/download/v${hailo_version}/hailort-${TARGETARCH}.tar.gz" | - tar -C / -xzf - - -mkdir -p /hailo-wheels - -wget -P /hailo-wheels/ "https://github.com/frigate-nvr/hailort/releases/download/v${hailo_version}/hailort-${hailo_version}-cp311-cp311-linux_${arch}.whl" - diff --git a/docker/hailo8l/requirements-wheels-h8l.txt b/docker/hailo8l/requirements-wheels-h8l.txt deleted file mode 100644 index e125f9e8b..000000000 --- a/docker/hailo8l/requirements-wheels-h8l.txt +++ /dev/null @@ -1,12 +0,0 @@ -appdirs==1.4.* -argcomplete==2.0.* -contextlib2==0.6.* -distlib==0.3.* -filelock==3.8.* -future==0.18.* -importlib-metadata==5.1.* -importlib-resources==5.1.* -netaddr==0.8.* -netifaces==0.10.* -verboselogs==1.7.* -virtualenv==20.17.* diff --git a/docker/main/Dockerfile b/docker/main/Dockerfile index e6dee8380..4c3416789 100644 --- a/docker/main/Dockerfile +++ b/docker/main/Dockerfile @@ -170,6 +170,9 @@ RUN /build_pysqlite3.sh COPY docker/main/requirements-wheels.txt /requirements-wheels.txt RUN pip3 wheel --wheel-dir=/wheels -r /requirements-wheels.txt +# Install HailoRT & Wheels +RUN --mount=type=bind,source=docker/main/install_hailort.sh,target=/deps/install_hailort.sh \ + /deps/install_hailort.sh # Collect deps in a single layer FROM scratch AS deps-rootfs @@ -180,6 +183,7 @@ COPY --from=libusb-build /usr/local/lib /usr/local/lib COPY --from=tempio /rootfs/ / COPY --from=s6-overlay /rootfs/ / COPY --from=models /rootfs/ / +COPY --from=wheels /rootfs/ / COPY docker/main/rootfs/ / diff --git a/docker/main/install_hailort.sh b/docker/main/install_hailort.sh new file mode 100755 index 000000000..e050bd591 --- /dev/null +++ b/docker/main/install_hailort.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +set -euxo pipefail + +hailo_version="4.20.0" + +if [[ "${TARGETARCH}" == "amd64" ]]; then + arch="x86_64" +elif [[ "${TARGETARCH}" == "arm64" ]]; then + arch="aarch64" +fi + +wget -qO- "https://github.com/frigate-nvr/hailort/releases/download/v${hailo_version}/hailort-${TARGETARCH}.tar.gz" | tar -C / -xzf - +wget -P /wheels/ "https://github.com/frigate-nvr/hailort/releases/download/v${hailo_version}/hailort-${hailo_version}-cp311-cp311-linux_${arch}.whl" diff --git a/docker/main/requirements-wheels.txt b/docker/main/requirements-wheels.txt index 40a2f1d8b..ab91c94a7 100644 --- a/docker/main/requirements-wheels.txt +++ b/docker/main/requirements-wheels.txt @@ -54,3 +54,16 @@ pywebpush == 2.0.* pyclipper == 1.3.* shapely == 2.0.* prometheus-client == 0.21.* +# HailoRT Wheels +appdirs==1.4.* +argcomplete==2.0.* +contextlib2==0.6.* +distlib==0.3.* +filelock==3.8.* +future==0.18.* +importlib-metadata==5.1.* +importlib-resources==5.1.* +netaddr==0.8.* +netifaces==0.10.* +verboselogs==1.7.* +virtualenv==20.17.* diff --git a/docs/docs/frigate/installation.md b/docs/docs/frigate/installation.md index 2c139d2bd..e14559697 100644 --- a/docs/docs/frigate/installation.md +++ b/docs/docs/frigate/installation.md @@ -117,7 +117,7 @@ For other installations, follow these steps for installation: #### Setup -To set up Frigate, follow the default installation instructions, but use a Docker image with the `-h8l` suffix, for example: `ghcr.io/blakeblackshear/frigate:stable-h8l` +To set up Frigate, follow the default installation 
instructions, for example: `ghcr.io/blakeblackshear/frigate:stable` Next, grant Docker permissions to access your hardware by adding the following lines to your `docker-compose.yml` file: From a3ede3cf8a6f462d55d27db3b8939d7b7d71ce4a Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Tue, 11 Feb 2025 10:08:28 -0600 Subject: [PATCH 05/21] Snap points to edges and create object mask from bounding box (#16488) --- .../overlay/detail/ObjectLifecycle.tsx | 133 +++++++++++------- .../settings/MotionMaskEditPane.tsx | 6 + .../settings/ObjectMaskEditPane.tsx | 6 + web/src/components/settings/PolygonCanvas.tsx | 62 +++++++- web/src/components/settings/PolygonDrawer.tsx | 25 +++- .../settings/PolygonEditControls.tsx | 25 ++++ web/src/components/settings/ZoneEditPane.tsx | 6 + web/src/pages/Settings.tsx | 4 +- web/src/utils/canvasUtil.ts | 70 +++++++++ web/src/views/settings/MasksAndZonesView.tsx | 57 +++++++- 10 files changed, 329 insertions(+), 65 deletions(-) diff --git a/web/src/components/overlay/detail/ObjectLifecycle.tsx b/web/src/components/overlay/detail/ObjectLifecycle.tsx index d61b5fa56..7481607eb 100644 --- a/web/src/components/overlay/detail/ObjectLifecycle.tsx +++ b/web/src/components/overlay/detail/ObjectLifecycle.tsx @@ -45,6 +45,13 @@ import { } from "@/components/ui/tooltip"; import { AnnotationSettingsPane } from "./AnnotationSettingsPane"; import { TooltipPortal } from "@radix-ui/react-tooltip"; +import { + ContextMenu, + ContextMenuContent, + ContextMenuItem, + ContextMenuTrigger, +} from "@/components/ui/context-menu"; +import { useNavigate } from "react-router-dom"; type ObjectLifecycleProps = { className?: string; @@ -68,6 +75,7 @@ export default function ObjectLifecycle({ const { data: config } = useSWR("config"); const apiHost = useApiHost(); + const navigate = useNavigate(); const [imgLoaded, setImgLoaded] = useState(false); const imgRef = useRef(null); @@ -293,62 +301,83 @@ export default function ObjectLifecycle({ imgLoaded ? "visible" : "invisible", )} > - setImgLoaded(true)} - onError={() => setHasError(true)} - /> + + + setImgLoaded(true)} + onError={() => setHasError(true)} + /> - {showZones && - lifecycleZones?.map((zone) => ( -
- - ( +
- -
- ))} + key={zone} + > + + + +
+ ))} - {boxStyle && ( -
-
-
- )} + {boxStyle && ( +
+
+
+ )} + + + +
+ navigate( + `/settings?page=masks%20/%20zones&camera=${event.camera}&object_mask=${eventSequence?.[current].data.box}`, + ) + } + > +
Create Object Mask
+
+
+
+
diff --git a/web/src/components/settings/MotionMaskEditPane.tsx b/web/src/components/settings/MotionMaskEditPane.tsx index 03d7f99b0..3b73c6a23 100644 --- a/web/src/components/settings/MotionMaskEditPane.tsx +++ b/web/src/components/settings/MotionMaskEditPane.tsx @@ -33,6 +33,8 @@ type MotionMaskEditPaneProps = { setIsLoading: React.Dispatch>; onSave?: () => void; onCancel?: () => void; + snapPoints: boolean; + setSnapPoints: React.Dispatch>; }; export default function MotionMaskEditPane({ @@ -45,6 +47,8 @@ export default function MotionMaskEditPane({ setIsLoading, onSave, onCancel, + snapPoints, + setSnapPoints, }: MotionMaskEditPaneProps) { const { data: config, mutate: updateConfig } = useSWR("config"); @@ -252,6 +256,8 @@ export default function MotionMaskEditPane({ polygons={polygons} setPolygons={setPolygons} activePolygonIndex={activePolygonIndex} + snapPoints={snapPoints} + setSnapPoints={setSnapPoints} /> )} diff --git a/web/src/components/settings/ObjectMaskEditPane.tsx b/web/src/components/settings/ObjectMaskEditPane.tsx index 44b858183..2c63d2e63 100644 --- a/web/src/components/settings/ObjectMaskEditPane.tsx +++ b/web/src/components/settings/ObjectMaskEditPane.tsx @@ -49,6 +49,8 @@ type ObjectMaskEditPaneProps = { setIsLoading: React.Dispatch>; onSave?: () => void; onCancel?: () => void; + snapPoints: boolean; + setSnapPoints: React.Dispatch>; }; export default function ObjectMaskEditPane({ @@ -61,6 +63,8 @@ export default function ObjectMaskEditPane({ setIsLoading, onSave, onCancel, + snapPoints, + setSnapPoints, }: ObjectMaskEditPaneProps) { const { data: config, mutate: updateConfig } = useSWR("config"); @@ -272,6 +276,8 @@ export default function ObjectMaskEditPane({ polygons={polygons} setPolygons={setPolygons} activePolygonIndex={activePolygonIndex} + snapPoints={snapPoints} + setSnapPoints={setSnapPoints} /> )} diff --git a/web/src/components/settings/PolygonCanvas.tsx b/web/src/components/settings/PolygonCanvas.tsx index d2a0a46b5..9adc2f09e 100644 --- a/web/src/components/settings/PolygonCanvas.tsx +++ b/web/src/components/settings/PolygonCanvas.tsx @@ -6,6 +6,7 @@ import type { KonvaEventObject } from "konva/lib/Node"; import { Polygon, PolygonType } from "@/types/canvas"; import { useApiHost } from "@/api"; import ActivityIndicator from "@/components/indicators/activity-indicator"; +import { snapPointToLines } from "@/utils/canvasUtil"; type PolygonCanvasProps = { containerRef: RefObject; @@ -18,6 +19,7 @@ type PolygonCanvasProps = { hoveredPolygonIndex: number | null; selectedZoneMask: PolygonType[] | undefined; activeLine?: number; + snapPoints: boolean; }; export function PolygonCanvas({ @@ -31,6 +33,7 @@ export function PolygonCanvas({ hoveredPolygonIndex, selectedZoneMask, activeLine, + snapPoints, }: PolygonCanvasProps) { const [isLoaded, setIsLoaded] = useState(false); const [image, setImage] = useState(); @@ -156,9 +159,23 @@ export function PolygonCanvas({ intersection?.getClassName() !== "Circle") || (activePolygon.isFinished && intersection?.name() == "unfilled-line") ) { + let newPoint = [mousePos.x, mousePos.y]; + + if (snapPoints) { + // Snap to other polygons' edges + const otherPolygons = polygons.filter( + (_, i) => i !== activePolygonIndex, + ); + const snappedPos = snapPointToLines(newPoint, otherPolygons, 10); + + if (snappedPos) { + newPoint = snappedPos; + } + } + const { updatedPoints, updatedPointsOrder } = addPointToPolygon( activePolygon, - [mousePos.x, mousePos.y], + newPoint, ); updatedPolygons[activePolygonIndex] = { @@ -184,11 +201,24 
@@ export function PolygonCanvas({ if (stage) { // we add an unfilled line for adding points when finished const index = e.target.index - (activePolygon.isFinished ? 2 : 1); - const pos = [e.target._lastPos!.x, e.target._lastPos!.y]; - if (pos[0] < 0) pos[0] = 0; - if (pos[1] < 0) pos[1] = 0; - if (pos[0] > stage.width()) pos[0] = stage.width(); - if (pos[1] > stage.height()) pos[1] = stage.height(); + let pos = [e.target._lastPos!.x, e.target._lastPos!.y]; + + if (snapPoints) { + // Snap to other polygons' edges + const otherPolygons = polygons.filter( + (_, i) => i !== activePolygonIndex, + ); + const snappedPos = snapPointToLines(pos, otherPolygons, 10); // 10 is the snap threshold + + if (snappedPos) { + pos = snappedPos; + } + } + + // Constrain to stage boundaries + pos[0] = Math.max(0, Math.min(pos[0], stage.width())); + pos[1] = Math.max(0, Math.min(pos[1], stage.height())); + updatedPolygons[activePolygonIndex] = { ...activePolygon, points: [ @@ -291,6 +321,16 @@ export function PolygonCanvas({ handlePointDragMove={handlePointDragMove} handleGroupDragEnd={handleGroupDragEnd} activeLine={activeLine} + snapPoints={snapPoints} + snapToLines={(point) => + snapPoints + ? snapPointToLines( + point, + polygons.filter((_, i) => i !== index), + 10, + ) + : null + } /> ), )} @@ -310,6 +350,16 @@ export function PolygonCanvas({ handlePointDragMove={handlePointDragMove} handleGroupDragEnd={handleGroupDragEnd} activeLine={activeLine} + snapPoints={snapPoints} + snapToLines={(point) => + snapPoints + ? snapPointToLines( + point, + polygons.filter((_, i) => i !== activePolygonIndex), + 10, + ) + : null + } /> )} diff --git a/web/src/components/settings/PolygonDrawer.tsx b/web/src/components/settings/PolygonDrawer.tsx index 1ae3d4601..9cc5649a6 100644 --- a/web/src/components/settings/PolygonDrawer.tsx +++ b/web/src/components/settings/PolygonDrawer.tsx @@ -28,6 +28,8 @@ type PolygonDrawerProps = { handlePointDragMove: (e: KonvaEventObject) => void; handleGroupDragEnd: (e: KonvaEventObject) => void; activeLine?: number; + snapToLines: (point: number[]) => number[] | null; + snapPoints: boolean; }; export default function PolygonDrawer({ @@ -41,6 +43,8 @@ export default function PolygonDrawer({ handlePointDragMove, handleGroupDragEnd, activeLine, + snapToLines, + snapPoints, }: PolygonDrawerProps) { const vertexRadius = 6; const flattenedPoints = useMemo(() => flattenPoints(points), [points]); @@ -218,15 +222,32 @@ export default function PolygonDrawer({ onMouseOver={handleMouseOverPoint} onMouseOut={handleMouseOutPoint} draggable={isActive} - onDragMove={isActive ? handlePointDragMove : undefined} + onDragMove={(e) => { + if (isActive) { + if (snapPoints) { + const snappedPos = snapToLines([e.target.x(), e.target.y()]); + if (snappedPos) { + e.target.position({ x: snappedPos[0], y: snappedPos[1] }); + } + } + handlePointDragMove(e); + } + }} dragBoundFunc={(pos) => { if (stageRef.current) { - return dragBoundFunc( + const boundPos = dragBoundFunc( stageRef.current.width(), stageRef.current.height(), vertexRadius, pos, ); + if (snapPoints) { + const snappedPos = snapToLines([boundPos.x, boundPos.y]); + return snappedPos + ? 
{ x: snappedPos[0], y: snappedPos[1] } + : boundPos; + } + return boundPos; } else { return pos; } diff --git a/web/src/components/settings/PolygonEditControls.tsx b/web/src/components/settings/PolygonEditControls.tsx index 55017e3bb..e3055b654 100644 --- a/web/src/components/settings/PolygonEditControls.tsx +++ b/web/src/components/settings/PolygonEditControls.tsx @@ -2,17 +2,23 @@ import { Polygon } from "@/types/canvas"; import { Tooltip, TooltipContent, TooltipTrigger } from "../ui/tooltip"; import { MdOutlineRestartAlt, MdUndo } from "react-icons/md"; import { Button } from "../ui/button"; +import { TbPolygon, TbPolygonOff } from "react-icons/tb"; +import { cn } from "@/lib/utils"; type PolygonEditControlsProps = { polygons: Polygon[]; setPolygons: React.Dispatch>; activePolygonIndex: number | undefined; + snapPoints: boolean; + setSnapPoints: React.Dispatch>; }; export default function PolygonEditControls({ polygons, setPolygons, activePolygonIndex, + snapPoints, + setSnapPoints, }: PolygonEditControlsProps) { const undo = () => { if (activePolygonIndex === undefined || !polygons) { @@ -97,6 +103,25 @@ export default function PolygonEditControls({ Reset + + + + + + {snapPoints ? "Don't snap points" : "Snap points"} + + ); } diff --git a/web/src/components/settings/ZoneEditPane.tsx b/web/src/components/settings/ZoneEditPane.tsx index 9caf04273..247ae8991 100644 --- a/web/src/components/settings/ZoneEditPane.tsx +++ b/web/src/components/settings/ZoneEditPane.tsx @@ -41,6 +41,8 @@ type ZoneEditPaneProps = { onSave?: () => void; onCancel?: () => void; setActiveLine: React.Dispatch>; + snapPoints: boolean; + setSnapPoints: React.Dispatch>; }; export default function ZoneEditPane({ @@ -54,6 +56,8 @@ export default function ZoneEditPane({ onSave, onCancel, setActiveLine, + snapPoints, + setSnapPoints, }: ZoneEditPaneProps) { const { data: config, mutate: updateConfig } = useSWR("config"); @@ -483,6 +487,8 @@ export default function ZoneEditPane({ polygons={polygons} setPolygons={setPolygons} activePolygonIndex={activePolygonIndex} + snapPoints={snapPoints} + setSnapPoints={setSnapPoints} /> )} diff --git a/web/src/pages/Settings.tsx b/web/src/pages/Settings.tsx index e64620baa..0fcc0414e 100644 --- a/web/src/pages/Settings.tsx +++ b/web/src/pages/Settings.tsx @@ -124,7 +124,7 @@ export default function Settings() { if (allSettingsViews.includes(page as SettingsType)) { setPage(page as SettingsType); } - return true; + return false; }); useSearchEffect("camera", (camera: string) => { @@ -132,7 +132,7 @@ export default function Settings() { if (cameraNames.includes(camera)) { setSelectedCamera(camera); } - return true; + return false; }); useEffect(() => { diff --git a/web/src/utils/canvasUtil.ts b/web/src/utils/canvasUtil.ts index 12bd6b167..a9d3d8b3b 100644 --- a/web/src/utils/canvasUtil.ts +++ b/web/src/utils/canvasUtil.ts @@ -1,4 +1,5 @@ import { Vector2d } from "konva/lib/types"; +import { Polygon } from "@/types/canvas"; export const getAveragePoint = (points: number[]): Vector2d => { let totalX = 0; @@ -100,3 +101,72 @@ export const masksAreIdentical = (arr1: string[], arr2: string[]): boolean => { } return true; }; + +export function snapPointToLines( + point: number[], + polygons: Polygon[], + threshold: number, +): number[] | null { + for (const polygon of polygons) { + if (!polygon.isFinished) continue; + + for (let i = 0; i < polygon.points.length; i++) { + const start = polygon.points[i]; + const end = polygon.points[(i + 1) % polygon.points.length]; + + const snappedPoint = 
snapPointToLine(point, start, end, threshold); + if (snappedPoint) { + return snappedPoint; + } + } + } + + return null; +} + +function snapPointToLine( + point: number[], + lineStart: number[], + lineEnd: number[], + threshold: number, +): number[] | null { + const [x, y] = point; + const [x1, y1] = lineStart; + const [x2, y2] = lineEnd; + + const A = x - x1; + const B = y - y1; + const C = x2 - x1; + const D = y2 - y1; + + const dot = A * C + B * D; + const lenSq = C * C + D * D; + let param = -1; + + if (lenSq !== 0) { + param = dot / lenSq; + } + + let xx, yy; + + if (param < 0) { + xx = x1; + yy = y1; + } else if (param > 1) { + xx = x2; + yy = y2; + } else { + xx = x1 + param * C; + yy = y1 + param * D; + } + + const dx = x - xx; + const dy = y - yy; + const distance = Math.sqrt(dx * dx + dy * dy); + + if (distance <= threshold) { + return [xx, yy]; + } + + return null; +} diff --git a/web/src/views/settings/MasksAndZonesView.tsx b/web/src/views/settings/MasksAndZonesView.tsx index 4e649a3cd..27e495766 100644 --- a/web/src/views/settings/MasksAndZonesView.tsx +++ b/web/src/views/settings/MasksAndZonesView.tsx @@ -37,6 +37,7 @@ import PolygonItem from "@/components/settings/PolygonItem"; import { Link } from "react-router-dom"; import { isDesktop } from "react-device-detect"; import { StatusBarMessagesContext } from "@/context/statusbar-provider"; +import { useSearchEffect } from "@/hooks/use-overlay-state"; type MasksAndZoneViewProps = { selectedCamera: string; @@ -62,6 +63,7 @@ export default function MasksAndZonesView({ const containerRef = useRef(null); const [editPane, setEditPane] = useState(undefined); const [activeLine, setActiveLine] = useState(); + const [snapPoints, setSnapPoints] = useState(false); const { addMessage } = useContext(StatusBarMessagesContext)!; @@ -142,7 +144,7 @@ export default function MasksAndZonesView({ } }, [scaledHeight, aspectRatio]); - const handleNewPolygon = (type: PolygonType) => { + const handleNewPolygon = (type: PolygonType, coordinates?: number[][]) => { if (!cameraConfig) { return; } @@ -161,9 +163,9 @@ export default function MasksAndZonesView({ setEditingPolygons([ ...(allPolygons || []), { - points: [], + points: coordinates ?? [], distances: [], - isFinished: false, + isFinished: coordinates ? 
true : false, type, typeIndex: 9999, name: "", @@ -373,6 +375,48 @@ export default function MasksAndZonesView({ } }, [selectedCamera]); + useSearchEffect("object_mask", (coordinates: string) => { + if (!scaledWidth || !scaledHeight || isLoading) { + return false; + } + // convert box points string to points array + const points = coordinates.split(",").map((p) => parseFloat(p)); + + const [x1, y1, w, h] = points; + + // bottom center + const centerX = x1 + w / 2; + const bottomY = y1 + h; + + const centerXAbs = centerX * scaledWidth; + const bottomYAbs = bottomY * scaledHeight; + + // padding and clamp + const minPadding = 0.1 * w * scaledWidth; + const maxPadding = 0.3 * w * scaledWidth; + const padding = Math.min( + Math.max(minPadding, 0.15 * w * scaledWidth), + maxPadding, + ); + + const top = Math.max(0, bottomYAbs - padding); + const bottom = Math.min(scaledHeight, bottomYAbs + padding); + const left = Math.max(0, centerXAbs - padding); + const right = Math.min(scaledWidth, centerXAbs + padding); + + const paddedBox = [ + [left, top], + [right, top], + [right, bottom], + [left, bottom], + ]; + + setEditPane("object_mask"); + setActivePolygonIndex(undefined); + handleNewPolygon("object_mask", paddedBox); + return true; + }); + useEffect(() => { document.title = "Mask and Zone Editor - Frigate"; }, []); @@ -399,6 +443,8 @@ export default function MasksAndZonesView({ onCancel={handleCancel} onSave={handleSave} setActiveLine={setActiveLine} + snapPoints={snapPoints} + setSnapPoints={setSnapPoints} /> )} {editPane == "motion_mask" && ( @@ -412,6 +458,8 @@ export default function MasksAndZonesView({ setIsLoading={setIsLoading} onCancel={handleCancel} onSave={handleSave} + snapPoints={snapPoints} + setSnapPoints={setSnapPoints} /> )} {editPane == "object_mask" && ( @@ -425,6 +473,8 @@ export default function MasksAndZonesView({ setIsLoading={setIsLoading} onCancel={handleCancel} onSave={handleSave} + snapPoints={snapPoints} + setSnapPoints={setSnapPoints} /> )} {editPane === undefined && ( @@ -662,6 +712,7 @@ export default function MasksAndZonesView({ hoveredPolygonIndex={hoveredPolygonIndex} selectedZoneMask={selectedZoneMask} activeLine={activeLine} + snapPoints={true} /> ) : ( From 0f0b2687af2341f368a948b8501865259b2f082e Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Tue, 11 Feb 2025 11:23:19 -0700 Subject: [PATCH 06/21] Add support for YoloV9 to OpenVINO (#16495) * Add support for yolov9 to OpenVINO * Cleanup detector docs * Fix link --- docs/docs/configuration/object_detectors.md | 85 ++++++++++++++------- frigate/detectors/plugins/openvino.py | 20 +++-- 2 files changed, 70 insertions(+), 35 deletions(-) diff --git a/docs/docs/configuration/object_detectors.md b/docs/docs/configuration/object_detectors.md index c04b92474..b77590f41 100644 --- a/docs/docs/configuration/object_detectors.md +++ b/docs/docs/configuration/object_detectors.md @@ -201,15 +201,7 @@ This detector also supports YOLOX. Frigate does not come with any YOLOX models p #### YOLO-NAS -[YOLO-NAS](https://github.com/Deci-AI/super-gradients/blob/master/YOLONAS.md) models are supported, but not included by default. You can build and download a compatible model with pre-trained weights using [this notebook](https://github.com/blakeblackshear/frigate/blob/dev/notebooks/YOLO_NAS_Pretrained_Export.ipynb) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/blakeblackshear/frigate/blob/dev/notebooks/YOLO_NAS_Pretrained_Export.ipynb). 
- -:::warning - -The pre-trained YOLO-NAS weights from DeciAI are subject to their license and can't be used commercially. For more information, see: https://docs.deci.ai/super-gradients/latest/LICENSE.YOLONAS.html - -::: - -The input image size in this notebook is set to 320x320. This results in lower CPU usage and faster inference times without impacting performance in most cases due to the way Frigate crops video frames to areas of interest before running detection. The notebook and config can be updated to 640x640 if desired. +[YOLO-NAS](https://github.com/Deci-AI/super-gradients/blob/master/YOLONAS.md) models are supported, but not included by default. See [the models section](#downloading-yolo-nas-model) for more information on downloading the YOLO-NAS model for use in Frigate. After placing the downloaded onnx model in your config folder, you can use the following configuration: @@ -231,6 +223,36 @@ model: Note that the labelmap uses a subset of the complete COCO label set that has only 80 objects. +#### YOLOv9 + +[YOLOv9](https://github.com/MultimediaTechLab/YOLO) models are supported, but not included by default. + +:::tip + +The YOLOv9 detector has been designed to support YOLOv9 models, but may support other YOLO model architectures as well. + +::: + +After placing the downloaded onnx model in your config folder, you can use the following configuration: + +```yaml +detectors: + ov: + type: openvino + device: GPU + +model: + model_type: yolov9 + width: 640 # <--- should match the imgsize set during model export + height: 640 # <--- should match the imgsize set during model export + input_tensor: nchw + input_dtype: float + path: /config/model_cache/yolov9-t.onnx + labelmap_path: /labelmap/coco-80.txt +``` + +Note that the labelmap uses a subset of the complete COCO label set that has only 80 objects. + ## NVidia TensorRT Detector Nvidia GPUs may be used for object detection using the TensorRT libraries. Due to the size of the additional libraries, this detector is only provided in images with the `-tensorrt` tag suffix, e.g. `ghcr.io/blakeblackshear/frigate:stable-tensorrt`. This detector is designed to work with Yolo models for object detection. @@ -265,6 +287,8 @@ If your GPU does not support FP16 operations, you can pass the environment varia Specific models can be selected by passing an environment variable to the `docker run` command or in your `docker-compose.yml` file. Use the form `-e YOLO_MODELS=yolov4-416,yolov4-tiny-416` to select one or more model names. The models available are shown below. +
+<details>
+<summary>Available Models</summary>

 ```
 yolov3-288
 yolov3-416
 yolov3-608
 yolov3-spp-288
 yolov3-spp-416
 yolov3-spp-608
 yolov3-tiny-288
 yolov3-tiny-416
 yolov4-288
 yolov4-416
 yolov4-608
 yolov4-csp-256
 yolov4-csp-512
 yolov4-p5-448
 yolov4-p5-896
 yolov4-tiny-288
 yolov4-tiny-416
 yolov4x-mish-320
 yolov4x-mish-640
 yolov7-tiny-288
 yolov7-tiny-416
 yolov7-640
 yolov7-320
 yolov7x-640
 yolov7x-320
 ```

+</details>
An example `docker-compose.yml` fragment that converts the `yolov4-608` and `yolov7x-640` models for a Pascal card would look something like this: @@ -420,15 +445,7 @@ There is no default model provided, the following formats are supported: #### YOLO-NAS -[YOLO-NAS](https://github.com/Deci-AI/super-gradients/blob/master/YOLONAS.md) models are supported, but not included by default. You can build and download a compatible model with pre-trained weights using [this notebook](https://github.com/frigate/blob/dev/notebooks/YOLO_NAS_Pretrained_Export.ipynb) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/blakeblackshear/frigate/blob/dev/notebooks/YOLO_NAS_Pretrained_Export.ipynb). - -:::warning - -The pre-trained YOLO-NAS weights from DeciAI are subject to their license and can't be used commercially. For more information, see: https://docs.deci.ai/super-gradients/latest/LICENSE.YOLONAS.html - -::: - -The input image size in this notebook is set to 320x320. This results in lower CPU usage and faster inference times without impacting performance in most cases due to the way Frigate crops video frames to areas of interest before running detection. The notebook and config can be updated to 640x640 if desired. +[YOLO-NAS](https://github.com/Deci-AI/super-gradients/blob/master/YOLONAS.md) models are supported, but not included by default. See [the models section](#downloading-yolo-nas-model) for more information on downloading the YOLO-NAS model for use in Frigate. After placing the downloaded onnx model in your config folder, you can use the following configuration: @@ -490,15 +507,7 @@ There is no default model provided, the following formats are supported: #### YOLO-NAS -[YOLO-NAS](https://github.com/Deci-AI/super-gradients/blob/master/YOLONAS.md) models are supported, but not included by default. You can build and download a compatible model with pre-trained weights using [this notebook](https://github.com/frigate/blob/dev/notebooks/YOLO_NAS_Pretrained_Export.ipynb) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/blakeblackshear/frigate/blob/dev/notebooks/YOLO_NAS_Pretrained_Export.ipynb). - -:::warning - -The pre-trained YOLO-NAS weights from DeciAI are subject to their license and can't be used commercially. For more information, see: https://docs.deci.ai/super-gradients/latest/LICENSE.YOLONAS.html - -::: - -The input image size in this notebook is set to 320x320. This results in lower CPU usage and faster inference times without impacting performance in most cases due to the way Frigate crops video frames to areas of interest before running detection. The notebook and config can be updated to 640x640 if desired. +[YOLO-NAS](https://github.com/Deci-AI/super-gradients/blob/master/YOLONAS.md) models are supported, but not included by default. See [the models section](#downloading-yolo-nas-model) for more information on downloading the YOLO-NAS model for use in Frigate. After placing the downloaded onnx model in your config folder, you can use the following configuration: @@ -716,4 +725,24 @@ Explanation of the paramters: - `soc`: the SoC this model was build for (e.g. "rk3588") - `tk_version`: Version of `rknn-toolkit2` (e.g. "2.3.0") - **example**: Specifying `output_name = "frigate-{quant}-{input_basename}-{soc}-v{tk_version}"` could result in a model called `frigate-i8-my_model-rk3588-v2.3.0.rknn`. 
-- `config`: Configuration passed to `rknn-toolkit2` for model conversion. For an explanation of all available parameters have a look at section "2.2. Model configuration" of [this manual](https://github.com/MarcA711/rknn-toolkit2/releases/download/v2.3.0/03_Rockchip_RKNPU_API_Reference_RKNN_Toolkit2_V2.3.0_EN.pdf). \ No newline at end of file +- `config`: Configuration passed to `rknn-toolkit2` for model conversion. For an explanation of all available parameters have a look at section "2.2. Model configuration" of [this manual](https://github.com/MarcA711/rknn-toolkit2/releases/download/v2.3.0/03_Rockchip_RKNPU_API_Reference_RKNN_Toolkit2_V2.3.0_EN.pdf). + +# Models + +Some model types are not included in Frigate by default. + +## Downloading Models + +Here are some tips for getting different model types + +### Downloading YOLO-NAS Model + +You can build and download a compatible model with pre-trained weights using [this notebook](https://github.com/blakeblackshear/frigate/blob/dev/notebooks/YOLO_NAS_Pretrained_Export.ipynb) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/blakeblackshear/frigate/blob/dev/notebooks/YOLO_NAS_Pretrained_Export.ipynb). + +:::warning + +The pre-trained YOLO-NAS weights from DeciAI are subject to their license and can't be used commercially. For more information, see: https://docs.deci.ai/super-gradients/latest/LICENSE.YOLONAS.html + +::: + +The input image size in this notebook is set to 320x320. This results in lower CPU usage and faster inference times without impacting performance in most cases due to the way Frigate crops video frames to areas of interest before running detection. The notebook and config can be updated to 640x640 if desired. \ No newline at end of file diff --git a/frigate/detectors/plugins/openvino.py b/frigate/detectors/plugins/openvino.py index 51e48530b..27be6b9bd 100644 --- a/frigate/detectors/plugins/openvino.py +++ b/frigate/detectors/plugins/openvino.py @@ -9,6 +9,7 @@ from typing_extensions import Literal from frigate.detectors.detection_api import DetectionApi from frigate.detectors.detector_config import BaseDetectorConfig, ModelTypeEnum +from frigate.util.model import post_process_yolov9 logger = logging.getLogger(__name__) @@ -22,7 +23,12 @@ class OvDetectorConfig(BaseDetectorConfig): class OvDetector(DetectionApi): type_key = DETECTOR_KEY - supported_models = [ModelTypeEnum.ssd, ModelTypeEnum.yolonas, ModelTypeEnum.yolox] + supported_models = [ + ModelTypeEnum.ssd, + ModelTypeEnum.yolonas, + ModelTypeEnum.yolov9, + ModelTypeEnum.yolox, + ] def __init__(self, detector_config: OvDetectorConfig): self.ov_core = ov.Core() @@ -160,8 +166,7 @@ class OvDetector(DetectionApi): if self.model_invalid: return detections - - if self.ov_model_type == ModelTypeEnum.ssd: + elif self.ov_model_type == ModelTypeEnum.ssd: results = infer_request.get_output_tensor(0).data[0][0] for i, (_, class_id, score, xmin, ymin, xmax, ymax) in enumerate(results): @@ -176,8 +181,7 @@ class OvDetector(DetectionApi): xmax, ] return detections - - if self.ov_model_type == ModelTypeEnum.yolonas: + elif self.ov_model_type == ModelTypeEnum.yolonas: predictions = infer_request.get_output_tensor(0).data for i, prediction in enumerate(predictions): @@ -196,8 +200,10 @@ class OvDetector(DetectionApi): x_max / self.w, ] return detections - - if self.ov_model_type == ModelTypeEnum.yolox: + elif self.ov_model_type == ModelTypeEnum.yolov9: + out_tensor = infer_request.get_output_tensor(0).data + return 
post_process_yolov9(out_tensor, self.w, self.h) + elif self.ov_model_type == ModelTypeEnum.yolox: out_tensor = infer_request.get_output_tensor() # [x, y, h, w, box_score, class_no_1, ..., class_no_80], results = out_tensor.data From f3e2cf0a588bc7f7d3195fb2c99d692c30095b19 Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Tue, 11 Feb 2025 14:23:41 -0600 Subject: [PATCH 07/21] Small fix and docs update (#16499) * Small docs tweak and bugfix * don't remove page arg either --- docs/docs/configuration/zones.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/docs/configuration/zones.md b/docs/docs/configuration/zones.md index 1d1e66c27..8dd63f0f3 100644 --- a/docs/docs/configuration/zones.md +++ b/docs/docs/configuration/zones.md @@ -157,8 +157,8 @@ The average speed of your object as it moved through your zone is saved in Friga #### Best practices and caveats -- Speed estimation works best with a straight road or path when your object travels in a straight line across that path. If your object makes turns, speed estimation may not be accurate. -- Create a zone where the bottom center of your object's bounding box travels directly through it and does not become obscured at any time. +- Speed estimation works best with a straight road or path when your object travels in a straight line across that path. Avoid creating your zone near intersections or anywhere that objects would make a turn. If the bounding box changes shape (either because the object made a turn or became partially obscured, for example), speed estimation will not be accurate. +- Create a zone where the bottom center of your object's bounding box travels directly through it and does not become obscured at any time. See the photo example above. - Depending on the size and location of your zone, you may want to decrease the zone's `inertia` value from the default of 3. - The more accurate your real-world dimensions can be measured, the more accurate speed estimation will be. However, due to the way Frigate's tracking algorithm works, you may need to tweak the real-world distance values so that estimated speeds better match real-world speeds. - Once an object leaves the zone, speed accuracy will likely decrease due to perspective distortion and misalignment with the calibrated area. Therefore, speed values will show as a zero through MQTT and will not be visible on the debug view when an object is outside of a speed tracking zone. 
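To make the guidance above concrete, here is a minimal sketch of a speed estimation zone. It assumes the `distances` key (the real-world length of each of the four zone edges, in meters or feet) and the `inertia` tuning described in the list above; the camera name, zone name, and all values are illustrative only, not part of this patch:

```yaml
cameras:
  street: # hypothetical camera
    zones:
      street_speed: # hypothetical zone name
        # four points the bottom center of the bounding box travels through
        coordinates: 0.25,0.60,0.75,0.60,0.75,0.85,0.25,0.85
        # real-world edge lengths used for calibration (illustrative values)
        distances: 10,4,10,4
        # lowered from the default of 3, per the guidance above
        inertia: 1
```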
From 2458f667c45f95a16e348ab8f0855486f10194a9 Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Tue, 11 Feb 2025 14:45:13 -0600 Subject: [PATCH 08/21] Refactor lpr into real time data processor (#16497) --- .../real_time/license_plate_processor.py} | 342 +++++++++++++++--- frigate/embeddings/embeddings.py | 41 --- frigate/embeddings/maintainer.py | 237 +----------- 3 files changed, 305 insertions(+), 315 deletions(-) rename frigate/{embeddings/lpr/lpr.py => data_processing/real_time/license_plate_processor.py} (68%) diff --git a/frigate/embeddings/lpr/lpr.py b/frigate/data_processing/real_time/license_plate_processor.py similarity index 68% rename from frigate/embeddings/lpr/lpr.py rename to frigate/data_processing/real_time/license_plate_processor.py index d7e513c73..27303601b 100644 --- a/frigate/embeddings/lpr/lpr.py +++ b/frigate/data_processing/real_time/license_plate_processor.py @@ -1,34 +1,41 @@ +"""Handle processing images for license plate detection and recognition.""" + +import datetime import logging import math -from typing import List, Tuple +import re +from typing import List, Optional, Tuple import cv2 import numpy as np +import requests from pyclipper import ET_CLOSEDPOLYGON, JT_ROUND, PyclipperOffset from shapely.geometry import Polygon from frigate.comms.inter_process import InterProcessRequestor -from frigate.config.classification import LicensePlateRecognitionConfig -from frigate.embeddings.embeddings import Embeddings +from frigate.config import FrigateConfig +from frigate.const import FRIGATE_LOCALHOST +from frigate.embeddings.functions.onnx import GenericONNXEmbedding, ModelTypeEnum +from frigate.util.image import area + +from ..types import DataProcessorMetrics +from .api import RealTimeProcessorApi logger = logging.getLogger(__name__) MIN_PLATE_LENGTH = 3 class LicensePlateProcessor(RealTimeProcessorApi): def __init__(self, config: FrigateConfig, metrics: DataProcessorMetrics): super().__init__(config, metrics) self.requestor = InterProcessRequestor() self.lpr_config = config.lpr self.requires_license_plate_detection = ( "license_plate" not in self.config.objects.all_objects ) self.detected_license_plates: dict[str, dict[str, any]] = {} + self.ctc_decoder = CTCDecoder() self.batch_size = 6 @@ -39,13 +46,54 @@ class LicensePlateRecognition: self.box_thresh = 0.8 self.mask_thresh = 0.8 + self.lpr_detection_model = None + self.lpr_classification_model = None + self.lpr_recognition_model = None + + if self.config.lpr.enabled: + self.detection_model = GenericONNXEmbedding( + model_name="paddleocr-onnx", + model_file="detection.onnx", + download_urls={ + "detection.onnx": "https://github.com/hawkeye217/paddleocr-onnx/raw/refs/heads/master/models/detection.onnx" + }, + model_size="large", + model_type=ModelTypeEnum.lpr_detect, + requestor=self.requestor, + device="CPU", + ) + + self.classification_model = GenericONNXEmbedding( + model_name="paddleocr-onnx", + model_file="classification.onnx", + download_urls={ + "classification.onnx": 
"https://github.com/hawkeye217/paddleocr-onnx/raw/refs/heads/master/models/classification.onnx" + }, + model_size="large", + model_type=ModelTypeEnum.lpr_classify, + requestor=self.requestor, + device="CPU", + ) + + self.recognition_model = GenericONNXEmbedding( + model_name="paddleocr-onnx", + model_file="recognition.onnx", + download_urls={ + "recognition.onnx": "https://github.com/hawkeye217/paddleocr-onnx/raw/refs/heads/master/models/recognition.onnx" + }, + model_size="large", + model_type=ModelTypeEnum.lpr_recognize, + requestor=self.requestor, + device="CPU", + ) + if self.lpr_config.enabled: # all models need to be loaded to run LPR self.detection_model._load_model_and_utils() self.classification_model._load_model_and_utils() self.recognition_model._load_model_and_utils() - def detect(self, image: np.ndarray) -> List[np.ndarray]: + def _detect(self, image: np.ndarray) -> List[np.ndarray]: """ Detect possible license plates in the input image by first resizing and normalizing it, running a detection model, and filtering out low-probability regions. @@ -59,18 +107,18 @@ class LicensePlateRecognition: h, w = image.shape[:2] if sum([h, w]) < 64: - image = self.zero_pad(image) + image = self._zero_pad(image) - resized_image = self.resize_image(image) - normalized_image = self.normalize_image(resized_image) + resized_image = self._resize_image(image) + normalized_image = self._normalize_image(resized_image) outputs = self.detection_model([normalized_image])[0] outputs = outputs[0, :, :] - boxes, _ = self.boxes_from_bitmap(outputs, outputs > self.mask_thresh, w, h) - return self.filter_polygon(boxes, (h, w)) + boxes, _ = self._boxes_from_bitmap(outputs, outputs > self.mask_thresh, w, h) + return self._filter_polygon(boxes, (h, w)) - def classify( + def _classify( self, images: List[np.ndarray] ) -> Tuple[List[np.ndarray], List[Tuple[str, float]]]: """ @@ -97,7 +145,7 @@ class LicensePlateRecognition: return self._process_classification_output(images, outputs) - def recognize( + def _recognize( self, images: List[np.ndarray] ) -> Tuple[List[str], List[List[float]]]: """ @@ -136,7 +184,7 @@ class LicensePlateRecognition: outputs = self.recognition_model(norm_images) return self.ctc_decoder(outputs) - def process_license_plate( + def _process_license_plate( self, image: np.ndarray ) -> Tuple[List[str], List[float], List[int]]: """ @@ -157,13 +205,13 @@ class LicensePlateRecognition: logger.debug("Model runners not loaded") return [], [], [] - plate_points = self.detect(image) + plate_points = self._detect(image) if len(plate_points) == 0: return [], [], [] - plate_points = self.sort_polygon(list(plate_points)) + plate_points = self._sort_polygon(list(plate_points)) plate_images = [self._crop_license_plate(image, x) for x in plate_points] - rotated_images, _ = self.classify(plate_images) + rotated_images, _ = self._classify(plate_images) # keep track of the index of each image for correct area calc later sorted_indices = np.argsort([x.shape[1] / x.shape[0] for x in rotated_images]) @@ -171,7 +219,7 @@ class LicensePlateRecognition: idx: original_idx for original_idx, idx in enumerate(sorted_indices) } - results, confidences = self.recognize(rotated_images) + results, confidences = self._recognize(rotated_images) if results: license_plates = [""] * len(rotated_images) @@ -218,7 +266,7 @@ class LicensePlateRecognition: return [], [], [] - def resize_image(self, image: np.ndarray) -> np.ndarray: + def _resize_image(self, image: np.ndarray) -> np.ndarray: """ Resize the input image while 
maintaining the aspect ratio, ensuring dimensions are multiples of 32. @@ -234,7 +282,7 @@ class LicensePlateRecognition: resize_w = max(int(round(int(w * ratio) / 32) * 32), 32) return cv2.resize(image, (resize_w, resize_h)) - def normalize_image(self, image: np.ndarray) -> np.ndarray: + def _normalize_image(self, image: np.ndarray) -> np.ndarray: """ Normalize the input image by subtracting the mean and multiplying by the standard deviation. @@ -252,7 +300,7 @@ class LicensePlateRecognition: cv2.multiply(image, std, image) return image.transpose((2, 0, 1))[np.newaxis, ...] - def boxes_from_bitmap( + def _boxes_from_bitmap( self, output: np.ndarray, mask: np.ndarray, dest_width: int, dest_height: int ) -> Tuple[np.ndarray, List[float]]: """ @@ -282,14 +330,14 @@ class LicensePlateRecognition: contour = contours[index] # get minimum bounding box (rotated rectangle) around the contour and the smallest side length. - points, min_side = self.get_min_boxes(contour) + points, min_side = self._get_min_boxes(contour) if min_side < self.min_size: continue points = np.array(points) - score = self.box_score(output, contour) + score = self._box_score(output, contour) if self.box_thresh > score: continue @@ -302,7 +350,7 @@ class LicensePlateRecognition: points = np.array(offset.Execute(distance * 1.5)).reshape((-1, 1, 2)) # get the minimum bounding box around the shrunken polygon. - box, min_side = self.get_min_boxes(points) + box, min_side = self._get_min_boxes(points) if min_side < self.min_size + 2: continue @@ -321,7 +369,7 @@ class LicensePlateRecognition: return np.array(boxes, dtype="int32"), scores @staticmethod - def get_min_boxes(contour: np.ndarray) -> Tuple[List[Tuple[float, float]], float]: + def _get_min_boxes(contour: np.ndarray) -> Tuple[List[Tuple[float, float]], float]: """ Calculate the minimum bounding box (rotated rectangle) for a given contour. @@ -340,7 +388,7 @@ class LicensePlateRecognition: return box, min(bounding_box[1]) @staticmethod - def box_score(bitmap: np.ndarray, contour: np.ndarray) -> float: + def _box_score(bitmap: np.ndarray, contour: np.ndarray) -> float: """ Calculate the average score within the bounding box of a contour. @@ -360,7 +408,7 @@ class LicensePlateRecognition: return cv2.mean(bitmap[y1 : y2 + 1, x1 : x2 + 1], mask)[0] @staticmethod - def expand_box(points: List[Tuple[float, float]]) -> np.ndarray: + def _expand_box(points: List[Tuple[float, float]]) -> np.ndarray: """ Expand a polygonal shape slightly by a factor determined by the area-to-perimeter ratio. @@ -377,7 +425,7 @@ class LicensePlateRecognition: expanded = np.array(offset.Execute(distance * 1.5)).reshape((-1, 2)) return expanded - def filter_polygon( + def _filter_polygon( self, points: List[np.ndarray], shape: Tuple[int, int] ) -> np.ndarray: """ @@ -394,14 +442,14 @@ class LicensePlateRecognition: height, width = shape return np.array( [ - self.clockwise_order(point) + self._clockwise_order(point) for point in points - if self.is_valid_polygon(point, width, height) + if self._is_valid_polygon(point, width, height) ] ) @staticmethod - def is_valid_polygon(point: np.ndarray, width: int, height: int) -> bool: + def _is_valid_polygon(point: np.ndarray, width: int, height: int) -> bool: """ Check if a polygon is valid, meaning it fits within the image bounds and has sides of a minimum length. 
@@ -424,7 +472,7 @@ class LicensePlateRecognition: ) @staticmethod - def clockwise_order(point: np.ndarray) -> np.ndarray: + def _clockwise_order(point: np.ndarray) -> np.ndarray: """ Arrange the points of a polygon in clockwise order based on their angular positions around the polygon's center. @@ -441,7 +489,7 @@ class LicensePlateRecognition: ] @staticmethod - def sort_polygon(points): + def _sort_polygon(points): """ Sort polygons based on their position in the image. If polygons are close in vertical position (within 10 pixels), sort them by horizontal position. @@ -466,7 +514,7 @@ class LicensePlateRecognition: return points @staticmethod - def zero_pad(image: np.ndarray) -> np.ndarray: + def _zero_pad(image: np.ndarray) -> np.ndarray: """ Apply zero-padding to an image, ensuring its dimensions are at least 32x32. The padding is added only if needed. @@ -649,6 +697,210 @@ class LicensePlateRecognition: image = np.rot90(image, k=3) return image + def __update_metrics(self, duration: float) -> None: + self.metrics.alpr_pps.value = (self.metrics.alpr_pps.value * 9 + duration) / 10 + + def _detect_license_plate(self, input: np.ndarray) -> tuple[int, int, int, int]: + """Return the dimensions of the input image as [x, y, width, height].""" + # TODO: use a small model here to detect plates + height, width = input.shape[:2] + return (0, 0, width, height) + + def process_frame(self, obj_data: dict[str, any], frame: np.ndarray): + """Look for license plates in image.""" + start = datetime.datetime.now().timestamp() + + id = obj_data["id"] + + # don't run for non-car objects + if obj_data.get("label") != "car": + logger.debug("Not processing license plate for non-car object.") + return + + # don't run for stationary car objects + if obj_data.get("stationary") == True: + logger.debug("Not processing license plate for a stationary car object.") + return + + # don't overwrite sub label for objects that have a sub label + # that is not a license plate + if obj_data.get("sub_label") and id not in self.detected_license_plates: + logger.debug( + f"Not processing license plate due to existing sub label: {obj_data.get('sub_label')}." 
+ ) + return + + license_plate: Optional[dict[str, any]] = None + + if self.requires_license_plate_detection: + logger.debug("Running manual license_plate detection.") + car_box = obj_data.get("box") + + if not car_box: + return + + rgb = cv2.cvtColor(frame, cv2.COLOR_YUV2RGB_I420) + left, top, right, bottom = car_box + car = rgb[top:bottom, left:right] + license_plate = self._detect_license_plate(car) + + if not license_plate: + logger.debug("Detected no license plates for car object.") + return + + license_plate_frame = car[ + license_plate[1] : license_plate[3], license_plate[0] : license_plate[2] + ] + license_plate_frame = cv2.cvtColor(license_plate_frame, cv2.COLOR_RGB2BGR) + else: + # don't run for object without attributes + if not obj_data.get("current_attributes"): + logger.debug("No attributes to parse.") + return + + attributes: list[dict[str, any]] = obj_data.get("current_attributes", []) + for attr in attributes: + if attr.get("label") != "license_plate": + continue + + if license_plate is None or attr.get("score", 0.0) > license_plate.get( + "score", 0.0 + ): + license_plate = attr + + # no license plates detected in this frame + if not license_plate: + return + + license_plate_box = license_plate.get("box") + + # check that license plate is valid + if ( + not license_plate_box + or area(license_plate_box) < self.config.lpr.min_area + ): + logger.debug(f"Invalid license plate box {license_plate}") + return + + license_plate_frame = cv2.cvtColor(frame, cv2.COLOR_YUV2BGR_I420) + license_plate_frame = license_plate_frame[ + license_plate_box[1] : license_plate_box[3], + license_plate_box[0] : license_plate_box[2], + ] + + # run detection, returns results sorted by confidence, best first + license_plates, confidences, areas = self._process_license_plate( + license_plate_frame + ) + + logger.debug(f"Text boxes: {license_plates}") + logger.debug(f"Confidences: {confidences}") + logger.debug(f"Areas: {areas}") + + if license_plates: + for plate, confidence, text_area in zip(license_plates, confidences, areas): + avg_confidence = ( + (sum(confidence) / len(confidence)) if confidence else 0 + ) + + logger.debug( + f"Detected text: {plate} (average confidence: {avg_confidence:.2f}, area: {text_area} pixels)" + ) + else: + # no plates found + logger.debug("No text detected") + return + + top_plate, top_char_confidences, top_area = ( + license_plates[0], + confidences[0], + areas[0], + ) + avg_confidence = ( + (sum(top_char_confidences) / len(top_char_confidences)) + if top_char_confidences + else 0 + ) + + # Check if we have a previously detected plate for this ID + if id in self.detected_license_plates: + prev_plate = self.detected_license_plates[id]["plate"] + prev_char_confidences = self.detected_license_plates[id]["char_confidences"] + prev_area = self.detected_license_plates[id]["area"] + prev_avg_confidence = ( + (sum(prev_char_confidences) / len(prev_char_confidences)) + if prev_char_confidences + else 0 + ) + + # Define conditions for keeping the previous plate + shorter_than_previous = len(top_plate) < len(prev_plate) + lower_avg_confidence = avg_confidence <= prev_avg_confidence + smaller_area = top_area < prev_area + + # Compare character-by-character confidence where possible + min_length = min(len(top_plate), len(prev_plate)) + char_confidence_comparison = sum( + 1 + for i in range(min_length) + if top_char_confidences[i] <= prev_char_confidences[i] + ) + worse_char_confidences = char_confidence_comparison >= min_length / 2 + + if (shorter_than_previous or smaller_area) 
and ( + lower_avg_confidence and worse_char_confidences + ): + logger.debug( + f"Keeping previous plate. New plate stats: " + f"length={len(top_plate)}, avg_conf={avg_confidence:.2f}, area={top_area} " + f"vs Previous: length={len(prev_plate)}, avg_conf={prev_avg_confidence:.2f}, area={prev_area}" + ) + return True + + # Check against minimum confidence threshold + if avg_confidence < self.lpr_config.threshold: + logger.debug( + f"Average confidence {avg_confidence} is less than threshold ({self.lpr_config.threshold})" + ) + return + + # Determine subLabel based on known plates, use regex matching + # Default to the detected plate, use label name if there's a match + sub_label = next( + ( + label + for label, plates in self.lpr_config.known_plates.items() + if any(re.match(f"^{plate}$", top_plate) for plate in plates) + ), + top_plate, + ) + + # Send the result to the API + resp = requests.post( + f"{FRIGATE_LOCALHOST}/api/events/{id}/sub_label", + json={ + "camera": obj_data.get("camera"), + "subLabel": sub_label, + "subLabelScore": avg_confidence, + }, + ) + + if resp.status_code == 200: + self.detected_license_plates[id] = { + "plate": top_plate, + "char_confidences": top_char_confidences, + "area": top_area, + } + + self.__update_metrics(datetime.datetime.now().timestamp() - start) + + def handle_request(self, topic, request_data) -> dict[str, any] | None: + return + + def expire_object(self, object_id: str): + if object_id in self.detected_license_plates: + self.detected_license_plates.pop(object_id) + class CTCDecoder: """ diff --git a/frigate/embeddings/embeddings.py b/frigate/embeddings/embeddings.py index 852806a8d..d8a4a2f4d 100644 --- a/frigate/embeddings/embeddings.py +++ b/frigate/embeddings/embeddings.py @@ -131,47 +131,6 @@ class Embeddings: device="GPU" if config.semantic_search.model_size == "large" else "CPU", ) - self.lpr_detection_model = None - self.lpr_classification_model = None - self.lpr_recognition_model = None - - if self.config.lpr.enabled: - self.lpr_detection_model = GenericONNXEmbedding( - model_name="paddleocr-onnx", - model_file="detection.onnx", - download_urls={ - "detection.onnx": "https://github.com/hawkeye217/paddleocr-onnx/raw/refs/heads/master/models/detection.onnx" - }, - model_size="large", - model_type=ModelTypeEnum.lpr_detect, - requestor=self.requestor, - device="CPU", - ) - - self.lpr_classification_model = GenericONNXEmbedding( - model_name="paddleocr-onnx", - model_file="classification.onnx", - download_urls={ - "classification.onnx": "https://github.com/hawkeye217/paddleocr-onnx/raw/refs/heads/master/models/classification.onnx" - }, - model_size="large", - model_type=ModelTypeEnum.lpr_classify, - requestor=self.requestor, - device="CPU", - ) - - self.lpr_recognition_model = GenericONNXEmbedding( - model_name="paddleocr-onnx", - model_file="recognition.onnx", - download_urls={ - "recognition.onnx": "https://github.com/hawkeye217/paddleocr-onnx/raw/refs/heads/master/models/recognition.onnx" - }, - model_size="large", - model_type=ModelTypeEnum.lpr_recognize, - requestor=self.requestor, - device="CPU", - ) - def embed_thumbnail( self, event_id: str, thumbnail: bytes, upsert: bool = True ) -> ndarray: diff --git a/frigate/embeddings/maintainer.py b/frigate/embeddings/maintainer.py index e221bd146..b7623722d 100644 --- a/frigate/embeddings/maintainer.py +++ b/frigate/embeddings/maintainer.py @@ -1,10 +1,8 @@ """Maintain embeddings in SQLite-vec.""" import base64 -import datetime import logging import os -import re import threading from 
multiprocessing.synchronize import Event as MpEvent from pathlib import Path @@ -12,7 +10,6 @@ from typing import Optional import cv2 import numpy as np -import requests from peewee import DoesNotExist from playhouse.sqliteq import SqliteQueueDatabase @@ -26,20 +23,21 @@ from frigate.comms.inter_process import InterProcessRequestor from frigate.config import FrigateConfig from frigate.const import ( CLIPS_DIR, - FRIGATE_LOCALHOST, UPDATE_EVENT_DESCRIPTION, ) from frigate.data_processing.real_time.api import RealTimeProcessorApi from frigate.data_processing.real_time.bird_processor import BirdProcessor from frigate.data_processing.real_time.face_processor import FaceProcessor +from frigate.data_processing.real_time.license_plate_processor import ( + LicensePlateProcessor, +) from frigate.data_processing.types import DataProcessorMetrics -from frigate.embeddings.lpr.lpr import LicensePlateRecognition from frigate.events.types import EventTypeEnum from frigate.genai import get_genai_client from frigate.models import Event from frigate.types import TrackedObjectUpdateTypesEnum from frigate.util.builtin import serialize -from frigate.util.image import SharedMemoryFrameManager, area, calculate_region +from frigate.util.image import SharedMemoryFrameManager, calculate_region from .embeddings import Embeddings @@ -82,24 +80,15 @@ class EmbeddingMaintainer(threading.Thread): if self.config.classification.bird.enabled: self.processors.append(BirdProcessor(self.config, metrics)) + if self.config.lpr.enabled: + self.processors.append(LicensePlateProcessor(self.config, metrics)) + # create communication for updating event descriptions self.requestor = InterProcessRequestor() self.stop_event = stop_event self.tracked_events: dict[str, list[any]] = {} self.genai_client = get_genai_client(config) - # set license plate recognition conditions - self.lpr_config = self.config.lpr - self.requires_license_plate_detection = ( - "license_plate" not in self.config.objects.all_objects - ) - self.detected_license_plates: dict[str, dict[str, any]] = {} - - if self.lpr_config.enabled: - self.license_plate_recognition = LicensePlateRecognition( - self.lpr_config, self.requestor, self.embeddings - ) - def run(self) -> None: """Maintain a SQLite-vec database for semantic search.""" while not self.stop_event.is_set(): @@ -164,11 +153,7 @@ class EmbeddingMaintainer(threading.Thread): camera_config = self.config.cameras[camera] # no need to process updated objects if face recognition, lpr, genai are disabled - if ( - not camera_config.genai.enabled - and not self.lpr_config.enabled - and len(self.processors) == 0 - ): + if not camera_config.genai.enabled and len(self.processors) == 0: return # Create our own thumbnail based on the bounding box and the frame time @@ -188,16 +173,6 @@ class EmbeddingMaintainer(threading.Thread): for processor in self.processors: processor.process_frame(data, yuv_frame) - if self.lpr_config.enabled: - start = datetime.datetime.now().timestamp() - processed = self._process_license_plate(data, yuv_frame) - - if processed: - duration = datetime.datetime.now().timestamp() - start - self.metrics.alpr_pps.value = ( - self.metrics.alpr_pps.value * 9 + duration - ) / 10 - # no need to save our own thumbnails if genai is not enabled # or if the object has become stationary if self.genai_client is not None and not data["stationary"]: @@ -229,9 +204,6 @@ class EmbeddingMaintainer(threading.Thread): for processor in self.processors: processor.expire_object(event_id) - if event_id in 
self.detected_license_plates: - self.detected_license_plates.pop(event_id) - if updated_db: try: event: Event = Event.get(Event.id == event_id) @@ -354,199 +326,6 @@ class EmbeddingMaintainer(threading.Thread): if event_id: self.handle_regenerate_description(event_id, source) - def _detect_license_plate(self, input: np.ndarray) -> tuple[int, int, int, int]: - """Return the dimensions of the input image as [x, y, width, height].""" - height, width = input.shape[:2] - return (0, 0, width, height) - - def _process_license_plate( - self, obj_data: dict[str, any], frame: np.ndarray - ) -> bool: - """Look for license plates in image.""" - id = obj_data["id"] - - # don't run for non car objects - if obj_data.get("label") != "car": - logger.debug("Not a processing license plate for non car object.") - return False - - # don't run for stationary car objects - if obj_data.get("stationary") == True: - logger.debug("Not a processing license plate for a stationary car object.") - return False - - # don't overwrite sub label for objects that have a sub label - # that is not a license plate - if obj_data.get("sub_label") and id not in self.detected_license_plates: - logger.debug( - f"Not processing license plate due to existing sub label: {obj_data.get('sub_label')}." - ) - return False - - license_plate: Optional[dict[str, any]] = None - - if self.requires_license_plate_detection: - logger.debug("Running manual license_plate detection.") - car_box = obj_data.get("box") - - if not car_box: - return False - - rgb = cv2.cvtColor(frame, cv2.COLOR_YUV2RGB_I420) - left, top, right, bottom = car_box - car = rgb[top:bottom, left:right] - license_plate = self._detect_license_plate(car) - - if not license_plate: - logger.debug("Detected no license plates for car object.") - return False - - license_plate_frame = car[ - license_plate[1] : license_plate[3], license_plate[0] : license_plate[2] - ] - license_plate_frame = cv2.cvtColor(license_plate_frame, cv2.COLOR_RGB2BGR) - else: - # don't run for object without attributes - if not obj_data.get("current_attributes"): - logger.debug("No attributes to parse.") - return False - - attributes: list[dict[str, any]] = obj_data.get("current_attributes", []) - for attr in attributes: - if attr.get("label") != "license_plate": - continue - - if license_plate is None or attr.get("score", 0.0) > license_plate.get( - "score", 0.0 - ): - license_plate = attr - - # no license plates detected in this frame - if not license_plate: - return False - - license_plate_box = license_plate.get("box") - - # check that license plate is valid - if ( - not license_plate_box - or area(license_plate_box) < self.config.lpr.min_area - ): - logger.debug(f"Invalid license plate box {license_plate}") - return False - - license_plate_frame = cv2.cvtColor(frame, cv2.COLOR_YUV2BGR_I420) - license_plate_frame = license_plate_frame[ - license_plate_box[1] : license_plate_box[3], - license_plate_box[0] : license_plate_box[2], - ] - - # run detection, returns results sorted by confidence, best first - license_plates, confidences, areas = ( - self.license_plate_recognition.process_license_plate(license_plate_frame) - ) - - logger.debug(f"Text boxes: {license_plates}") - logger.debug(f"Confidences: {confidences}") - logger.debug(f"Areas: {areas}") - - if license_plates: - for plate, confidence, text_area in zip(license_plates, confidences, areas): - avg_confidence = ( - (sum(confidence) / len(confidence)) if confidence else 0 - ) - - logger.debug( - f"Detected text: {plate} (average confidence: 
{avg_confidence:.2f}, area: {text_area} pixels)" - ) - else: - # no plates found - logger.debug("No text detected") - return True - - top_plate, top_char_confidences, top_area = ( - license_plates[0], - confidences[0], - areas[0], - ) - avg_confidence = ( - (sum(top_char_confidences) / len(top_char_confidences)) - if top_char_confidences - else 0 - ) - - # Check if we have a previously detected plate for this ID - if id in self.detected_license_plates: - prev_plate = self.detected_license_plates[id]["plate"] - prev_char_confidences = self.detected_license_plates[id]["char_confidences"] - prev_area = self.detected_license_plates[id]["area"] - prev_avg_confidence = ( - (sum(prev_char_confidences) / len(prev_char_confidences)) - if prev_char_confidences - else 0 - ) - - # Define conditions for keeping the previous plate - shorter_than_previous = len(top_plate) < len(prev_plate) - lower_avg_confidence = avg_confidence <= prev_avg_confidence - smaller_area = top_area < prev_area - - # Compare character-by-character confidence where possible - min_length = min(len(top_plate), len(prev_plate)) - char_confidence_comparison = sum( - 1 - for i in range(min_length) - if top_char_confidences[i] <= prev_char_confidences[i] - ) - worse_char_confidences = char_confidence_comparison >= min_length / 2 - - if (shorter_than_previous or smaller_area) and ( - lower_avg_confidence and worse_char_confidences - ): - logger.debug( - f"Keeping previous plate. New plate stats: " - f"length={len(top_plate)}, avg_conf={avg_confidence:.2f}, area={top_area} " - f"vs Previous: length={len(prev_plate)}, avg_conf={prev_avg_confidence:.2f}, area={prev_area}" - ) - return True - - # Check against minimum confidence threshold - if avg_confidence < self.lpr_config.threshold: - logger.debug( - f"Average confidence {avg_confidence} is less than threshold ({self.lpr_config.threshold})" - ) - return True - - # Determine subLabel based on known plates, use regex matching - # Default to the detected plate, use label name if there's a match - sub_label = next( - ( - label - for label, plates in self.lpr_config.known_plates.items() - if any(re.match(f"^{plate}$", top_plate) for plate in plates) - ), - top_plate, - ) - - # Send the result to the API - resp = requests.post( - f"{FRIGATE_LOCALHOST}/api/events/{id}/sub_label", - json={ - "camera": obj_data.get("camera"), - "subLabel": sub_label, - "subLabelScore": avg_confidence, - }, - ) - - if resp.status_code == 200: - self.detected_license_plates[id] = { - "plate": top_plate, - "char_confidences": top_char_confidences, - "area": top_area, - } - - return True - def _create_thumbnail(self, yuv_frame, box, height=500) -> Optional[bytes]: """Return jpg thumbnail of a region of the frame.""" frame = cv2.cvtColor(yuv_frame, cv2.COLOR_YUV2BGR_I420) From 73fee6372b533738ebfe5cb42e7dbe9b9835693d Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Tue, 11 Feb 2025 14:16:10 -0700 Subject: [PATCH 09/21] Remove obsolete event clip logic (#16504) * Remove obsolete event clip logic * Formatting --- frigate/api/media.py | 26 ++------------------------ 1 file changed, 2 insertions(+), 24 deletions(-) diff --git a/frigate/api/media.py b/frigate/api/media.py index 39a6f7d1e..a9455919b 100644 --- a/frigate/api/media.py +++ b/frigate/api/media.py @@ -1088,30 +1088,8 @@ def event_clip(request: Request, event_id: str): content={"success": False, "message": "Clip not available"}, status_code=404 ) - file_name = f"{event.camera}-{event.id}.mp4" - clip_path = os.path.join(CLIPS_DIR, file_name) - - if not 
os.path.isfile(clip_path): - end_ts = ( - datetime.now().timestamp() if event.end_time is None else event.end_time - ) - return recording_clip(request, event.camera, event.start_time, end_ts) - - headers = { - "Content-Description": "File Transfer", - "Cache-Control": "no-cache", - "Content-Type": "video/mp4", - "Content-Length": str(os.path.getsize(clip_path)), - # nginx: https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ignore_headers - "X-Accel-Redirect": f"/clips/{file_name}", - } - - return FileResponse( - clip_path, - media_type="video/mp4", - filename=file_name, - headers=headers, - ) + end_ts = datetime.now().timestamp() if event.end_time is None else event.end_time + return recording_clip(request, event.camera, event.start_time, end_ts) @router.get("/events/{event_id}/preview.gif") From 11baf237bc398c70c0a714e68d219e5296521594 Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Tue, 11 Feb 2025 17:49:22 -0600 Subject: [PATCH 10/21] Ensure all streaming settings are saved correctly on mobile (#16511) * Ensure streaming settings are saved correctly on mobile * remove extra check --- web/src/components/menu/LiveContextMenu.tsx | 9 +- web/src/views/live/LiveDashboardView.tsx | 107 ++++++++++++++++++-- 2 files changed, 104 insertions(+), 12 deletions(-) diff --git a/web/src/components/menu/LiveContextMenu.tsx b/web/src/components/menu/LiveContextMenu.tsx index f5222592d..81be53f58 100644 --- a/web/src/components/menu/LiveContextMenu.tsx +++ b/web/src/components/menu/LiveContextMenu.tsx @@ -82,7 +82,7 @@ export default function LiveContextMenu({ ); useEffect(() => { - if (cameraGroup) { + if (cameraGroup && cameraGroup != "default") { setGroupStreamingSettings(allGroupsStreamingSettings[cameraGroup]); } // set individual group when all groups changes @@ -91,7 +91,12 @@ export default function LiveContextMenu({ const onSave = useCallback( (settings: GroupStreamingSettings) => { - if (!cameraGroup || !allGroupsStreamingSettings) { + if ( + !cameraGroup || + !allGroupsStreamingSettings || + cameraGroup == "default" || + !settings + ) { return; } diff --git a/web/src/views/live/LiveDashboardView.tsx b/web/src/views/live/LiveDashboardView.tsx index 363405023..89a2aeef2 100644 --- a/web/src/views/live/LiveDashboardView.tsx +++ b/web/src/views/live/LiveDashboardView.tsx @@ -14,7 +14,11 @@ import { TooltipTrigger, } from "@/components/ui/tooltip"; import { usePersistence } from "@/hooks/use-persistence"; -import { CameraConfig, FrigateConfig } from "@/types/frigateConfig"; +import { + AllGroupsStreamingSettings, + CameraConfig, + FrigateConfig, +} from "@/types/frigateConfig"; import { ReviewSegment } from "@/types/review"; import { useCallback, useEffect, useMemo, useRef, useState } from "react"; import { @@ -38,6 +42,7 @@ import { FaCompress, FaExpand } from "react-icons/fa"; import useCameraLiveMode from "@/hooks/use-camera-live-mode"; import { useResizeObserver } from "@/hooks/resize-observer"; import LiveContextMenu from "@/components/menu/LiveContextMenu"; +import { useStreamingSettings } from "@/context/streaming-settings-provider"; type LiveDashboardViewProps = { cameras: CameraConfig[]; @@ -135,8 +140,6 @@ export default function LiveDashboardView({ // camera live views - const [autoLiveView] = usePersistence("autoLiveView", true); - const [{ height: containerHeight }] = useResizeObserver(containerRef); const hasScrollbar = useMemo(() => { @@ -198,6 +201,17 @@ export default function LiveDashboardView({ 
    supportsAudioOutputStates,
  } = useCameraLiveMode(cameras, windowVisible);
 
+  const [globalAutoLive] = usePersistence("autoLiveView", true);
+
+  const { allGroupsStreamingSettings, setAllGroupsStreamingSettings } =
+    useStreamingSettings();
+
+  const currentGroupStreamingSettings = useMemo(() => {
+    if (cameraGroup && cameraGroup != "default" && allGroupsStreamingSettings) {
+      return allGroupsStreamingSettings[cameraGroup];
+    }
+  }, [allGroupsStreamingSettings, cameraGroup]);
+
   const cameraRef = useCallback(
     (node: HTMLElement | null) => {
       if (!visibleCameraObserver.current) {
@@ -245,6 +259,25 @@ export default function LiveDashboardView({
     }));
   };
 
+  useEffect(() => {
+    if (!allGroupsStreamingSettings) {
+      return;
+    }
+
+    const initialAudioStates: AudioState = {};
+    const initialVolumeStates: VolumeState = {};
+
+    Object.entries(allGroupsStreamingSettings).forEach(([_, groupSettings]) => {
+      Object.entries(groupSettings).forEach(([camera, cameraSettings]) => {
+        initialAudioStates[camera] = cameraSettings.playAudio ?? false;
+        initialVolumeStates[camera] = cameraSettings.volume ?? 1;
+      });
+    });
+
+    setAudioStates(initialAudioStates);
+    setVolumeStates(initialVolumeStates);
+  }, [allGroupsStreamingSettings]);
+
   const toggleAudio = (cameraName: string): void => {
     setAudioStates((prev) => ({
       ...prev,
@@ -252,12 +285,53 @@ export default function LiveDashboardView({
       [cameraName]: !prev[cameraName],
     }));
   };
 
+  const onSaveMuting = useCallback(
+    (playAudio: boolean) => {
+      if (
+        !cameraGroup ||
+        !allGroupsStreamingSettings ||
+        cameraGroup == "default"
+      ) {
+        return;
+      }
+
+      const existingGroupSettings =
+        allGroupsStreamingSettings[cameraGroup] || {};
+
+      const updatedSettings: AllGroupsStreamingSettings = {
+        ...Object.fromEntries(
+          Object.entries(allGroupsStreamingSettings || {}).filter(
+            ([key]) => key !== cameraGroup,
+          ),
+        ),
+        [cameraGroup]: {
+          ...existingGroupSettings,
+          ...Object.fromEntries(
+            Object.entries(existingGroupSettings).map(
+              ([cameraName, settings]) => [
+                cameraName,
+                {
+                  ...settings,
+                  playAudio: playAudio,
+                },
+              ],
+            ),
+          ),
+        },
+      };
+
+      setAllGroupsStreamingSettings?.(updatedSettings);
+    },
+    [cameraGroup, allGroupsStreamingSettings, setAllGroupsStreamingSettings],
+  );
+
   const muteAll = (): void => {
     const updatedStates: Record<string, boolean> = {};
     visibleCameras.forEach((cameraName) => {
       updatedStates[cameraName] = false;
     });
     setAudioStates(updatedStates);
+    onSaveMuting(false);
   };
 
   const unmuteAll = (): void => {
@@ -266,6 +340,7 @@ export default function LiveDashboardView({
       updatedStates[cameraName] = true;
     });
     setAudioStates(updatedStates);
+    onSaveMuting(true);
   };
 
   return (
@@ -392,19 +467,30 @@ export default function LiveDashboardView({
           } else {
             grow = "aspect-video";
           }
+          const streamName =
+            currentGroupStreamingSettings?.[camera.name]?.streamName ||
+            Object.values(camera.live.streams)?.[0];
+          const autoLive =
+            currentGroupStreamingSettings?.[camera.name]?.streamType !==
+            "no-streaming";
+          const showStillWithoutActivity =
+            currentGroupStreamingSettings?.[camera.name]?.streamType !==
+            "continuous";
+          const useWebGL =
+            currentGroupStreamingSettings?.[camera.name]
+              ?.compatibilityMode || false;
           return (
             <LiveContextMenu
              {/* … props elided … */}
              toggleAudio={() => toggleAudio(camera.name)}
@@ -431,11 +517,12 @@ export default function LiveDashboardView({
                }
                cameraConfig={camera}
                preferredLiveMode={preferredLiveModes[camera.name] ?? "mse"}
-                autoLive={autoLiveView}
-                useWebGL={false}
+                autoLive={autoLive ?? globalAutoLive}
+                showStillWithoutActivity={showStillWithoutActivity ?? true}
+                useWebGL={useWebGL}
                playInBackground={false}
                showStats={statsStates[camera.name]}
-                streamName={Object.values(camera.live.streams)[0]}
+                streamName={streamName}
                onClick={() => onSelectCamera(camera.name)}
                onError={(e) => handleError(camera.name, e)}
                onResetLiveMode={() => resetPreferredLiveMode(camera.name)}
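The patch above makes each grid tile derive its player props from the group's saved streaming settings, falling back to defaults when the group is "default" or has no entry for the camera. The sketch below restates that resolution as one standalone function; resolvePlayerOptions and the type names are illustrative rather than Frigate's actual API, and the settings fields (streamName, streamType, compatibilityMode) are assumed from what the diff reads:

    // Sketch only: mirrors the fallback logic in PATCH 10; names are illustrative.
    type StreamType = "no-streaming" | "smart" | "continuous";

    interface CameraStreamingSettings {
      streamName?: string;
      streamType?: StreamType;
      compatibilityMode?: boolean;
    }

    interface PlayerOptions {
      streamName?: string;
      autoLive: boolean;
      showStillWithoutActivity: boolean;
      useWebGL: boolean;
    }

    function resolvePlayerOptions(
      cameraName: string,
      cameraStreams: Record<string, string>,
      globalAutoLive: boolean,
      groupSettings?: Record<string, CameraStreamingSettings>,
    ): PlayerOptions {
      const settings = groupSettings?.[cameraName];
      return {
        // prefer the stream saved for this group, else the camera's first stream
        streamName: settings?.streamName || Object.values(cameraStreams)[0],
        // "no-streaming" turns auto live off; with no saved settings, defer to the global toggle
        autoLive: settings ? settings.streamType !== "no-streaming" : globalAutoLive,
        // "continuous" keeps the stream up even when there is no activity
        showStillWithoutActivity: settings?.streamType !== "continuous",
        // compatibility mode opts the player into the WebGL path
        useWebGL: settings?.compatibilityMode ?? false,
      };
    }

Under those assumptions, a tile render reduces to calling resolvePlayerOptions(camera.name, camera.live.streams, globalAutoLive ?? true, currentGroupStreamingSettings) and spreading the result onto the player.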
From d6b5dc93cca30f7bf71e762f1902e95297f71646 Mon Sep 17 00:00:00 2001
From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com>
Date: Wed, 12 Feb 2025 07:16:32 -0600
Subject: [PATCH 11/21] Fix streaming dialog and use less text on register
 button (#16518)

---
 web/src/components/menu/LiveContextMenu.tsx          | 4 +---
 web/src/views/settings/NotificationsSettingsView.tsx | 2 +-
 2 files changed, 2 insertions(+), 4 deletions(-)

diff --git a/web/src/components/menu/LiveContextMenu.tsx b/web/src/components/menu/LiveContextMenu.tsx
index 81be53f58..07909a311 100644
--- a/web/src/components/menu/LiveContextMenu.tsx
+++ b/web/src/components/menu/LiveContextMenu.tsx
@@ -85,9 +85,7 @@ export default function LiveContextMenu({
     if (cameraGroup && cameraGroup != "default") {
       setGroupStreamingSettings(allGroupsStreamingSettings[cameraGroup]);
     }
-    // set individual group when all groups changes
-    // eslint-disable-next-line react-hooks/exhaustive-deps
-  }, [allGroupsStreamingSettings]);
+  }, [allGroupsStreamingSettings, cameraGroup]);
 
   const onSave = useCallback(
     (settings: GroupStreamingSettings) => {
diff --git a/web/src/views/settings/NotificationsSettingsView.tsx b/web/src/views/settings/NotificationsSettingsView.tsx
index 5ea545fb3..3918949d0 100644
--- a/web/src/views/settings/NotificationsSettingsView.tsx
+++ b/web/src/views/settings/NotificationsSettingsView.tsx
@@ -484,7 +484,7 @@ export default function NotificationView({
                 }
               }}
             >
-              {`${registration != null ? "Unregister" : "Register"} for notifications on this device`}
+              {`${registration != null ? "Unregister" : "Register"} this device`}
             </Button>
 
             {registration != null && registration.active && (
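The LiveContextMenu hunk above is the standard exhaustive-deps fix: instead of silencing the linter, the effect lists both values it reads, so the local copy re-syncs when the user switches camera groups as well as when the settings object changes. A reduced sketch of the pattern, with a hypothetical useGroupSettings hook standing in for the component code and an assumed settings shape:

    import { useEffect, useState } from "react";

    // Sketch only: useGroupSettings is a hypothetical stand-in for the effect
    // in LiveContextMenu; the settings shape is assumed, not Frigate's type.
    type GroupStreamingSettings = Record<
      string,
      { playAudio?: boolean; volume?: number }
    >;

    function useGroupSettings(
      allGroups: Record<string, GroupStreamingSettings> | undefined,
      group: string | undefined,
    ) {
      const [settings, setSettings] = useState<GroupStreamingSettings>();

      useEffect(() => {
        // guard the built-in "default" group, which has no saved settings
        if (allGroups && group && group != "default") {
          setSettings(allGroups[group]);
        }
        // both deps listed: the effect re-runs on settings changes AND on
        // group switches, which the removed eslint-disable had papered over
      }, [allGroups, group]);

      return settings;
    }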