From a510ea903675327dbb1d752403efbc066fe29272 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Wed, 5 Nov 2025 08:48:47 -0700 Subject: [PATCH 01/31] Review card refactor (#20813) * Use the review card in event timeline popover * Show review title in review card --- web/src/components/card/ReviewCard.tsx | 59 ++++++++++++-------- web/src/components/timeline/DetailStream.tsx | 6 +- web/src/components/timeline/EventSegment.tsx | 17 ++---- web/src/hooks/use-event-segment-utils.ts | 8 +-- web/src/views/recording/RecordingView.tsx | 2 +- 5 files changed, 51 insertions(+), 41 deletions(-) diff --git a/web/src/components/card/ReviewCard.tsx b/web/src/components/card/ReviewCard.tsx index 6337ac4a9..8fc4024db 100644 --- a/web/src/components/card/ReviewCard.tsx +++ b/web/src/components/card/ReviewCard.tsx @@ -38,6 +38,7 @@ import { Button, buttonVariants } from "../ui/button"; import { Trans, useTranslation } from "react-i18next"; import { cn } from "@/lib/utils"; import { LuCircle } from "react-icons/lu"; +import { MdAutoAwesome } from "react-icons/md"; type ReviewCardProps = { event: ReviewSegment; @@ -164,29 +165,33 @@ export default function ReviewCard({
-
- <> - - {event.data.objects.map((object) => { - return getIconForLabel( - object, - "size-3 text-primary dark:text-white", - ); - })} - {event.data.audio.map((audio) => { - return getIconForLabel( - audio, - "size-3 text-primary dark:text-white", - ); - })} - +
+ +
+ {event.data.objects.map((object, idx) => ( +
+ {getIconForLabel(object, "size-3 text-white")} +
+ ))} + {event.data.audio.map((audio, idx) => ( +
+ {getIconForLabel(audio, "size-3 text-white")} +
+ ))} +
{formattedDate}
@@ -213,6 +218,14 @@ export default function ReviewCard({ dense />
+ {event.data.metadata?.title && ( +
+ + + {event.data.metadata.title} + +
+ )}
); diff --git a/web/src/components/timeline/DetailStream.tsx b/web/src/components/timeline/DetailStream.tsx index 4b152aadb..5b45de19f 100644 --- a/web/src/components/timeline/DetailStream.tsx +++ b/web/src/components/timeline/DetailStream.tsx @@ -22,6 +22,7 @@ import { LuChevronRight, LuSettings, } from "react-icons/lu"; +import { MdAutoAwesome } from "react-icons/md"; import { getTranslatedLabel } from "@/utils/i18n"; import EventMenu from "@/components/timeline/EventMenu"; import { FrigatePlusDialog } from "@/components/overlay/dialog/FrigatePlusDialog"; @@ -410,8 +411,9 @@ function ReviewGroup({
{review.data.metadata?.title && ( -
- {review.data.metadata.title} +
+ + {review.data.metadata.title}
)}
diff --git a/web/src/components/timeline/EventSegment.tsx b/web/src/components/timeline/EventSegment.tsx index e04841540..368e0fad5 100644 --- a/web/src/components/timeline/EventSegment.tsx +++ b/web/src/components/timeline/EventSegment.tsx @@ -1,4 +1,3 @@ -import { useApiHost } from "@/api"; import { useTimelineUtils } from "@/hooks/use-timeline-utils"; import { useEventSegmentUtils } from "@/hooks/use-event-segment-utils"; import { ReviewSegment, ReviewSeverity } from "@/types/review"; @@ -18,6 +17,7 @@ import { HoverCardPortal } from "@radix-ui/react-hover-card"; import scrollIntoView from "scroll-into-view-if-needed"; import { MinimapBounds, Tick, Timestamp } from "./segment-metadata"; import useTapUtils from "@/hooks/use-tap-utils"; +import ReviewCard from "../card/ReviewCard"; type EventSegmentProps = { events: ReviewSegment[]; @@ -54,7 +54,7 @@ export function EventSegment({ displaySeverityType, shouldShowRoundedCorners, getEventStart, - getEventThumbnail, + getEvent, } = useEventSegmentUtils(segmentDuration, events, severityType); const { alignStartDateToTimeline, alignEndDateToTimeline } = useTimelineUtils( @@ -87,13 +87,11 @@ export function EventSegment({ // eslint-disable-next-line react-hooks/exhaustive-deps }, [getEventStart, segmentTime]); - const apiHost = useApiHost(); - const { handleTouchStart } = useTapUtils(); - const eventThumbnail = useMemo(() => { - return getEventThumbnail(segmentTime); - }, [getEventThumbnail, segmentTime]); + const segmentEvent = useMemo(() => { + return getEvent(segmentTime); + }, [getEvent, segmentTime]); const timestamp = useMemo(() => new Date(segmentTime * 1000), [segmentTime]); const segmentKey = useMemo( @@ -252,10 +250,7 @@ export function EventSegment({ className="w-[250px] rounded-lg p-2 md:rounded-2xl" side="left" > - + {segmentEvent && } diff --git a/web/src/hooks/use-event-segment-utils.ts b/web/src/hooks/use-event-segment-utils.ts index 3ecafcded..b105c3bf8 100644 --- a/web/src/hooks/use-event-segment-utils.ts +++ b/web/src/hooks/use-event-segment-utils.ts @@ -191,8 +191,8 @@ export const useEventSegmentUtils = ( [events, getSegmentStart, getSegmentEnd, severityType], ); - const getEventThumbnail = useCallback( - (time: number): string => { + const getEvent = useCallback( + (time: number): ReviewSegment | undefined => { const matchingEvent = events.find((event) => { return ( time >= getSegmentStart(event.start_time) && @@ -201,7 +201,7 @@ export const useEventSegmentUtils = ( ); }); - return matchingEvent?.thumb_path ?? ""; + return matchingEvent; }, [events, getSegmentStart, getSegmentEnd, severityType], ); @@ -214,6 +214,6 @@ export const useEventSegmentUtils = ( getReviewed, shouldShowRoundedCorners, getEventStart, - getEventThumbnail, + getEvent, }; }; diff --git a/web/src/views/recording/RecordingView.tsx b/web/src/views/recording/RecordingView.tsx index 149237b63..0c13876e8 100644 --- a/web/src/views/recording/RecordingView.tsx +++ b/web/src/views/recording/RecordingView.tsx @@ -974,7 +974,7 @@ function Timeline({ ? "w-[100px] flex-shrink-0" : timelineType == "detail" ? 
"min-w-[20rem] max-w-[30%] flex-shrink-0 flex-grow-0 basis-[30rem] md:min-w-[20rem] md:max-w-[25%] lg:min-w-[30rem] lg:max-w-[33%]" - : "w-60 flex-shrink-0", + : "w-80 flex-shrink-0", ) : cn( timelineType == "timeline" From 8048168814f0629e6f10d00a31ec38f1d5f26e3c Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Thu, 6 Nov 2025 07:21:07 -0700 Subject: [PATCH 02/31] Bug Fixes (#20825) * Correctly sort summary responses * Consider JinaV2 as a complex model * Subscribe to record updates in camera watchdog * Cleanup score showing * No need to sort review summary * Add tests for recording summary * Don't break existing format * Sort event summary by day --- frigate/api/event.py | 2 +- frigate/api/media.py | 2 +- frigate/detectors/detection_runners.py | 15 +- frigate/test/http_api/test_http_media.py | 379 ++++++++++++++++++ frigate/video.py | 4 +- .../components/card/ClassificationCard.tsx | 6 +- 6 files changed, 400 insertions(+), 8 deletions(-) create mode 100644 frigate/test/http_api/test_http_media.py diff --git a/frigate/api/event.py b/frigate/api/event.py index 544e58fd2..61c1d86c4 100644 --- a/frigate/api/event.py +++ b/frigate/api/event.py @@ -912,7 +912,7 @@ def events_summary( "count": int(g.count or 0), } - return JSONResponse(content=list(grouped.values())) + return JSONResponse(content=sorted(grouped.values(), key=lambda x: x["day"])) @router.get( diff --git a/frigate/api/media.py b/frigate/api/media.py index 8d310fec8..2bad3658f 100644 --- a/frigate/api/media.py +++ b/frigate/api/media.py @@ -496,7 +496,7 @@ def all_recordings_summary( for g in period_query: days[g.day] = True - return JSONResponse(content=days) + return JSONResponse(content=dict(sorted(days.items()))) @router.get( diff --git a/frigate/detectors/detection_runners.py b/frigate/detectors/detection_runners.py index 9dadb16fa..5b45238be 100644 --- a/frigate/detectors/detection_runners.py +++ b/frigate/detectors/detection_runners.py @@ -234,7 +234,10 @@ class OpenVINOModelRunner(BaseModelRunner): # Import here to avoid circular imports from frigate.embeddings.types import EnrichmentModelTypeEnum - return model_type in [EnrichmentModelTypeEnum.paddleocr.value] + return model_type in [ + EnrichmentModelTypeEnum.paddleocr.value, + EnrichmentModelTypeEnum.jina_v2.value, + ] def __init__(self, model_path: str, device: str, model_type: str, **kwargs): self.model_path = model_path @@ -345,6 +348,16 @@ class OpenVINOModelRunner(BaseModelRunner): # Create tensor with the correct element type input_element_type = input_port.get_element_type() + + # Ensure input data matches the expected dtype to prevent type mismatches + # that can occur with models like Jina-CLIP v2 running on OpenVINO + expected_dtype = input_element_type.to_dtype() + if input_data.dtype != expected_dtype: + logger.debug( + f"Converting input '{input_name}' from {input_data.dtype} to {expected_dtype}" + ) + input_data = input_data.astype(expected_dtype) + input_tensor = ov.Tensor(input_element_type, input_data.shape) np.copyto(input_tensor.data, input_data) diff --git a/frigate/test/http_api/test_http_media.py b/frigate/test/http_api/test_http_media.py new file mode 100644 index 000000000..970a331e7 --- /dev/null +++ b/frigate/test/http_api/test_http_media.py @@ -0,0 +1,379 @@ +"""Unit tests for recordings/media API endpoints.""" + +from datetime import datetime, timezone +from typing import Any + +import pytz +from fastapi.testclient import TestClient + +from frigate.api.auth import get_allowed_cameras_for_filter, get_current_user +from frigate.models import 
Recordings +from frigate.test.http_api.base_http_test import BaseTestHttp + + +class TestHttpMedia(BaseTestHttp): + """Test media API endpoints, particularly recordings with DST handling.""" + + def setUp(self): + """Set up test fixtures.""" + super().setUp([Recordings]) + self.app = super().create_app() + + # Mock auth to bypass camera access for tests + async def mock_get_current_user(request: Any): + return {"username": "test_user", "role": "admin"} + + self.app.dependency_overrides[get_current_user] = mock_get_current_user + self.app.dependency_overrides[get_allowed_cameras_for_filter] = lambda: [ + "front_door", + "back_door", + ] + + def tearDown(self): + """Clean up after tests.""" + self.app.dependency_overrides.clear() + super().tearDown() + + def test_recordings_summary_across_dst_spring_forward(self): + """ + Test recordings summary across spring DST transition (spring forward). + + In 2024, DST in America/New_York transitions on March 10, 2024 at 2:00 AM + Clocks spring forward from 2:00 AM to 3:00 AM (EST to EDT) + """ + tz = pytz.timezone("America/New_York") + + # March 9, 2024 at 12:00 PM EST (before DST) + march_9_noon = tz.localize(datetime(2024, 3, 9, 12, 0, 0)).timestamp() + + # March 10, 2024 at 12:00 PM EDT (after DST transition) + march_10_noon = tz.localize(datetime(2024, 3, 10, 12, 0, 0)).timestamp() + + # March 11, 2024 at 12:00 PM EDT (after DST) + march_11_noon = tz.localize(datetime(2024, 3, 11, 12, 0, 0)).timestamp() + + with TestClient(self.app) as client: + # Insert recordings for each day + Recordings.insert( + id="recording_march_9", + path="/media/recordings/march_9.mp4", + camera="front_door", + start_time=march_9_noon, + end_time=march_9_noon + 3600, # 1 hour recording + duration=3600, + motion=100, + objects=5, + ).execute() + + Recordings.insert( + id="recording_march_10", + path="/media/recordings/march_10.mp4", + camera="front_door", + start_time=march_10_noon, + end_time=march_10_noon + 3600, + duration=3600, + motion=150, + objects=8, + ).execute() + + Recordings.insert( + id="recording_march_11", + path="/media/recordings/march_11.mp4", + camera="front_door", + start_time=march_11_noon, + end_time=march_11_noon + 3600, + duration=3600, + motion=200, + objects=10, + ).execute() + + # Test recordings summary with America/New_York timezone + response = client.get( + "/recordings/summary", + params={"timezone": "America/New_York", "cameras": "all"}, + ) + + assert response.status_code == 200 + summary = response.json() + + # Verify we get exactly 3 days + assert len(summary) == 3, f"Expected 3 days, got {len(summary)}" + + # Verify the correct dates are returned (API returns dict with True values) + assert "2024-03-09" in summary, f"Expected 2024-03-09 in {summary}" + assert "2024-03-10" in summary, f"Expected 2024-03-10 in {summary}" + assert "2024-03-11" in summary, f"Expected 2024-03-11 in {summary}" + assert summary["2024-03-09"] is True + assert summary["2024-03-10"] is True + assert summary["2024-03-11"] is True + + def test_recordings_summary_across_dst_fall_back(self): + """ + Test recordings summary across fall DST transition (fall back). 
+ + In 2024, DST in America/New_York transitions on November 3, 2024 at 2:00 AM + Clocks fall back from 2:00 AM to 1:00 AM (EDT to EST) + """ + tz = pytz.timezone("America/New_York") + + # November 2, 2024 at 12:00 PM EDT (before DST transition) + nov_2_noon = tz.localize(datetime(2024, 11, 2, 12, 0, 0)).timestamp() + + # November 3, 2024 at 12:00 PM EST (after DST transition) + # Need to specify is_dst=False to get the time after fall back + nov_3_noon = tz.localize( + datetime(2024, 11, 3, 12, 0, 0), is_dst=False + ).timestamp() + + # November 4, 2024 at 12:00 PM EST (after DST) + nov_4_noon = tz.localize(datetime(2024, 11, 4, 12, 0, 0)).timestamp() + + with TestClient(self.app) as client: + # Insert recordings for each day + Recordings.insert( + id="recording_nov_2", + path="/media/recordings/nov_2.mp4", + camera="front_door", + start_time=nov_2_noon, + end_time=nov_2_noon + 3600, + duration=3600, + motion=100, + objects=5, + ).execute() + + Recordings.insert( + id="recording_nov_3", + path="/media/recordings/nov_3.mp4", + camera="front_door", + start_time=nov_3_noon, + end_time=nov_3_noon + 3600, + duration=3600, + motion=150, + objects=8, + ).execute() + + Recordings.insert( + id="recording_nov_4", + path="/media/recordings/nov_4.mp4", + camera="front_door", + start_time=nov_4_noon, + end_time=nov_4_noon + 3600, + duration=3600, + motion=200, + objects=10, + ).execute() + + # Test recordings summary with America/New_York timezone + response = client.get( + "/recordings/summary", + params={"timezone": "America/New_York", "cameras": "all"}, + ) + + assert response.status_code == 200 + summary = response.json() + + # Verify we get exactly 3 days + assert len(summary) == 3, f"Expected 3 days, got {len(summary)}" + + # Verify the correct dates are returned (API returns dict with True values) + assert "2024-11-02" in summary, f"Expected 2024-11-02 in {summary}" + assert "2024-11-03" in summary, f"Expected 2024-11-03 in {summary}" + assert "2024-11-04" in summary, f"Expected 2024-11-04 in {summary}" + assert summary["2024-11-02"] is True + assert summary["2024-11-03"] is True + assert summary["2024-11-04"] is True + + def test_recordings_summary_multiple_cameras_across_dst(self): + """ + Test recordings summary with multiple cameras across DST boundary. 
+ """ + tz = pytz.timezone("America/New_York") + + # March 9, 2024 at 10:00 AM EST (before DST) + march_9_morning = tz.localize(datetime(2024, 3, 9, 10, 0, 0)).timestamp() + + # March 10, 2024 at 3:00 PM EDT (after DST transition) + march_10_afternoon = tz.localize(datetime(2024, 3, 10, 15, 0, 0)).timestamp() + + with TestClient(self.app) as client: + # Insert recordings for front_door on March 9 + Recordings.insert( + id="front_march_9", + path="/media/recordings/front_march_9.mp4", + camera="front_door", + start_time=march_9_morning, + end_time=march_9_morning + 3600, + duration=3600, + motion=100, + objects=5, + ).execute() + + # Insert recordings for back_door on March 10 + Recordings.insert( + id="back_march_10", + path="/media/recordings/back_march_10.mp4", + camera="back_door", + start_time=march_10_afternoon, + end_time=march_10_afternoon + 3600, + duration=3600, + motion=150, + objects=8, + ).execute() + + # Test with all cameras + response = client.get( + "/recordings/summary", + params={"timezone": "America/New_York", "cameras": "all"}, + ) + + assert response.status_code == 200 + summary = response.json() + + # Verify we get both days + assert len(summary) == 2, f"Expected 2 days, got {len(summary)}" + assert "2024-03-09" in summary + assert "2024-03-10" in summary + assert summary["2024-03-09"] is True + assert summary["2024-03-10"] is True + + def test_recordings_summary_at_dst_transition_time(self): + """ + Test recordings that span the exact DST transition time. + """ + tz = pytz.timezone("America/New_York") + + # March 10, 2024 at 1:00 AM EST (1 hour before DST transition) + # At 2:00 AM, clocks jump to 3:00 AM + before_transition = tz.localize(datetime(2024, 3, 10, 1, 0, 0)).timestamp() + + # Recording that spans the transition (1:00 AM to 3:30 AM EDT) + # This is 1.5 hours of actual time but spans the "missing" hour + after_transition = tz.localize(datetime(2024, 3, 10, 3, 30, 0)).timestamp() + + with TestClient(self.app) as client: + Recordings.insert( + id="recording_during_transition", + path="/media/recordings/transition.mp4", + camera="front_door", + start_time=before_transition, + end_time=after_transition, + duration=after_transition - before_transition, + motion=100, + objects=5, + ).execute() + + response = client.get( + "/recordings/summary", + params={"timezone": "America/New_York", "cameras": "all"}, + ) + + assert response.status_code == 200 + summary = response.json() + + # The recording should appear on March 10 + assert len(summary) == 1 + assert "2024-03-10" in summary + assert summary["2024-03-10"] is True + + def test_recordings_summary_utc_timezone(self): + """ + Test recordings summary with UTC timezone (no DST). 
+ """ + # Use UTC timestamps directly + march_9_utc = datetime(2024, 3, 9, 17, 0, 0, tzinfo=timezone.utc).timestamp() + march_10_utc = datetime(2024, 3, 10, 17, 0, 0, tzinfo=timezone.utc).timestamp() + + with TestClient(self.app) as client: + Recordings.insert( + id="recording_march_9_utc", + path="/media/recordings/march_9_utc.mp4", + camera="front_door", + start_time=march_9_utc, + end_time=march_9_utc + 3600, + duration=3600, + motion=100, + objects=5, + ).execute() + + Recordings.insert( + id="recording_march_10_utc", + path="/media/recordings/march_10_utc.mp4", + camera="front_door", + start_time=march_10_utc, + end_time=march_10_utc + 3600, + duration=3600, + motion=150, + objects=8, + ).execute() + + # Test with UTC timezone + response = client.get( + "/recordings/summary", params={"timezone": "utc", "cameras": "all"} + ) + + assert response.status_code == 200 + summary = response.json() + + # Verify we get both days + assert len(summary) == 2 + assert "2024-03-09" in summary + assert "2024-03-10" in summary + assert summary["2024-03-09"] is True + assert summary["2024-03-10"] is True + + def test_recordings_summary_no_recordings(self): + """ + Test recordings summary when no recordings exist. + """ + with TestClient(self.app) as client: + response = client.get( + "/recordings/summary", + params={"timezone": "America/New_York", "cameras": "all"}, + ) + + assert response.status_code == 200 + summary = response.json() + assert len(summary) == 0 + + def test_recordings_summary_single_camera_filter(self): + """ + Test recordings summary filtered to a single camera. + """ + tz = pytz.timezone("America/New_York") + march_10_noon = tz.localize(datetime(2024, 3, 10, 12, 0, 0)).timestamp() + + with TestClient(self.app) as client: + # Insert recordings for both cameras + Recordings.insert( + id="front_recording", + path="/media/recordings/front.mp4", + camera="front_door", + start_time=march_10_noon, + end_time=march_10_noon + 3600, + duration=3600, + motion=100, + objects=5, + ).execute() + + Recordings.insert( + id="back_recording", + path="/media/recordings/back.mp4", + camera="back_door", + start_time=march_10_noon, + end_time=march_10_noon + 3600, + duration=3600, + motion=150, + objects=8, + ).execute() + + # Test with only front_door camera + response = client.get( + "/recordings/summary", + params={"timezone": "America/New_York", "cameras": "front_door"}, + ) + + assert response.status_code == 200 + summary = response.json() + assert len(summary) == 1 + assert "2024-03-10" in summary + assert summary["2024-03-10"] is True diff --git a/frigate/video.py b/frigate/video.py index e2de5fe49..739fb5c03 100755 --- a/frigate/video.py +++ b/frigate/video.py @@ -196,7 +196,9 @@ class CameraWatchdog(threading.Thread): self.sleeptime = self.config.ffmpeg.retry_interval self.config_subscriber = CameraConfigUpdateSubscriber( - None, {config.name: config}, [CameraConfigUpdateEnum.enabled] + None, + {config.name: config}, + [CameraConfigUpdateEnum.enabled, CameraConfigUpdateEnum.record], ) self.requestor = InterProcessRequestor() self.was_enabled = self.config.enabled diff --git a/web/src/components/card/ClassificationCard.tsx b/web/src/components/card/ClassificationCard.tsx index bde452770..cd7d89827 100644 --- a/web/src/components/card/ClassificationCard.tsx +++ b/web/src/components/card/ClassificationCard.tsx @@ -217,9 +217,7 @@ export function GroupedClassificationCard({ }); if (!best) { - // select an item from the middle of the time series as this usually correlates - // to a more 
representative image than the first or last - return group.at(Math.floor(group.length / 2)); + return group.at(-1); } const bestTyped: ClassificationItemData = best; @@ -230,7 +228,7 @@ export function GroupedClassificationCard({ ? event.sub_label : t(noClassificationLabel) : bestTyped.name, - score: event?.data?.sub_label_score || bestTyped.score, + score: event?.data?.sub_label_score, }; }, [group, event, noClassificationLabel, t]); From 35ce275071d047950afaf2b9ee10681bc10d8789 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Thu, 6 Nov 2025 07:39:44 -0700 Subject: [PATCH 03/31] Add ability to define Review Summary camera context (#20828) * Add ability to define GenAI camera context * Cleanup * Only show example with list --- .../configuration/genai/review_summaries.md | 30 +++++++++++++++++++ frigate/config/camera/review.py | 4 +++ .../post/review_descriptions.py | 1 + frigate/genai/__init__.py | 15 +++++++++- web/public/locales/en/config/review.json | 5 +++- 5 files changed, 53 insertions(+), 2 deletions(-) diff --git a/docs/docs/configuration/genai/review_summaries.md b/docs/docs/configuration/genai/review_summaries.md index 4e8107441..fd6762df9 100644 --- a/docs/docs/configuration/genai/review_summaries.md +++ b/docs/docs/configuration/genai/review_summaries.md @@ -68,6 +68,36 @@ The mere presence of an unidentified person in private areas during late night h +### Camera Spatial Context + +In addition to defining activity patterns, you can provide spatial context for specific cameras to help the LLM generate more accurate and descriptive titles and scene descriptions. The `camera_context` field allows you to describe physical features and locations that are outside the camera's field of view but are relevant for understanding the scene. + +**Important Guidelines:** + +- This context is used **only for descriptive purposes** to help the LLM write better titles and scene descriptions +- It should describe **physical features and spatial relationships** (e.g., "front door is to the right", "driveway on the left") +- It should **NOT** include subjective assessments or threat evaluations (e.g., "high-crime area") +- Threat level determination remains based solely on observable actions defined in the activity patterns + +Example configuration: + +```yaml +cameras: + front_door: + review: + genai: + enabled: true + camera_context: | + - Front door entrance is to the right of the frame + - Driveway and street are to the left + - Steps in the center lead from the sidewalk to the front door + - Garage is located beyond the left edge of the frame +``` + +This helps the LLM generate more natural descriptions like "Person approaching front door" instead of "Person walking toward right side of frame". + +The `camera_context` can be defined globally under `genai.review` and overridden per camera for specific spatial details. + ### Image Source By default, review summaries use preview images (cached preview frames) which have a lower resolution but use fewer tokens per image. 
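As a concrete illustration of the global-plus-override behavior described above, here is a minimal sketch; the nesting of the global value under `genai.review` follows the note in this section, and the camera name and context lines are illustrative rather than taken from this PR:

```yaml
genai:
  review:
    # Global default applied to any camera without its own camera_context
    camera_context: |
      - Property faces a quiet residential street

cameras:
  front_door:
    review:
      genai:
        enabled: true
        # Per-camera value overrides the global default for this camera only
        camera_context: |
          - Front door entrance is to the right of the frame
          - Driveway and street are to the left
```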
For better image quality and more detailed analysis, you can configure Frigate to extract frames directly from recordings at a higher resolution: diff --git a/frigate/config/camera/review.py b/frigate/config/camera/review.py index 67ba3b60c..3ed3ffa76 100644 --- a/frigate/config/camera/review.py +++ b/frigate/config/camera/review.py @@ -140,6 +140,10 @@ Evaluate in this order: The mere presence of an unidentified person in private areas during late night hours is inherently suspicious and warrants human review, regardless of what activity they appear to be doing or how brief the sequence is.""", title="Custom activity context prompt defining normal and suspicious activity patterns for this property.", ) + camera_context: str = Field( + default="", + title="Spatial context about the camera's field of view to help with descriptive accuracy. Should describe physical features and locations outside the frame.", + ) class ReviewConfig(FrigateBaseModel): diff --git a/frigate/data_processing/post/review_descriptions.py b/frigate/data_processing/post/review_descriptions.py index 94250dd37..501168da6 100644 --- a/frigate/data_processing/post/review_descriptions.py +++ b/frigate/data_processing/post/review_descriptions.py @@ -458,6 +458,7 @@ def run_analysis( genai_config.preferred_language, genai_config.debug_save_thumbnails, genai_config.activity_context_prompt, + genai_config.camera_context, ) review_inference_speed.update(datetime.datetime.now().timestamp() - start) diff --git a/frigate/genai/__init__.py b/frigate/genai/__init__.py index 4d789d77e..13d57046c 100644 --- a/frigate/genai/__init__.py +++ b/frigate/genai/__init__.py @@ -45,6 +45,7 @@ class GenAIClient: preferred_language: str | None, debug_save: bool, activity_context_prompt: str, + camera_context: str = "", ) -> ReviewMetadata | None: """Generate a description for the review item activity.""" @@ -69,6 +70,16 @@ class GenAIClient: else: return "\n- (No objects detected)" + def get_camera_context_section() -> str: + if camera_context: + return f"""## Camera Spatial Context + +Use this spatial information when writing the title and scene description to provide more accurate context about where activity is occurring or where people/objects are moving to/from. + +{camera_context}""" + return "" + + camera_context_section = get_camera_context_section() context_prompt = f""" Your task is to analyze the sequence of images ({len(thumbnails)} total) taken in chronological order from the perspective of the {review_data["camera"].replace("_", " ")} security camera. @@ -76,6 +87,8 @@ Your task is to analyze the sequence of images ({len(thumbnails)} total) taken i {activity_context_prompt} +{camera_context_section} + ## Task Instructions Your task is to provide a clear, accurate description of the scene that: @@ -100,7 +113,7 @@ When forming your description: ## Response Format Your response MUST be a flat JSON object with: -- `title` (string): A concise, direct title that describes the purpose or overall action, not just what you literally see. Use names from "Objects in Scene" based on what you visually observe. If you see both a name and an unidentified object of the same type but visually observe only one person/object, use ONLY the name. Examples: "Joe walking dog", "Person taking out trash", "Joe accessing vehicle", "Joe and person on front porch". +- `title` (string): A concise, direct title that describes the purpose or overall action, not just what you literally see. {"Use spatial context when available to make titles more meaningful." 
if camera_context_section else ""} Use names from "Objects in Scene" based on what you visually observe. If you see both a name and an unidentified object of the same type but visually observe only one person/object, use ONLY the name. Examples: "Joe walking dog", "Person taking out trash", "Joe accessing vehicle", "Person leaving porch for driveway", "Joe and person on front porch". - `scene` (string): A narrative description of what happens across the sequence from start to finish. **Only describe actions you can actually observe happening in the frames provided.** Do not infer or assume actions that aren't visible (e.g., if you see someone walking but never see them sit, don't say they sat down). Include setting, detected objects, and their observable actions. Avoid speculation or filling in assumed behaviors. Your description should align with and support the threat level you assign. - `confidence` (float): 0-1 confidence in your analysis. Higher confidence when objects/actions are clearly visible and context is unambiguous. Lower confidence when the sequence is unclear, objects are partially obscured, or context is ambiguous. - `potential_threat_level` (integer): 0, 1, or 2 as defined in "Normal Activity Patterns for This Property" above. Your threat level must be consistent with your scene description and the guidance above. diff --git a/web/public/locales/en/config/review.json b/web/public/locales/en/config/review.json index a44c2cfa9..f5b6cbeae 100644 --- a/web/public/locales/en/config/review.json +++ b/web/public/locales/en/config/review.json @@ -67,8 +67,11 @@ }, "activity_context_prompt": { "label": "Custom activity context prompt defining normal activity patterns for this property." + }, + "camera_context": { + "label": "Spatial context about the camera's field of view to help with descriptive accuracy. Should describe physical features and locations outside the frame. This is for spatial reference only and should NOT include subjective assessments." } } } } -} \ No newline at end of file +} From 32f1d85a6fd617af3df1a27443464e53c80011c8 Mon Sep 17 00:00:00 2001 From: Artem Vladimirov Date: Thu, 6 Nov 2025 19:39:57 +0500 Subject: [PATCH 04/31] fix: add pluralization for userRolesUpdated toast message (#20827) Co-authored-by: Artem Vladimirov --- web/public/locales/en/views/settings.json | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/web/public/locales/en/views/settings.json b/web/public/locales/en/views/settings.json index fe837663c..626e5385a 100644 --- a/web/public/locales/en/views/settings.json +++ b/web/public/locales/en/views/settings.json @@ -745,7 +745,8 @@ "createRole": "Role {{role}} created successfully", "updateCameras": "Cameras updated for role {{role}}", "deleteRole": "Role {{role}} deleted successfully", - "userRolesUpdated": "{{count}} user(s) assigned to this role have been updated to 'viewer', which has access to all cameras." + "userRolesUpdated_one": "{{count}} user assigned to this role has been updated to 'viewer', which has access to all cameras.", + "userRolesUpdated_other": "{{count}} users assigned to this role have been updated to 'viewer', which has access to all cameras." 
}, "error": { "createRoleFailed": "Failed to create role: {{errorMessage}}", From 945317b44e910eed1a72a2967eb8946a9dc66afa Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Thu, 6 Nov 2025 10:22:52 -0600 Subject: [PATCH 05/31] Tracked Object Details pane tweaks (#20830) * add prev/next buttons on desktop * buttons should work with summary and grid view * i18n * small tweaks * don't change dialog size * remove heading and count * remove icons * spacing * two column detail view * add actions to dots menu * move actions menu to its own component * set modal to false on face library dropdown to guard against improper closures https://github.com/shadcn-ui/ui/discussions/6908 * frigate plus layout * remove face training * clean up unused * refactor to remove duplication between mobile and desktop * turn annotation settings into a popover * fix popover * improve annotation offset popver * change icon and popover text in detail stream for annotation settings * clean up * use drawer on mobile * fix setter function * use dialog ref for popover portal * don't portal popover * tweaks * add button type * lower xl max width * fixes * justify --- web/public/locales/en/views/explore.json | 4 +- .../overlay/detail/AnnotationOffsetSlider.tsx | 4 +- .../overlay/detail/AnnotationSettingsPane.tsx | 72 +- .../overlay/detail/DetailActionsMenu.tsx | 118 ++ .../overlay/detail/SearchDetailDialog.tsx | 1350 +++++++++-------- .../overlay/detail/TrackingDetails.tsx | 141 +- web/src/components/timeline/DetailStream.tsx | 12 +- web/src/components/ui/popover.tsx | 28 +- web/src/context/detail-stream-context.tsx | 2 +- web/src/pages/FaceLibrary.tsx | 2 +- web/src/views/search/SearchView.tsx | 157 +- 11 files changed, 1013 insertions(+), 877 deletions(-) create mode 100644 web/src/components/overlay/detail/DetailActionsMenu.tsx diff --git a/web/public/locales/en/views/explore.json b/web/public/locales/en/views/explore.json index 787581bf7..afc81eaa6 100644 --- a/web/public/locales/en/views/explore.json +++ b/web/public/locales/en/views/explore.json @@ -74,7 +74,7 @@ "label": "Annotation Offset", "desc": "This data comes from your camera's detect feed but is overlayed on images from the the record feed. It is unlikely that the two streams are perfectly in sync. As a result, the bounding box and the footage will not line up perfectly. You can use this setting to offset the annotations forward or backward in time to better align them with the recorded footage.", "millisecondsToOffset": "Milliseconds to offset detect annotations by. Default: 0", - "tips": "TIP: Imagine there is an event clip with a person walking from left to right. If the event timeline bounding box is consistently to the left of the person then the value should be decreased. Similarly, if a person is walking from left to right and the bounding box is consistently ahead of the person then the value should be increased.", + "tips": "Lower the value if the video playback is ahead of the boxes and path points, and increase the value if the video playback is behind them. This value can be negative.", "toast": { "success": "Annotation offset for {{camera}} has been saved to the config file. Restart Frigate to apply your changes." 
} @@ -215,6 +215,8 @@ "trackedObjectsCount_other": "{{count}} tracked objects ", "searchResult": { "tooltip": "Matched {{type}} at {{confidence}}%", + "previousTrackedObject": "Previous tracked object", + "nextTrackedObject": "Next tracked object", "deleteTrackedObject": { "toast": { "success": "Tracked object deleted successfully.", diff --git a/web/src/components/overlay/detail/AnnotationOffsetSlider.tsx b/web/src/components/overlay/detail/AnnotationOffsetSlider.tsx index 4af982da5..9f4851d42 100644 --- a/web/src/components/overlay/detail/AnnotationOffsetSlider.tsx +++ b/web/src/components/overlay/detail/AnnotationOffsetSlider.tsx @@ -121,13 +121,13 @@ export default function AnnotationOffsetSlider({ className }: Props) { - {t("trackingDetails.annotationSettings.offset.desc")} + {t("trackingDetails.annotationSettings.offset.tips")}
diff --git a/web/src/components/overlay/detail/AnnotationSettingsPane.tsx b/web/src/components/overlay/detail/AnnotationSettingsPane.tsx index c180502f4..33bf10c5c 100644 --- a/web/src/components/overlay/detail/AnnotationSettingsPane.tsx +++ b/web/src/components/overlay/detail/AnnotationSettingsPane.tsx @@ -1,6 +1,3 @@ -import Heading from "@/components/ui/heading"; -import { Label } from "@/components/ui/label"; -import { Switch } from "@/components/ui/switch"; import { Event } from "@/types/event"; import { FrigateConfig } from "@/types/frigateConfig"; import { zodResolver } from "@hookform/resolvers/zod"; @@ -8,7 +5,6 @@ import axios from "axios"; import { useCallback, useState } from "react"; import { useForm } from "react-hook-form"; import { LuExternalLink } from "react-icons/lu"; -import { PiWarningCircle } from "react-icons/pi"; import { Link } from "react-router-dom"; import { toast } from "sonner"; import useSWR from "swr"; @@ -31,15 +27,11 @@ import { useDocDomain } from "@/hooks/use-doc-domain"; type AnnotationSettingsPaneProps = { event: Event; - showZones: boolean; - setShowZones: React.Dispatch>; annotationOffset: number; setAnnotationOffset: React.Dispatch>; }; export function AnnotationSettingsPane({ event, - showZones, - setShowZones, annotationOffset, setAnnotationOffset, }: AnnotationSettingsPaneProps) { @@ -140,26 +132,12 @@ export function AnnotationSettingsPane({ } return ( -
- +
+
{t("trackingDetails.annotationSettings.title")} - -
-
- - -
-
- {t("trackingDetails.annotationSettings.showAllZones.desc")} -
- + +
( - - - {t("trackingDetails.annotationSettings.offset.label")} - -
-
- -
- - trackingDetails.annotationSettings.offset.desc - + +
+ + {t("trackingDetails.annotationSettings.offset.label")} + + + + trackingDetails.annotationSettings.offset.millisecondsToOffset + + +
+ {t("trackingDetails.annotationSettings.offset.tips")}
-
-
+ +
+
+
- - - trackingDetails.annotationSettings.offset.millisecondsToOffset - -
- {t("trackingDetails.annotationSettings.offset.tips")} -
-
-
)} /> @@ -220,7 +192,9 @@ export function AnnotationSettingsPane({
+ + + + + + +
+ ); +} + +type DialogContentComponentProps = { + page: SearchTab; + search: SearchResult; + isDesktop: boolean; + apiHost: string; + config?: FrigateConfig; + searchTabs: SearchTab[]; + pageToggle: SearchTab; + setPageToggle: (v: SearchTab) => void; + setSearch: (s: SearchResult | undefined) => void; + setInputFocused: React.Dispatch>; + setSimilarity?: () => void; + isPopoverOpen: boolean; + setIsPopoverOpen: (open: boolean) => void; + dialogContainer: HTMLDivElement | null; +}; + +function DialogContentComponent({ + page, + search, + isDesktop, + apiHost, + config, + searchTabs, + pageToggle, + setPageToggle, + setSearch, + setInputFocused, + setSimilarity, + isPopoverOpen, + setIsPopoverOpen, + dialogContainer, +}: DialogContentComponentProps) { + if (page === "tracking_details") { + return ( + + ) : undefined + } + /> + ); + } + + // Snapshot page content + const snapshotElement = search.has_snapshot ? ( + + ) : ( +
+ +
+ ); + + if (isDesktop) { + return ( +
+
+ {snapshotElement} +
+
+ +
+ +
+
+
+ ); + } + + // mobile + return ( + <> + {snapshotElement} + + + ); +} + type SearchDetailDialogProps = { search?: SearchResult; page: SearchTab; @@ -91,7 +415,10 @@ type SearchDetailDialogProps = { setSearchPage: (page: SearchTab) => void; setSimilarity?: () => void; setInputFocused: React.Dispatch>; + onPrevious?: () => void; + onNext?: () => void; }; + export default function SearchDetailDialog({ search, page, @@ -99,6 +426,8 @@ export default function SearchDetailDialog({ setSearchPage, setSimilarity, setInputFocused, + onPrevious, + onNext, }: SearchDetailDialogProps) { const { t } = useTranslation(["views/explore", "views/faceLibrary"]); const { data: config } = useSWR("config", { @@ -117,11 +446,17 @@ export default function SearchDetailDialog({ // dialog and mobile page const [isOpen, setIsOpen] = useState(search != undefined); + const [isPopoverOpen, setIsPopoverOpen] = useState(false); + const dialogContentRef = useRef(null); + const [dialogContainer, setDialogContainer] = useState( + null, + ); const handleOpenChange = useCallback( (open: boolean) => { setIsOpen(open); if (!open) { + setIsPopoverOpen(false); // short timeout to allow the mobile page animation // to complete before updating the state setTimeout(() => { @@ -132,12 +467,18 @@ export default function SearchDetailDialog({ [setSearch], ); + useLayoutEffect(() => { + setDialogContainer(dialogContentRef.current); + }, [isOpen, search?.id]); + useEffect(() => { if (search) { setIsOpen(search != undefined); } }, [search]); + // show/hide annotation settings is handled inside TabsWithActions + const searchTabs = useMemo(() => { if (!config || !search) { return []; @@ -163,46 +504,6 @@ export default function SearchDetailDialog({ } }, [pageToggle, searchTabs, setSearchPage]); - // Tabs component for reuse - const tabsComponent = ( - -
- { - if (value) { - setPageToggle(value); - } - }} - > - {Object.values(searchTabs).map((item) => ( - - {item == "snapshot" && } - {item == "tracking_details" && } -
- {item === "snapshot" - ? search?.has_snapshot - ? t("type.snapshot") - : t("type.thumbnail") - : t(`type.${item}`)} -
-
- ))} -
- -
-
- ); - if (!search) { return; } @@ -227,174 +528,115 @@ export default function SearchDetailDialog({ onOpenChange={handleOpenChange} enableHistoryBack={true} > + {isDesktop && onPrevious && onNext && ( + +
+
+ + + + + + {t("searchResult.previousTrackedObject")} + + + + + + + + + {t("searchResult.nextTrackedObject")} + + +
+
+
+ )} { + if (isPopoverOpen) { + e.preventDefault(); + } + const target = e.target as HTMLElement; + if (target.closest(".nav-button")) { + e.preventDefault(); + } + }} >
{t("trackedObjectDetails")} {t("trackedObjectDetails")} +
- {isDesktop ? ( - page === "tracking_details" ? ( - + - ) : ( -
-
- {page === "snapshot" && search.has_snapshot && ( - { - search.plus_id = "new_upload"; - }} - /> - )} - {page === "snapshot" && !search.has_snapshot && ( - - )} -
-
- {tabsComponent} -
- {page == "snapshot" && ( - - )} -
-
-
- ) - ) : ( - <> - -
- { - if (value) { - setPageToggle(value); - } - }} - > - {Object.values(searchTabs).map((item) => ( - - {item == "snapshot" && } - {item == "tracking_details" && ( - - )} -
- {t(`type.${item}`)} -
-
- ))} -
- -
-
- {page == "snapshot" && ( - <> - {search.has_snapshot && ( - { - search.plus_id = "new_upload"; - }} - /> - )} - {page == "snapshot" && !search.has_snapshot && ( - - )} - - {t("type.details")} - - - - )} - {page == "tracking_details" && ( - - )} - +
)} + + @@ -405,19 +647,19 @@ type ObjectDetailsTabProps = { search: SearchResult; config?: FrigateConfig; setSearch: (search: SearchResult | undefined) => void; - setSimilarity?: () => void; setInputFocused: React.Dispatch>; - showThumbnail?: boolean; }; function ObjectDetailsTab({ search, config, setSearch, - setSimilarity, setInputFocused, - showThumbnail = true, }: ObjectDetailsTabProps) { - const { t } = useTranslation(["views/explore", "views/faceLibrary"]); + const { t, i18n } = useTranslation([ + "views/explore", + "views/faceLibrary", + "components/dialog", + ]); const apiHost = useApiHost(); @@ -783,57 +1025,6 @@ function ObjectDetailsTab({ [search, apiHost, mutate, setSearch, t], ); - // face training - - const hasFace = useMemo(() => { - if (!config?.face_recognition.enabled || !search) { - return false; - } - - return search.data.attributes?.find((attr) => attr.label == "face"); - }, [config, search]); - - const { data: faceData } = useSWR(hasFace ? "faces" : null); - - const faceNames = useMemo( - () => - faceData ? Object.keys(faceData).filter((face) => face != "train") : [], - [faceData], - ); - - const onTrainFace = useCallback( - (trainName: string) => { - axios - .post(`/faces/train/${trainName}/classify`, { event_id: search.id }) - .then((resp) => { - if (resp.status == 200) { - toast.success( - t("toast.success.trainedFace", { ns: "views/faceLibrary" }), - { - position: "top-center", - }, - ); - } - }) - .catch((error) => { - const errorMessage = - error.response?.data?.message || - error.response?.data?.detail || - "Unknown error"; - toast.error( - t("toast.error.trainFailed", { - ns: "views/faceLibrary", - errorMessage, - }), - { - position: "top-center", - }, - ); - }); - }, - [search, t], - ); - // speech transcription const onTranscribe = useCallback(() => { @@ -862,35 +1053,159 @@ function ObjectDetailsTab({ }); }, [search, t]); + // frigate+ submission + + type SubmissionState = "reviewing" | "uploading" | "submitted"; + const [state, setState] = useState( + search?.plus_id ? "submitted" : "reviewing", + ); + + useEffect( + () => setState(search?.plus_id ? "submitted" : "reviewing"), + [search], + ); + + const onSubmitToPlus = useCallback( + async (falsePositive: boolean) => { + if (!search) { + return; + } + + falsePositive + ? axios.put(`events/${search.id}/false_positive`) + : axios.post(`events/${search.id}/plus`, { + include_annotation: 1, + }); + + setState("submitted"); + setSearch({ + ...search, + plus_id: "new_upload", + }); + }, + [search, setSearch], + ); + + const popoverContainerRef = useRef(null); return ( -
+
-
-
{t("details.label")}
-
- {getIconForLabel(search.label, "size-4 text-primary")} - {getTranslatedLabel(search.label)} - {search.sub_label && ` (${search.sub_label})`} - {isAdmin && search.end_time && ( - - - - { - setIsSubLabelDialogOpen(true); - }} - /> - - - - - {t("details.editSubLabel.title")} - - - - )} +
+
+
+
+
+
+ {t("details.label")} +
+
+ {getIconForLabel(search.label, "size-4 text-primary")} + {getTranslatedLabel(search.label)} + {search.sub_label && ` (${search.sub_label})`} + {isAdmin && search.end_time && ( + + + + setIsSubLabelDialogOpen(true)} + /> + + + + + {t("details.editSubLabel.title")} + + + + )} +
+
+ +
+
+
+ {t("details.topScore.label")} + + +
+ + Info +
+
+ + {t("details.topScore.info")} + +
+
+
+
+ {topScore}%{subLabelScore && ` (${subLabelScore}%)`} +
+
+ +
+
+ {t("details.camera")} +
+
+ +
+
+
+
+ +
+
+ {snapScore != undefined && ( +
+
+
+ {t("details.snapshotScore.label")} +
+
+
{snapScore}%
+
+ )} + + {averageEstimatedSpeed && ( +
+
+ {t("details.estimatedSpeed")} +
+
+
+ {averageEstimatedSpeed}{" "} + {config?.ui.unit_system == "imperial" + ? t("unit.speed.mph", { ns: "common" }) + : t("unit.speed.kph", { ns: "common" })} + {velocityAngle != undefined && ( + + + + )} +
+
+
+ )} + +
+
+ {t("details.timestamp")} +
+
{formattedDate}
+
+
+
{search?.data.recognized_license_plate && ( @@ -909,9 +1224,7 @@ function ObjectDetailsTab({ { - setIsLPRDialogOpen(true); - }} + onClick={() => setIsLPRDialogOpen(true)} /> @@ -926,142 +1239,108 @@ function ObjectDetailsTab({
)} -
-
-
- {t("details.topScore.label")} - - -
- - Info -
-
- - {t("details.topScore.info")} - -
-
-
-
- {topScore}%{subLabelScore && ` (${subLabelScore}%)`} -
-
- {snapScore != undefined && ( -
-
-
- {t("details.snapshotScore.label")} +
+
+ +
+
+
+ {t("explore.plus.submitToPlus.label", { + ns: "components/dialog", + })} + + +
+ + Info
-
-
{snapScore}%
-
- )} - {averageEstimatedSpeed && ( -
-
- {t("details.estimatedSpeed")} -
-
- {averageEstimatedSpeed && ( -
- {averageEstimatedSpeed}{" "} - {config?.ui.unit_system == "imperial" - ? t("unit.speed.mph", { ns: "common" }) - : t("unit.speed.kph", { ns: "common" })}{" "} - {velocityAngle != undefined && ( - - - - )} -
- )} -
-
- )} -
-
{t("details.camera")}
-
- -
-
-
-
- {t("details.timestamp")} -
-
{formattedDate}
+ + + {t("explore.plus.submitToPlus.desc", { + ns: "components/dialog", + })} + +
- {showThumbnail && ( -
- -
- {config?.semantic_search.enabled && - setSimilarity != undefined && - search.data.type == "object" && ( - + explore.plus.review.question.ask_full + )} - {hasFace && ( - +
+ - - )} - {config?.cameras[search?.camera].audio_transcription.enabled && - search?.label == "speech" && - search?.end_time && ( - - )} + {t("button.yes", { ns: "common" })} + + +
+ + )} + {state == "uploading" && } + {state == "submitted" && ( +
+ + {t("explore.plus.review.state.submitted")}
-
- )} + )} +
{config?.cameras[search.camera].objects.genai.enabled && @@ -1103,6 +1382,15 @@ function ObjectDetailsTab({ )}
+ {config?.cameras[search?.camera].audio_transcription.enabled && + search?.label == "speech" && + search?.end_time && ( + + )} {config?.cameras[search.camera].objects.genai.enabled && search.end_time && (
@@ -1154,6 +1442,7 @@ function ObjectDetailsTab({ {t("button.save", { ns: "common" })} )} + void; + className?: string; + onEventUploaded?: () => void; }; export function ObjectSnapshotTab({ search, - onEventUploaded, + className, }: ObjectSnapshotTabProps) { - const { t, i18n } = useTranslation(["components/dialog"]); - type SubmissionState = "reviewing" | "uploading" | "submitted"; - const [imgRef, imgLoaded, onImgLoad] = useImageLoaded(); - // upload - - const [state, setState] = useState( - search?.plus_id ? "submitted" : "reviewing", - ); - - useEffect( - () => setState(search?.plus_id ? "submitted" : "reviewing"), - [search], - ); - - const onSubmitToPlus = useCallback( - async (falsePositive: boolean) => { - if (!search) { - return; - } - - falsePositive - ? axios.put(`events/${search.id}/false_positive`) - : axios.post(`events/${search.id}/plus`, { - include_annotation: 1, - }); - - setState("submitted"); - onEventUploaded(); - }, - [search, onEventUploaded], - ); - return ( -
+
-
+
-
+
{search?.id && ( -
+
{`${search?.label}`} -
- - - - - - - - - - - {t("button.download", { ns: "common" })} - - - -
)} - {search.data.type == "object" && - search.plus_id !== "not_enabled" && - search.end_time && - search.label != "on_demand" && ( - - -
-
- {t("explore.plus.submitToPlus.label")} -
-
- {t("explore.plus.submitToPlus.desc")} -
-
- -
- {state == "reviewing" && ( - <> -
- {i18n.language === "en" ? ( - // English with a/an logic plus label - <> - {/^[aeiou]/i.test(search?.label || "") ? ( - - explore.plus.review.question.ask_an - - ) : ( - - explore.plus.review.question.ask_a - - )} - - ) : ( - // For other languages - - explore.plus.review.question.ask_full - - )} -
-
- - -
- - )} - {state == "uploading" && } - {state == "submitted" && ( -
- - {t("explore.plus.review.state.submitted")} -
- )} -
-
-
- )}
@@ -1391,12 +1542,6 @@ type VideoTabProps = { }; export function VideoTab({ search }: VideoTabProps) { - const { t } = useTranslation(["views/explore"]); - const navigate = useNavigate(); - const { data: reviewItem } = useSWR([ - `review/event/${search.id}`, - ]); - const clipTimeRange = useMemo(() => { const startTime = search.start_time - REVIEW_PADDING; const endTime = (search.end_time ?? Date.now() / 1000) + REVIEW_PADDING; @@ -1408,56 +1553,7 @@ export function VideoTab({ search }: VideoTabProps) { return ( <> - -
- {reviewItem && ( - - - { - if (reviewItem?.id) { - const params = new URLSearchParams({ - id: reviewItem.id, - }).toString(); - navigate(`/review?${params}`); - } - }} - > - - - - - - {t("itemMenu.viewInHistory.label")} - - - - )} - - - - - - - - - - - {t("button.download", { ns: "common" })} - - - -
-
+ ); } diff --git a/web/src/components/overlay/detail/TrackingDetails.tsx b/web/src/components/overlay/detail/TrackingDetails.tsx index b505130cc..cd4e18e3b 100644 --- a/web/src/components/overlay/detail/TrackingDetails.tsx +++ b/web/src/components/overlay/detail/TrackingDetails.tsx @@ -2,21 +2,12 @@ import useSWR from "swr"; import { useCallback, useEffect, useMemo, useRef, useState } from "react"; import { Event } from "@/types/event"; import ActivityIndicator from "@/components/indicators/activity-indicator"; -import { Button } from "@/components/ui/button"; import { TrackingDetailsSequence } from "@/types/timeline"; -import Heading from "@/components/ui/heading"; import { FrigateConfig } from "@/types/frigateConfig"; import { formatUnixTimestampToDateTime } from "@/utils/dateUtil"; import { getIconForLabel } from "@/utils/iconUtil"; -import { LuCircle, LuFolderX, LuSettings } from "react-icons/lu"; +import { LuCircle, LuFolderX } from "react-icons/lu"; import { cn } from "@/lib/utils"; -import { - Tooltip, - TooltipContent, - TooltipTrigger, -} from "@/components/ui/tooltip"; -import { AnnotationSettingsPane } from "./AnnotationSettingsPane"; -import { TooltipPortal } from "@radix-ui/react-tooltip"; import HlsVideoPlayer from "@/components/player/HlsVideoPlayer"; import { baseUrl } from "@/api/baseUrl"; import { REVIEW_PADDING } from "@/types/review"; @@ -38,8 +29,6 @@ import axios from "axios"; import { toast } from "sonner"; import { useDetailStream } from "@/context/detail-stream-context"; import { isDesktop, isIOS, isMobileOnly, isSafari } from "react-device-detect"; -import Chip from "@/components/indicators/Chip"; -import { FaDownload, FaHistory } from "react-icons/fa"; import { useApiHost } from "@/api"; import ImageLoadingIndicator from "@/components/indicators/ImageLoadingIndicator"; import ObjectTrackOverlay from "../ObjectTrackOverlay"; @@ -58,15 +47,13 @@ export function TrackingDetails({ }: TrackingDetailsProps) { const videoRef = useRef(null); const { t } = useTranslation(["views/explore"]); - const navigate = useNavigate(); const apiHost = useApiHost(); const imgRef = useRef(null); const [imgLoaded, setImgLoaded] = useState(false); const [displaySource, _setDisplaySource] = useState<"video" | "image">( "video", ); - const { setSelectedObjectIds, annotationOffset, setAnnotationOffset } = - useDetailStream(); + const { setSelectedObjectIds, annotationOffset } = useDetailStream(); // manualOverride holds a record-stream timestamp explicitly chosen by the // user (eg, clicking a lifecycle row). When null we display `currentTime`. @@ -97,8 +84,6 @@ export function TrackingDetails({ const containerRef = useRef(null); const [_selectedZone, setSelectedZone] = useState(""); const [_lifecycleZones, setLifecycleZones] = useState([]); - const [showControls, setShowControls] = useState(false); - const [showZones, setShowZones] = useState(true); const [seekToTimestamp, setSeekToTimestamp] = useState(null); const aspectRatio = useMemo(() => { @@ -359,7 +344,7 @@ export function TrackingDetails({
)} -
- {event && ( - - - { - if (event?.id) { - const params = new URLSearchParams({ - id: event.id, - }).toString(); - navigate(`/review?${params}`); - } - }} - > - - - - - - {t("itemMenu.viewInHistory.label")} - - - - )} - - - - - - - - - - - {t("button.download", { ns: "common" })} - - - -
-
- {isDesktop && tabs &&
{tabs}
} +
+ {isDesktop && tabs && ( +
+
{tabs}
+
+ )}
-
- {t("trackingDetails.title")} - -
- - - - - - - {t("trackingDetails.adjustAnnotationSettings")} - - - -
-
-
-
- {t("trackingDetails.scrollViewTips")} -
-
- {t("trackingDetails.count", { - first: eventSequence?.length ?? 0, - second: eventSequence?.length ?? 0, - })} -
-
{config?.cameras[event.camera]?.onvif.autotracking .enabled_in_config && ( -
+
{t("trackingDetails.autoTrackingTips")}
)} - {showControls && ( - { - if (typeof value === "function") { - const newValue = value(annotationOffset); - setAnnotationOffset(newValue); - } else { - setAnnotationOffset(value); - } - }} - /> - )}
{label} - + {formattedStart ?? ""} - {formattedEnd ?? ""} {event.data?.recognized_license_plate && ( diff --git a/web/src/components/timeline/DetailStream.tsx b/web/src/components/timeline/DetailStream.tsx index 5b45de19f..ca834e2a8 100644 --- a/web/src/components/timeline/DetailStream.tsx +++ b/web/src/components/timeline/DetailStream.tsx @@ -16,13 +16,7 @@ import ActivityIndicator from "../indicators/activity-indicator"; import { Event } from "@/types/event"; import { getIconForLabel } from "@/utils/iconUtil"; import { ReviewSegment } from "@/types/review"; -import { - LuChevronDown, - LuCircle, - LuChevronRight, - LuSettings, -} from "react-icons/lu"; -import { MdAutoAwesome } from "react-icons/md"; +import { LuChevronDown, LuCircle, LuChevronRight } from "react-icons/lu"; import { getTranslatedLabel } from "@/utils/i18n"; import EventMenu from "@/components/timeline/EventMenu"; import { FrigatePlusDialog } from "@/components/overlay/dialog/FrigatePlusDialog"; @@ -32,6 +26,8 @@ import { Link } from "react-router-dom"; import { Switch } from "@/components/ui/switch"; import { usePersistence } from "@/hooks/use-persistence"; import { isDesktop } from "react-device-detect"; +import { PiSlidersHorizontalBold } from "react-icons/pi"; +import { MdAutoAwesome } from "react-icons/md"; type DetailStreamProps = { reviewItems?: ReviewSegment[]; @@ -237,7 +233,7 @@ export default function DetailStream({ className="flex w-full items-center justify-between p-3" >
- + {t("detail.settings")}
{controlsExpanded ? ( diff --git a/web/src/components/ui/popover.tsx b/web/src/components/ui/popover.tsx index bba83f977..017d1bdc7 100644 --- a/web/src/components/ui/popover.tsx +++ b/web/src/components/ui/popover.tsx @@ -11,13 +11,21 @@ const PopoverContent = React.forwardRef< React.ElementRef, React.ComponentPropsWithoutRef & { container?: HTMLElement | null; + disablePortal?: boolean; } >( ( - { className, container, align = "center", sideOffset = 4, ...props }, + { + className, + container, + disablePortal = false, + align = "center", + sideOffset = 4, + ...props + }, ref, - ) => ( - + ) => { + const content = ( - - ), + ); + + if (disablePortal) { + return content; + } + + return ( + + {content} + + ); + }, ); PopoverContent.displayName = PopoverPrimitive.Content.displayName; diff --git a/web/src/context/detail-stream-context.tsx b/web/src/context/detail-stream-context.tsx index ff909a30e..57971f7ac 100644 --- a/web/src/context/detail-stream-context.tsx +++ b/web/src/context/detail-stream-context.tsx @@ -8,7 +8,7 @@ export interface DetailStreamContextType { camera: string; annotationOffset: number; // milliseconds setSelectedObjectIds: React.Dispatch>; - setAnnotationOffset: (ms: number) => void; + setAnnotationOffset: React.Dispatch>; toggleObjectSelection: (id: string | undefined) => void; isDetailMode: boolean; } diff --git a/web/src/pages/FaceLibrary.tsx b/web/src/pages/FaceLibrary.tsx index b6a04ada9..6cc113e77 100644 --- a/web/src/pages/FaceLibrary.tsx +++ b/web/src/pages/FaceLibrary.tsx @@ -524,7 +524,7 @@ function LibrarySelector({ regexErrorMessage={t("description.invalidName")} /> - +