From aff82f809c541374286372eb0d09b3d793d0ec48 Mon Sep 17 00:00:00 2001 From: GuoQing Liu <842607283@qq.com> Date: Sun, 2 Nov 2025 21:45:24 +0800 Subject: [PATCH 01/21] feat: add search filter group audio i18n (#20760) --- .../components/filter/SearchFilterGroup.tsx | 25 ++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/web/src/components/filter/SearchFilterGroup.tsx b/web/src/components/filter/SearchFilterGroup.tsx index b96ed7dd7..1426bb5f9 100644 --- a/web/src/components/filter/SearchFilterGroup.tsx +++ b/web/src/components/filter/SearchFilterGroup.tsx @@ -348,6 +348,26 @@ export function GeneralFilterContent({ onClose, }: GeneralFilterContentProps) { const { t } = useTranslation(["components/filter"]); + const { data: config } = useSWR("config", { + revalidateOnFocus: false, + }); + + const allAudioListenLabels = useMemo(() => { + if (!config) { + return []; + } + + const labels = new Set(); + Object.values(config.cameras).forEach((camera) => { + if (camera?.audio?.enabled) { + camera.audio.listen.forEach((label) => { + labels.add(label); + }); + } + }); + return [...labels].sort(); + }, [config]); + return ( <>
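The hunk above builds the audio filter options as the union of `listen` labels across every camera with audio detection enabled, sorted alphabetically. A standalone sketch of that aggregation, with the config reduced to assumed minimal types for illustration (the real FrigateConfig carries much more):

    // Assumed, simplified shapes -- for illustration only.
    type AudioConfig = { enabled: boolean; listen: string[] };
    type CameraConfig = { audio?: AudioConfig };

    // Union of listen labels across all audio-enabled cameras, sorted.
    function collectAudioLabels(
      cameras: Record<string, CameraConfig>,
    ): string[] {
      const labels = new Set<string>();
      Object.values(cameras).forEach((camera) => {
        if (camera?.audio?.enabled) {
          camera.audio.listen.forEach((label) => labels.add(label));
        }
      });
      return [...labels].sort();
    }

    // e.g. collectAudioLabels(config.cameras) might yield ["bark", "speech"]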
@@ -373,7 +393,10 @@ export function GeneralFilterContent({ {allLabels.map((item) => ( { if (isChecked) { From d44340eca611e1fbc27457e39e9ce69b6554125e Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Sun, 2 Nov 2025 07:48:43 -0600 Subject: [PATCH 02/21] Tracked Object Details pane tweaks (#20762) * normalize path and points sizes * fix bounding box display to only show on actual points that have a box * add support for using snapshots --- .../components/overlay/ObjectTrackOverlay.tsx | 80 ++++++-- .../overlay/detail/TrackingDetails.tsx | 171 +++++++++++++++--- 2 files changed, 208 insertions(+), 43 deletions(-) diff --git a/web/src/components/overlay/ObjectTrackOverlay.tsx b/web/src/components/overlay/ObjectTrackOverlay.tsx index ec51786b8..07f900c51 100644 --- a/web/src/components/overlay/ObjectTrackOverlay.tsx +++ b/web/src/components/overlay/ObjectTrackOverlay.tsx @@ -58,6 +58,47 @@ export default function ObjectTrackOverlay({ const effectiveCurrentTime = currentTime - annotationOffset / 1000; + const { + pathStroke, + pointRadius, + pointStroke, + zoneStroke, + boxStroke, + highlightRadius, + } = useMemo(() => { + const BASE_WIDTH = 1280; + const BASE_HEIGHT = 720; + const BASE_PATH_STROKE = 5; + const BASE_POINT_RADIUS = 7; + const BASE_POINT_STROKE = 3; + const BASE_ZONE_STROKE = 5; + const BASE_BOX_STROKE = 5; + const BASE_HIGHLIGHT_RADIUS = 5; + + const scale = Math.sqrt( + (videoWidth * videoHeight) / (BASE_WIDTH * BASE_HEIGHT), + ); + + const pathStroke = Math.max(1, Math.round(BASE_PATH_STROKE * scale)); + const pointRadius = Math.max(2, Math.round(BASE_POINT_RADIUS * scale)); + const pointStroke = Math.max(1, Math.round(BASE_POINT_STROKE * scale)); + const zoneStroke = Math.max(1, Math.round(BASE_ZONE_STROKE * scale)); + const boxStroke = Math.max(1, Math.round(BASE_BOX_STROKE * scale)); + const highlightRadius = Math.max( + 2, + Math.round(BASE_HIGHLIGHT_RADIUS * scale), + ); + + return { + pathStroke, + pointRadius, + pointStroke, + zoneStroke, + boxStroke, + highlightRadius, + }; + }, [videoWidth, videoHeight]); + // Fetch all event data in a single request (CSV ids) const { data: eventsData } = useSWR( selectedObjectIds.length > 0 @@ -198,16 +239,21 @@ export default function ObjectTrackOverlay({ b.timestamp - a.timestamp, )[0]?.data?.zones || []; - // bounding box (with tolerance for browsers with seek precision by-design issues) - const boxCandidates = timelineData?.filter( - (event: TrackingDetailsSequence) => - event.timestamp <= effectiveCurrentTime + TOLERANCE && - event.data.box, - ); - const currentBox = boxCandidates?.sort( - (a: TrackingDetailsSequence, b: TrackingDetailsSequence) => - b.timestamp - a.timestamp, - )[0]?.data?.box; + // bounding box - only show if there's a timeline event at/near the current time with a box + // Search all timeline events (not just those before current time) to find one matching the seek position + const nearbyTimelineEvent = timelineData + ?.filter((event: TrackingDetailsSequence) => event.data.box) + .sort( + (a: TrackingDetailsSequence, b: TrackingDetailsSequence) => + Math.abs(a.timestamp - effectiveCurrentTime) - + Math.abs(b.timestamp - effectiveCurrentTime), + ) + .find( + (event: TrackingDetailsSequence) => + Math.abs(event.timestamp - effectiveCurrentTime) <= TOLERANCE, + ); + + const currentBox = nearbyTimelineEvent?.data?.box; return { objectId, @@ -333,7 +379,7 @@ export default function ObjectTrackOverlay({ points={zone.points} fill={zone.fill} stroke={zone.stroke} - 
strokeWidth="5" + strokeWidth={zoneStroke} opacity="0.7" /> ))} @@ -353,7 +399,7 @@ export default function ObjectTrackOverlay({ d={generateStraightPath(absolutePositions)} fill="none" stroke={objData.color} - strokeWidth="5" + strokeWidth={pathStroke} strokeLinecap="round" strokeLinejoin="round" /> @@ -365,13 +411,13 @@ export default function ObjectTrackOverlay({ handlePointClick(pos.timestamp)} /> @@ -400,7 +446,7 @@ export default function ObjectTrackOverlay({ height={objData.currentBox[3] * videoHeight} fill="none" stroke={objData.color} - strokeWidth="5" + strokeWidth={boxStroke} opacity="0.9" /> diff --git a/web/src/components/overlay/detail/TrackingDetails.tsx b/web/src/components/overlay/detail/TrackingDetails.tsx index 82fb14771..b505130cc 100644 --- a/web/src/components/overlay/detail/TrackingDetails.tsx +++ b/web/src/components/overlay/detail/TrackingDetails.tsx @@ -8,7 +8,7 @@ import Heading from "@/components/ui/heading"; import { FrigateConfig } from "@/types/frigateConfig"; import { formatUnixTimestampToDateTime } from "@/utils/dateUtil"; import { getIconForLabel } from "@/utils/iconUtil"; -import { LuCircle, LuSettings } from "react-icons/lu"; +import { LuCircle, LuFolderX, LuSettings } from "react-icons/lu"; import { cn } from "@/lib/utils"; import { Tooltip, @@ -37,9 +37,12 @@ import { HiDotsHorizontal } from "react-icons/hi"; import axios from "axios"; import { toast } from "sonner"; import { useDetailStream } from "@/context/detail-stream-context"; -import { isDesktop, isIOS } from "react-device-detect"; +import { isDesktop, isIOS, isMobileOnly, isSafari } from "react-device-detect"; import Chip from "@/components/indicators/Chip"; import { FaDownload, FaHistory } from "react-icons/fa"; +import { useApiHost } from "@/api"; +import ImageLoadingIndicator from "@/components/indicators/ImageLoadingIndicator"; +import ObjectTrackOverlay from "../ObjectTrackOverlay"; type TrackingDetailsProps = { className?: string; @@ -56,9 +59,19 @@ export function TrackingDetails({ const videoRef = useRef(null); const { t } = useTranslation(["views/explore"]); const navigate = useNavigate(); + const apiHost = useApiHost(); + const imgRef = useRef(null); + const [imgLoaded, setImgLoaded] = useState(false); + const [displaySource, _setDisplaySource] = useState<"video" | "image">( + "video", + ); const { setSelectedObjectIds, annotationOffset, setAnnotationOffset } = useDetailStream(); + // manualOverride holds a record-stream timestamp explicitly chosen by the + // user (eg, clicking a lifecycle row). When null we display `currentTime`. + const [manualOverride, setManualOverride] = useState(null); + // event.start_time is detect time, convert to record, then subtract padding const [currentTime, setCurrentTime] = useState( (event.start_time ?? 0) + annotationOffset / 1000 - REVIEW_PADDING, @@ -73,9 +86,13 @@ export function TrackingDetails({ const { data: config } = useSWR("config"); + // Use manualOverride (set when seeking in image mode) if present so + // lifecycle rows and overlays follow image-mode seeks. Otherwise fall + // back to currentTime used for video mode. const effectiveTime = useMemo(() => { - return currentTime - annotationOffset / 1000; - }, [currentTime, annotationOffset]); + const displayedRecordTime = manualOverride ?? 
currentTime; + return displayedRecordTime - annotationOffset / 1000; + }, [manualOverride, currentTime, annotationOffset]); const containerRef = useRef(null); const [_selectedZone, setSelectedZone] = useState(""); @@ -118,20 +135,30 @@ export function TrackingDetails({ const handleLifecycleClick = useCallback( (item: TrackingDetailsSequence) => { - if (!videoRef.current) return; + if (!videoRef.current && !imgRef.current) return; // Convert lifecycle timestamp (detect stream) to record stream time const targetTimeRecord = item.timestamp + annotationOffset / 1000; - // Convert to video-relative time for seeking + if (displaySource === "image") { + // For image mode: set a manual override timestamp and update + // currentTime so overlays render correctly. + setManualOverride(targetTimeRecord); + setCurrentTime(targetTimeRecord); + return; + } + + // For video mode: convert to video-relative time and seek player const eventStartRecord = (event.start_time ?? 0) + annotationOffset / 1000; const videoStartTime = eventStartRecord - REVIEW_PADDING; const relativeTime = targetTimeRecord - videoStartTime; - videoRef.current.currentTime = relativeTime; + if (videoRef.current) { + videoRef.current.currentTime = relativeTime; + } }, - [event.start_time, annotationOffset], + [event.start_time, annotationOffset, displaySource], ); const formattedStart = config @@ -172,11 +199,20 @@ export function TrackingDetails({ }, [eventSequence]); useEffect(() => { - if (seekToTimestamp === null || !videoRef.current) return; + if (seekToTimestamp === null) return; + + if (displaySource === "image") { + // For image mode, set the manual override so the snapshot updates to + // the exact record timestamp. + setManualOverride(seekToTimestamp); + setSeekToTimestamp(null); + return; + } // seekToTimestamp is a record stream timestamp // event.start_time is detect stream time, convert to record // The video clip starts at (eventStartRecord - REVIEW_PADDING) + if (!videoRef.current) return; const eventStartRecord = event.start_time + annotationOffset / 1000; const videoStartTime = eventStartRecord - REVIEW_PADDING; const relativeTime = seekToTimestamp - videoStartTime; @@ -184,7 +220,14 @@ export function TrackingDetails({ videoRef.current.currentTime = relativeTime; } setSeekToTimestamp(null); - }, [seekToTimestamp, event.start_time, annotationOffset]); + }, [ + seekToTimestamp, + event.start_time, + annotationOffset, + apiHost, + event.camera, + displaySource, + ]); const isWithinEventRange = effectiveTime !== undefined && @@ -287,6 +330,27 @@ export function TrackingDetails({ [event.start_time, annotationOffset], ); + const [src, setSrc] = useState( + `${apiHost}api/${event.camera}/recordings/${currentTime + REVIEW_PADDING}/snapshot.jpg?height=500`, + ); + const [hasError, setHasError] = useState(false); + + // Derive the record timestamp to display: manualOverride if present, + // otherwise use currentTime. + const displayedRecordTime = manualOverride ?? currentTime; + + useEffect(() => { + if (displayedRecordTime) { + const newSrc = `${apiHost}api/${event.camera}/recordings/${displayedRecordTime}/snapshot.jpg?height=500`; + setSrc(newSrc); + } + setImgLoaded(false); + setHasError(false); + + // we know that these deps are correct + // eslint-disable-next-line react-hooks/exhaustive-deps + }, [displayedRecordTime]); + if (!config) { return ; } @@ -304,9 +368,10 @@ export function TrackingDetails({
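{/*
  Worked example of the time conversions above, with illustrative numbers
  (annotationOffset = 0 ms, REVIEW_PADDING = 5 s, event.start_time = 1000;
  these constants are placeholders, not Frigate's actual values):
  eventStartRecord = 1000 + 0 = 1000 and videoStartTime = 1000 - 5 = 995,
  so a lifecycle timestamp of 1010 seeks the clip to 1010 - 995 = 15 s in
  video mode, while image mode instead sets manualOverride = 1010 and
  requests api/{camera}/recordings/1010/snapshot.jpg.
*/}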
- + {displaySource == "video" && ( + + )} + {displaySource == "image" && ( + <> + + {hasError && ( +
+
+ + {t("objectLifecycle.noImageFound")} +
+
+ )} +
+
+ +
+ setImgLoaded(true)} + onError={() => setHasError(true)} + /> +
+ + )}
Date: Mon, 3 Nov 2025 06:34:06 -0700 Subject: [PATCH 03/21] Classification fixes (#20771) * Fully delete a model * Fix deletion dialog * Fix classification back step * Adjust selection gradient * Fix * Fix --- frigate/api/classification.py | 30 +++++++++++++++-- .../wizard/Step3ChooseExamples.tsx | 17 +++++++++- .../classification/ModelSelectionView.tsx | 33 +++++++------------ 3 files changed, 54 insertions(+), 26 deletions(-) diff --git a/frigate/api/classification.py b/frigate/api/classification.py index 1b91afeea..975370d59 100644 --- a/frigate/api/classification.py +++ b/frigate/api/classification.py @@ -31,13 +31,15 @@ from frigate.api.defs.response.generic_response import GenericResponse from frigate.api.defs.tags import Tags from frigate.config import FrigateConfig from frigate.config.camera import DetectConfig -from frigate.const import CLIPS_DIR, FACE_DIR +from frigate.const import CLIPS_DIR, FACE_DIR, MODEL_CACHE_DIR from frigate.embeddings import EmbeddingsContext from frigate.models import Event +from frigate.util.builtin import update_yaml_file_bulk from frigate.util.classification import ( collect_object_classification_examples, collect_state_classification_examples, ) +from frigate.util.config import find_config_file from frigate.util.path import get_event_snapshot logger = logging.getLogger(__name__) @@ -828,12 +830,34 @@ def delete_classification_model(request: Request, name: str): status_code=404, ) - # Delete the classification model's data directory - model_dir = os.path.join(CLIPS_DIR, sanitize_filename(name)) + # Delete the classification model's data directory in clips + data_dir = os.path.join(CLIPS_DIR, sanitize_filename(name)) + if os.path.exists(data_dir): + shutil.rmtree(data_dir) + # Delete the classification model's files in model_cache + model_dir = os.path.join(MODEL_CACHE_DIR, sanitize_filename(name)) if os.path.exists(model_dir): shutil.rmtree(model_dir) + # Remove the model from the config file + config_file = find_config_file() + try: + # Setting value to empty string deletes the key + updates = {f"classification.custom.{name}": None} + update_yaml_file_bulk(config_file, updates) + except Exception as e: + logger.error(f"Error updating config file: {e}") + return JSONResponse( + content=( + { + "success": False, + "message": "Failed to update config file.", + } + ), + status_code=500, + ) + return JSONResponse( content=( { diff --git a/web/src/components/classification/wizard/Step3ChooseExamples.tsx b/web/src/components/classification/wizard/Step3ChooseExamples.tsx index 06bb2bbad..68da03eaf 100644 --- a/web/src/components/classification/wizard/Step3ChooseExamples.tsx +++ b/web/src/components/classification/wizard/Step3ChooseExamples.tsx @@ -317,6 +317,21 @@ export default function Step3ChooseExamples({ return unclassifiedImages.length === 0; }, [unclassifiedImages]); + const handleBack = useCallback(() => { + if (currentClassIndex > 0) { + const previousClass = allClasses[currentClassIndex - 1]; + setCurrentClassIndex((prev) => prev - 1); + + // Restore selections for the previous class + const previousSelections = Object.entries(imageClassifications) + .filter(([_, className]) => className === previousClass) + .map(([imageName, _]) => imageName); + setSelectedImages(new Set(previousSelections)); + } else { + onBack(); + } + }, [currentClassIndex, allClasses, imageClassifications, onBack]); + return (
{isTraining ? ( @@ -420,7 +435,7 @@ export default function Step3ChooseExamples({ {!isTraining && (
-
)} + {isTesting && ( +
+ + {testStatus} +
+ )}
)}
diff --git a/web/src/components/settings/wizard/Step2StreamConfig.tsx b/web/src/components/settings/wizard/Step2StreamConfig.tsx index 5827e6467..a9cb00c2e 100644 --- a/web/src/components/settings/wizard/Step2StreamConfig.tsx +++ b/web/src/components/settings/wizard/Step2StreamConfig.tsx @@ -201,16 +201,12 @@ export default function Step2StreamConfig({ const setRestream = useCallback( (streamId: string) => { - const currentIds = wizardData.restreamIds || []; - const isSelected = currentIds.includes(streamId); - const newIds = isSelected - ? currentIds.filter((id) => id !== streamId) - : [...currentIds, streamId]; - onUpdate({ - restreamIds: newIds, - }); + const stream = streams.find((s) => s.id === streamId); + if (!stream) return; + + updateStream(streamId, { restream: !stream.restream }); }, - [wizardData.restreamIds, onUpdate], + [streams, updateStream], ); const hasDetectRole = streams.some((s) => s.roles.includes("detect")); @@ -435,9 +431,7 @@ export default function Step2StreamConfig({ {t("cameraWizard.step2.go2rtc")} setRestream(stream.id)} />
diff --git a/web/src/components/settings/wizard/Step3Validation.tsx b/web/src/components/settings/wizard/Step3Validation.tsx index 9f4b25330..a0dd72e7e 100644 --- a/web/src/components/settings/wizard/Step3Validation.tsx +++ b/web/src/components/settings/wizard/Step3Validation.tsx @@ -1,7 +1,13 @@ import { Button } from "@/components/ui/button"; import { Badge } from "@/components/ui/badge"; +import { Switch } from "@/components/ui/switch"; +import { + Popover, + PopoverContent, + PopoverTrigger, +} from "@/components/ui/popover"; import { useTranslation } from "react-i18next"; -import { LuRotateCcw } from "react-icons/lu"; +import { LuRotateCcw, LuInfo } from "react-icons/lu"; import { useState, useCallback, useMemo, useEffect } from "react"; import ActivityIndicator from "@/components/indicators/activity-indicator"; import axios from "axios"; @@ -216,7 +222,6 @@ export default function Step3Validation({ brandTemplate: wizardData.brandTemplate, customUrl: wizardData.customUrl, streams: wizardData.streams, - restreamIds: wizardData.restreamIds, }; onSave(configData); @@ -322,6 +327,51 @@ export default function Step3Validation({
)} + {result?.success && ( +
+
+ + {t("cameraWizard.step3.ffmpegModule")} + + + + + + +
+
+ {t("cameraWizard.step3.ffmpegModule")} +
+
+ {t( + "cameraWizard.step3.ffmpegModuleDescription", + )} +
+
+
+
+
+ { + onUpdate({ + streams: streams.map((s) => + s.id === stream.id + ? { ...s, useFfmpeg: checked } + : s, + ), + }); + }} + /> +
+ )} +
{stream.url} @@ -491,8 +541,7 @@ function StreamIssues({ // Restreaming check if (stream.roles.includes("record")) { - const restreamIds = wizardData.restreamIds || []; - if (restreamIds.includes(stream.id)) { + if (stream.restream) { result.push({ type: "warning", message: t("cameraWizard.step3.issues.restreamingWarning"), @@ -660,9 +709,10 @@ function StreamPreview({ stream, onBandwidthUpdate }: StreamPreviewProps) { useEffect(() => { // Register stream with go2rtc + const streamUrl = stream.useFfmpeg ? `ffmpeg:${stream.url}` : stream.url; axios .put(`go2rtc/streams/${streamId}`, null, { - params: { src: stream.url }, + params: { src: streamUrl }, }) .then(() => { // Add small delay to allow go2rtc api to run and initialize the stream @@ -680,7 +730,7 @@ function StreamPreview({ stream, onBandwidthUpdate }: StreamPreviewProps) { // do nothing on cleanup errors - go2rtc won't consume the streams }); }; - }, [stream.url, streamId]); + }, [stream.url, stream.useFfmpeg, streamId]); const resolution = stream.testResult?.resolution; let aspectRatio = "16/9"; diff --git a/web/src/types/cameraWizard.ts b/web/src/types/cameraWizard.ts index f80dc60c2..a37eafafc 100644 --- a/web/src/types/cameraWizard.ts +++ b/web/src/types/cameraWizard.ts @@ -85,6 +85,8 @@ export type StreamConfig = { quality?: string; testResult?: TestResult; userTested?: boolean; + useFfmpeg?: boolean; + restream?: boolean; }; export type TestResult = { @@ -105,7 +107,6 @@ export type WizardFormData = { brandTemplate?: CameraBrand; customUrl?: string; streams?: StreamConfig[]; - restreamIds?: string[]; }; // API Response Types @@ -146,6 +147,7 @@ export type CameraConfigData = { inputs: { path: string; roles: string[]; + input_args?: string; }[]; }; live?: { From 55294328564e6eda2b667dc4751a211e6feb2b74 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Mon, 3 Nov 2025 09:05:03 -0700 Subject: [PATCH 07/21] Various fixes (#20774) * Change order of deletion * Add debug log for camera enabled * Add more face debug logs * Set jetson numpy version --- docker/tensorrt/requirements-arm64.txt | 1 + frigate/data_processing/real_time/face.py | 7 ++++++- frigate/embeddings/maintainer.py | 2 ++ web/src/views/classification/ModelSelectionView.tsx | 5 +---- 4 files changed, 10 insertions(+), 5 deletions(-) diff --git a/docker/tensorrt/requirements-arm64.txt b/docker/tensorrt/requirements-arm64.txt index c9b618180..78d659746 100644 --- a/docker/tensorrt/requirements-arm64.txt +++ b/docker/tensorrt/requirements-arm64.txt @@ -1 +1,2 @@ cuda-python == 12.6.*; platform_machine == 'aarch64' +numpy == 1.26.*; platform_machine == 'aarch64' diff --git a/frigate/data_processing/real_time/face.py b/frigate/data_processing/real_time/face.py index 6148e8c05..2c6b02103 100644 --- a/frigate/data_processing/real_time/face.py +++ b/frigate/data_processing/real_time/face.py @@ -166,6 +166,7 @@ class FaceRealTimeProcessor(RealTimeProcessorApi): camera = obj_data["camera"] if not self.config.cameras[camera].face_recognition.enabled: + logger.debug(f"Face recognition disabled for camera {camera}, skipping") return start = datetime.datetime.now().timestamp() @@ -208,6 +209,7 @@ class FaceRealTimeProcessor(RealTimeProcessorApi): person_box = obj_data.get("box") if not person_box: + logger.debug(f"No person box available for {id}") return rgb = cv2.cvtColor(frame, cv2.COLOR_YUV2RGB_I420) @@ -233,7 +235,8 @@ class FaceRealTimeProcessor(RealTimeProcessorApi): try: face_frame = cv2.cvtColor(face_frame, cv2.COLOR_RGB2BGR) - except Exception: + except Exception as e: + 
logger.debug(f"Failed to convert face frame color for {id}: {e}") return else: # don't run for object without attributes @@ -251,6 +254,7 @@ class FaceRealTimeProcessor(RealTimeProcessorApi): # no faces detected in this frame if not face: + logger.debug(f"No face attributes found for {id}") return face_box = face.get("box") @@ -274,6 +278,7 @@ class FaceRealTimeProcessor(RealTimeProcessorApi): res = self.recognizer.classify(face_frame) if not res: + logger.debug(f"Face recognizer returned no result for {id}") self.__update_metrics(datetime.datetime.now().timestamp() - start) return diff --git a/frigate/embeddings/maintainer.py b/frigate/embeddings/maintainer.py index 4ab8132c1..d169d2d88 100644 --- a/frigate/embeddings/maintainer.py +++ b/frigate/embeddings/maintainer.py @@ -158,11 +158,13 @@ class EmbeddingMaintainer(threading.Thread): self.realtime_processors: list[RealTimeProcessorApi] = [] if self.config.face_recognition.enabled: + logger.debug("Face recognition enabled, initializing FaceRealTimeProcessor") self.realtime_processors.append( FaceRealTimeProcessor( self.config, self.requestor, self.event_metadata_publisher, metrics ) ) + logger.debug("FaceRealTimeProcessor initialized successfully") if self.config.classification.bird.enabled: self.realtime_processors.append( diff --git a/web/src/views/classification/ModelSelectionView.tsx b/web/src/views/classification/ModelSelectionView.tsx index b353be65f..b1b462497 100644 --- a/web/src/views/classification/ModelSelectionView.tsx +++ b/web/src/views/classification/ModelSelectionView.tsx @@ -214,7 +214,7 @@ function ModelCard({ config, onClick, onDelete }: ModelCardProps) { const handleDelete = useCallback(async () => { try { - // First, remove from config to stop the processor + await axios.delete(`classification/${config.name}`); await axios.put("/config/set", { requires_restart: 0, update_topic: `config/classification/custom/${config.name}`, @@ -227,9 +227,6 @@ function ModelCard({ config, onClick, onDelete }: ModelCardProps) { }, }); - // Then, delete the model data and files - await axios.delete(`classification/${config.name}`); - toast.success(t("toast.success.deletedModel", { count: 1 }), { position: "top-center", }); From fc1cad28729ca07f642ae1ed8504657688d635a2 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Mon, 3 Nov 2025 13:11:02 -0700 Subject: [PATCH 08/21] Adjust LPR packages for licensing (#20780) --- docker/main/requirements-wheels.txt | 2 +- frigate/data_processing/common/license_plate/mixin.py | 11 +++++++---- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/docker/main/requirements-wheels.txt b/docker/main/requirements-wheels.txt index 7c0dc1843..b28de5e6b 100644 --- a/docker/main/requirements-wheels.txt +++ b/docker/main/requirements-wheels.txt @@ -56,7 +56,7 @@ pywebpush == 2.0.* # alpr pyclipper == 1.3.* shapely == 2.0.* -Levenshtein==0.26.* +rapidfuzz==3.12.* # HailoRT Wheels appdirs==1.4.* argcomplete==2.0.* diff --git a/frigate/data_processing/common/license_plate/mixin.py b/frigate/data_processing/common/license_plate/mixin.py index 80a169c25..a2509d4fa 100644 --- a/frigate/data_processing/common/license_plate/mixin.py +++ b/frigate/data_processing/common/license_plate/mixin.py @@ -14,8 +14,8 @@ from typing import Any, List, Optional, Tuple import cv2 import numpy as np -from Levenshtein import distance, jaro_winkler from pyclipper import ET_CLOSEDPOLYGON, JT_ROUND, PyclipperOffset +from rapidfuzz.distance import JaroWinkler, Levenshtein from shapely.geometry import Polygon from 
frigate.comms.event_metadata_updater import ( @@ -1123,7 +1123,9 @@ class LicensePlateProcessingMixin: for i, plate in enumerate(plates): merged = False for j, cluster in enumerate(clusters): - sims = [jaro_winkler(plate["plate"], v["plate"]) for v in cluster] + sims = [ + JaroWinkler.similarity(plate["plate"], v["plate"]) for v in cluster + ] if len(sims) > 0: avg_sim = sum(sims) / len(sims) if avg_sim >= self.cluster_threshold: @@ -1500,7 +1502,7 @@ class LicensePlateProcessingMixin: and current_time - data["last_seen"] <= self.config.cameras[camera].lpr.expire_time ): - similarity = jaro_winkler(data["plate"], top_plate) + similarity = JaroWinkler.similarity(data["plate"], top_plate) if similarity >= self.similarity_threshold: plate_id = existing_id logger.debug( @@ -1580,7 +1582,8 @@ class LicensePlateProcessingMixin: for label, plates_list in self.lpr_config.known_plates.items() if any( re.match(f"^{plate}$", rep_plate) - or distance(plate, rep_plate) <= self.lpr_config.match_distance + or Levenshtein.distance(plate, rep_plate) + <= self.lpr_config.match_distance for plate in plates_list ) ), From 85f7138361b5850a2a11a5d8fb79b4b9d09452e4 Mon Sep 17 00:00:00 2001 From: Abinila Siva <163017635+abinila4@users.noreply.github.com> Date: Mon, 3 Nov 2025 15:23:51 -0500 Subject: [PATCH 09/21] update installation code to hold SDK 2.1 version (#20781) --- docker/main/install_memryx.sh | 4 ++-- docker/memryx/user_installation.sh | 17 +++++++---------- 2 files changed, 9 insertions(+), 12 deletions(-) diff --git a/docker/main/install_memryx.sh b/docker/main/install_memryx.sh index f96181ae0..676e06daa 100644 --- a/docker/main/install_memryx.sh +++ b/docker/main/install_memryx.sh @@ -2,9 +2,9 @@ set -e # Download the MxAccl for Frigate github release -wget https://github.com/memryx/mx_accl_frigate/archive/refs/heads/main.zip -O /tmp/mxaccl.zip +wget https://github.com/memryx/mx_accl_frigate/archive/refs/tags/v2.1.0.zip -O /tmp/mxaccl.zip unzip /tmp/mxaccl.zip -d /tmp -mv /tmp/mx_accl_frigate-main /opt/mx_accl_frigate +mv /tmp/mx_accl_frigate-2.1.0 /opt/mx_accl_frigate rm /tmp/mxaccl.zip # Install Python dependencies diff --git a/docker/memryx/user_installation.sh b/docker/memryx/user_installation.sh index 20c9b8ece..b92b7e3b1 100644 --- a/docker/memryx/user_installation.sh +++ b/docker/memryx/user_installation.sh @@ -24,10 +24,13 @@ echo "Adding MemryX GPG key and repository..." wget -qO- https://developer.memryx.com/deb/memryx.asc | sudo tee /etc/apt/trusted.gpg.d/memryx.asc >/dev/null echo 'deb https://developer.memryx.com/deb stable main' | sudo tee /etc/apt/sources.list.d/memryx.list >/dev/null -# Update and install memx-drivers -echo "Installing memx-drivers..." +# Update and install specific SDK 2.1 packages +echo "Installing MemryX SDK 2.1 packages..." sudo apt update -sudo apt install -y memx-drivers +sudo apt install -y memx-drivers=2.1.* memx-accl=2.1.* mxa-manager=2.1.* + +# Hold packages to prevent automatic upgrades +sudo apt-mark hold memx-drivers memx-accl mxa-manager # ARM-specific board setup if [[ "$arch" == "aarch64" || "$arch" == "arm64" ]]; then @@ -37,11 +40,5 @@ fi echo -e "\n\n\033[1;31mYOU MUST RESTART YOUR COMPUTER NOW\033[0m\n\n" -# Install other runtime packages -packages=("memx-accl" "mxa-manager") -for pkg in "${packages[@]}"; do - echo "Installing $pkg..." - sudo apt install -y "$pkg" -done +echo "MemryX SDK 2.1 installation complete!" -echo "MemryX installation complete!" 
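PATCH 08 above swaps the Levenshtein package for rapidfuzz (per the commit subject, for licensing reasons) while keeping equivalent semantics: a Jaro-Winkler similarity in [0, 1] for clustering and re-identifying plate reads, and an edit distance for matching against known_plates. A minimal usage sketch:

    # Minimal sketch of the rapidfuzz calls adopted in PATCH 08.
    from rapidfuzz.distance import JaroWinkler, Levenshtein

    # Similarity in [0, 1]; 1.0 means identical strings.
    print(JaroWinkler.similarity("ABC123", "ABC123"))  # 1.0
    print(JaroWinkler.similarity("ABC123", "A8C123"))  # high: one OCR slip

    # Plain edit distance, compared against lpr match_distance in the patch.
    print(Levenshtein.distance("ABC123", "A8C123"))  # 1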
From 9e8388813347965e1038f88b843b6f105081bdc2 Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Mon, 3 Nov 2025 18:30:56 -0600 Subject: [PATCH 10/21] Fix recordings summary for DST (#20784) * make recordings summary endpoints DST aware * remove unused * clean up --- frigate/api/media.py | 222 +++++++++++++++++++++++++++--------------- frigate/api/review.py | 3 +- 2 files changed, 147 insertions(+), 78 deletions(-) diff --git a/frigate/api/media.py b/frigate/api/media.py index aafe7fe4c..493653a24 100644 --- a/frigate/api/media.py +++ b/frigate/api/media.py @@ -46,7 +46,7 @@ from frigate.models import Event, Previews, Recordings, Regions, ReviewSegment from frigate.track.object_processing import TrackedObjectProcessor from frigate.util.image import get_image_from_recording from frigate.util.path import get_event_thumbnail_bytes -from frigate.util.time import get_tz_modifiers +from frigate.util.time import get_dst_transitions logger = logging.getLogger(__name__) @@ -424,7 +424,6 @@ def all_recordings_summary( allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter), ): """Returns true/false by day indicating if recordings exist""" - hour_modifier, minute_modifier, seconds_offset = get_tz_modifiers(params.timezone) cameras = params.cameras if cameras != "all": @@ -432,41 +431,70 @@ def all_recordings_summary( filtered = requested.intersection(allowed_cameras) if not filtered: return JSONResponse(content={}) - cameras = ",".join(filtered) + camera_list = list(filtered) else: - cameras = allowed_cameras + camera_list = allowed_cameras - query = ( + time_range_query = ( Recordings.select( - fn.strftime( - "%Y-%m-%d", - fn.datetime( - Recordings.start_time + seconds_offset, - "unixepoch", - hour_modifier, - minute_modifier, - ), - ).alias("day") + fn.MIN(Recordings.start_time).alias("min_time"), + fn.MAX(Recordings.start_time).alias("max_time"), ) - .group_by( - fn.strftime( - "%Y-%m-%d", - fn.datetime( - Recordings.start_time + seconds_offset, - "unixepoch", - hour_modifier, - minute_modifier, - ), - ) - ) - .order_by(Recordings.start_time.desc()) + .where(Recordings.camera << camera_list) + .dicts() + .get() ) - if params.cameras != "all": - query = query.where(Recordings.camera << cameras.split(",")) + min_time = time_range_query.get("min_time") + max_time = time_range_query.get("max_time") - recording_days = query.namedtuples() - days = {day.day: True for day in recording_days} + if min_time is None or max_time is None: + return JSONResponse(content={}) + + dst_periods = get_dst_transitions(params.timezone, min_time, max_time) + + days: dict[str, bool] = {} + + for period_start, period_end, period_offset in dst_periods: + hours_offset = int(period_offset / 60 / 60) + minutes_offset = int(period_offset / 60 - hours_offset * 60) + period_hour_modifier = f"{hours_offset} hour" + period_minute_modifier = f"{minutes_offset} minute" + + period_query = ( + Recordings.select( + fn.strftime( + "%Y-%m-%d", + fn.datetime( + Recordings.start_time, + "unixepoch", + period_hour_modifier, + period_minute_modifier, + ), + ).alias("day") + ) + .where( + (Recordings.camera << camera_list) + & (Recordings.end_time >= period_start) + & (Recordings.start_time <= period_end) + ) + .group_by( + fn.strftime( + "%Y-%m-%d", + fn.datetime( + Recordings.start_time, + "unixepoch", + period_hour_modifier, + period_minute_modifier, + ), + ) + ) + .order_by(Recordings.start_time.desc()) + .namedtuples() + ) + + for g in period_query: + days[g.day] = True return 
JSONResponse(content=days) @@ -476,61 +504,103 @@ def all_recordings_summary( ) async def recordings_summary(camera_name: str, timezone: str = "utc"): """Returns hourly summary for recordings of given camera""" - hour_modifier, minute_modifier, seconds_offset = get_tz_modifiers(timezone) - recording_groups = ( + + time_range_query = ( Recordings.select( - fn.strftime( - "%Y-%m-%d %H", - fn.datetime( - Recordings.start_time, "unixepoch", hour_modifier, minute_modifier - ), - ).alias("hour"), - fn.SUM(Recordings.duration).alias("duration"), - fn.SUM(Recordings.motion).alias("motion"), - fn.SUM(Recordings.objects).alias("objects"), + fn.MIN(Recordings.start_time).alias("min_time"), + fn.MAX(Recordings.start_time).alias("max_time"), ) .where(Recordings.camera == camera_name) - .group_by((Recordings.start_time + seconds_offset).cast("int") / 3600) - .order_by(Recordings.start_time.desc()) - .namedtuples() + .dicts() + .get() ) - event_groups = ( - Event.select( - fn.strftime( - "%Y-%m-%d %H", - fn.datetime( - Event.start_time, "unixepoch", hour_modifier, minute_modifier - ), - ).alias("hour"), - fn.COUNT(Event.id).alias("count"), + min_time = time_range_query.get("min_time") + max_time = time_range_query.get("max_time") + + days: dict[str, dict] = {} + + if min_time is None or max_time is None: + return JSONResponse(content=list(days.values())) + + dst_periods = get_dst_transitions(timezone, min_time, max_time) + + for period_start, period_end, period_offset in dst_periods: + hours_offset = int(period_offset / 60 / 60) + minutes_offset = int(period_offset / 60 - hours_offset * 60) + period_hour_modifier = f"{hours_offset} hour" + period_minute_modifier = f"{minutes_offset} minute" + + recording_groups = ( + Recordings.select( + fn.strftime( + "%Y-%m-%d %H", + fn.datetime( + Recordings.start_time, + "unixepoch", + period_hour_modifier, + period_minute_modifier, + ), + ).alias("hour"), + fn.SUM(Recordings.duration).alias("duration"), + fn.SUM(Recordings.motion).alias("motion"), + fn.SUM(Recordings.objects).alias("objects"), + ) + .where( + (Recordings.camera == camera_name) + & (Recordings.end_time >= period_start) + & (Recordings.start_time <= period_end) + ) + .group_by((Recordings.start_time + period_offset).cast("int") / 3600) + .order_by(Recordings.start_time.desc()) + .namedtuples() ) - .where(Event.camera == camera_name, Event.has_clip) - .group_by((Event.start_time + seconds_offset).cast("int") / 3600) - .namedtuples() - ) - event_map = {g.hour: g.count for g in event_groups} + event_groups = ( + Event.select( + fn.strftime( + "%Y-%m-%d %H", + fn.datetime( + Event.start_time, + "unixepoch", + period_hour_modifier, + period_minute_modifier, + ), + ).alias("hour"), + fn.COUNT(Event.id).alias("count"), + ) + .where(Event.camera == camera_name, Event.has_clip) + .where( + (Event.start_time >= period_start) & (Event.start_time <= period_end) + ) + .group_by((Event.start_time + period_offset).cast("int") / 3600) + .namedtuples() + ) - days = {} + event_map = {g.hour: g.count for g in event_groups} - for recording_group in recording_groups: - parts = recording_group.hour.split() - hour = parts[1] - day = parts[0] - events_count = event_map.get(recording_group.hour, 0) - hour_data = { - "hour": hour, - "events": events_count, - "motion": recording_group.motion, - "objects": recording_group.objects, - "duration": round(recording_group.duration), - } - if day not in days: - days[day] = {"events": events_count, "hours": [hour_data], "day": day} - else: - days[day]["events"] += events_count - 
days[day]["hours"].append(hour_data) + for recording_group in recording_groups: + parts = recording_group.hour.split() + hour = parts[1] + day = parts[0] + events_count = event_map.get(recording_group.hour, 0) + hour_data = { + "hour": hour, + "events": events_count, + "motion": recording_group.motion, + "objects": recording_group.objects, + "duration": round(recording_group.duration), + } + if day in days: + # merge counts if already present (edge-case at DST boundary) + days[day]["events"] += events_count or 0 + days[day]["hours"].append(hour_data) + else: + days[day] = { + "events": events_count or 0, + "hours": [hour_data], + "day": day, + } return JSONResponse(content=list(days.values())) diff --git a/frigate/api/review.py b/frigate/api/review.py index 1417883a0..300255663 100644 --- a/frigate/api/review.py +++ b/frigate/api/review.py @@ -36,7 +36,7 @@ from frigate.config import FrigateConfig from frigate.embeddings import EmbeddingsContext from frigate.models import Recordings, ReviewSegment, UserReviewStatus from frigate.review.types import SeverityEnum -from frigate.util.time import get_dst_transitions, get_tz_modifiers +from frigate.util.time import get_dst_transitions logger = logging.getLogger(__name__) @@ -197,7 +197,6 @@ async def review_summary( user_id = current_user["username"] - hour_modifier, minute_modifier, seconds_offset = get_tz_modifiers(params.timezone) day_ago = (datetime.datetime.now() - datetime.timedelta(hours=24)).timestamp() cameras = params.cameras From 84409eab7e40397e107d1d7c298e5e4f4cd3cfd1 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Mon, 3 Nov 2025 17:42:59 -0700 Subject: [PATCH 11/21] Various fixes (#20785) * Catch case where detector overflows * Add more debug logs * Cleanup * Adjust no class wording * Adjustments --- frigate/embeddings/maintainer.py | 15 +++++++++++++++ frigate/object_detection/base.py | 10 ++++++++++ web/src/components/card/ClassificationCard.tsx | 16 ++++++++++++---- web/src/pages/FaceLibrary.tsx | 1 + .../views/classification/ModelTrainingView.tsx | 1 + 5 files changed, 39 insertions(+), 4 deletions(-) diff --git a/frigate/embeddings/maintainer.py b/frigate/embeddings/maintainer.py index d169d2d88..a99ef72a2 100644 --- a/frigate/embeddings/maintainer.py +++ b/frigate/embeddings/maintainer.py @@ -397,7 +397,14 @@ class EmbeddingMaintainer(threading.Thread): source_type, _, camera, frame_name, data = update + logger.debug( + f"Received update - source_type: {source_type}, camera: {camera}, data label: {data.get('label') if data else 'None'}" + ) + if not camera or source_type != EventTypeEnum.tracked_object: + logger.debug( + f"Skipping update - camera: {camera}, source_type: {source_type}" + ) return if self.config.semantic_search.enabled: @@ -407,6 +414,9 @@ class EmbeddingMaintainer(threading.Thread): # no need to process updated objects if no processors are active if len(self.realtime_processors) == 0 and len(self.post_processors) == 0: + logger.debug( + f"No processors active - realtime: {len(self.realtime_processors)}, post: {len(self.post_processors)}" + ) return # Create our own thumbnail based on the bounding box and the frame time @@ -415,6 +425,7 @@ class EmbeddingMaintainer(threading.Thread): frame_name, camera_config.frame_shape_yuv ) except FileNotFoundError: + logger.debug(f"Frame {frame_name} not found for camera {camera}") pass if yuv_frame is None: @@ -423,7 +434,11 @@ class EmbeddingMaintainer(threading.Thread): ) return + logger.debug( + f"Processing {len(self.realtime_processors)} realtime processors for 
object {data.get('id')} (label: {data.get('label')})" + ) for processor in self.realtime_processors: + logger.debug(f"Calling process_frame on {processor.__class__.__name__}") processor.process_frame(data, yuv_frame) for processor in self.post_processors: diff --git a/frigate/object_detection/base.py b/frigate/object_detection/base.py index 9f4965111..bb5f83fab 100644 --- a/frigate/object_detection/base.py +++ b/frigate/object_detection/base.py @@ -9,6 +9,7 @@ from multiprocessing import Queue, Value from multiprocessing.synchronize import Event as MpEvent import numpy as np +import zmq from frigate.comms.object_detector_signaler import ( ObjectDetectorPublisher, @@ -377,6 +378,15 @@ class RemoteObjectDetector: if self.stop_event.is_set(): return detections + # Drain any stale detection results from the ZMQ buffer before making a new request + # This prevents reading detection results from a previous request + # NOTE: This should never happen, but can in some rare cases + while True: + try: + self.detector_subscriber.socket.recv_string(flags=zmq.NOBLOCK) + except zmq.Again: + break + # copy input to shared memory self.np_shm[:] = tensor_input[:] self.detection_queue.put(self.name) diff --git a/web/src/components/card/ClassificationCard.tsx b/web/src/components/card/ClassificationCard.tsx index 21f498fe4..6de418446 100644 --- a/web/src/components/card/ClassificationCard.tsx +++ b/web/src/components/card/ClassificationCard.tsx @@ -181,6 +181,7 @@ type GroupedClassificationCardProps = { selectedItems: string[]; i18nLibrary: string; objectType: string; + noClassificationLabel?: string; onClick: (data: ClassificationItemData | undefined) => void; children?: (data: ClassificationItemData) => React.ReactNode; }; @@ -190,6 +191,7 @@ export function GroupedClassificationCard({ threshold, selectedItems, i18nLibrary, + noClassificationLabel = "details.none", onClick, children, }: GroupedClassificationCardProps) { @@ -222,10 +224,14 @@ export function GroupedClassificationCard({ const bestTyped: ClassificationItemData = best; return { ...bestTyped, - name: event ? (event.sub_label ?? t("details.unknown")) : bestTyped.name, + name: event + ? event.sub_label && event.sub_label !== "none" + ? event.sub_label + : t(noClassificationLabel) + : bestTyped.name, score: event?.data?.sub_label_score || bestTyped.score, }; - }, [group, event, t]); + }, [group, event, noClassificationLabel, t]); const bestScoreStatus = useMemo(() => { if (!bestItem?.score || !threshold) { @@ -311,8 +317,10 @@ export function GroupedClassificationCard({ isMobile && "px-2", )} > - {event?.sub_label ? event.sub_label : t("details.unknown")} - {event?.sub_label && ( + {event?.sub_label && event.sub_label !== "none" + ? event.sub_label + : t(noClassificationLabel)} + {event?.sub_label && event.sub_label !== "none" && (
{ if (data) { onClickFaces([data.filename], true); diff --git a/web/src/views/classification/ModelTrainingView.tsx b/web/src/views/classification/ModelTrainingView.tsx index 5d60ce56c..92a4adcdf 100644 --- a/web/src/views/classification/ModelTrainingView.tsx +++ b/web/src/views/classification/ModelTrainingView.tsx @@ -961,6 +961,7 @@ function ObjectTrainGrid({ selectedItems={selectedImages} i18nLibrary="views/classificationModel" objectType={model.object_config?.objects?.at(0) ?? "Object"} + noClassificationLabel="details.none" onClick={(data) => { if (data) { onClickImages([data.filename], true); From 256817d5c2640b4cd1167272ec606d6bbf9eb772 Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Mon, 3 Nov 2025 18:54:33 -0600 Subject: [PATCH 12/21] Make events summary endpoint DST-aware (#20786) --- frigate/api/event.py | 106 +++++++++++++++++++++++++++++++++---------- 1 file changed, 82 insertions(+), 24 deletions(-) diff --git a/frigate/api/event.py b/frigate/api/event.py index 8f82c8621..111842c9c 100644 --- a/frigate/api/event.py +++ b/frigate/api/event.py @@ -2,6 +2,7 @@ import base64 import datetime +import json import logging import os import random @@ -58,7 +59,7 @@ from frigate.embeddings import EmbeddingsContext from frigate.models import Event, ReviewSegment, Timeline, Trigger from frigate.track.object_processing import TrackedObject from frigate.util.path import get_event_thumbnail_bytes -from frigate.util.time import get_tz_modifiers +from frigate.util.time import get_dst_transitions, get_tz_modifiers logger = logging.getLogger(__name__) @@ -813,7 +814,6 @@ def events_summary( allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter), ): tz_name = params.timezone - hour_modifier, minute_modifier, seconds_offset = get_tz_modifiers(tz_name) has_clip = params.has_clip has_snapshot = params.has_snapshot @@ -828,33 +828,91 @@ def events_summary( if len(clauses) == 0: clauses.append((True)) - groups = ( + time_range_query = ( Event.select( - Event.camera, - Event.label, - Event.sub_label, - Event.data, - fn.strftime( - "%Y-%m-%d", - fn.datetime( - Event.start_time, "unixepoch", hour_modifier, minute_modifier - ), - ).alias("day"), - Event.zones, - fn.COUNT(Event.id).alias("count"), + fn.MIN(Event.start_time).alias("min_time"), + fn.MAX(Event.start_time).alias("max_time"), ) .where(reduce(operator.and_, clauses) & (Event.camera << allowed_cameras)) - .group_by( - Event.camera, - Event.label, - Event.sub_label, - Event.data, - (Event.start_time + seconds_offset).cast("int") / (3600 * 24), - Event.zones, - ) + .dicts() + .get() ) - return JSONResponse(content=[e for e in groups.dicts()]) + min_time = time_range_query.get("min_time") + max_time = time_range_query.get("max_time") + + if min_time is None or max_time is None: + return JSONResponse(content=[]) + + dst_periods = get_dst_transitions(tz_name, min_time, max_time) + + grouped: dict[tuple, dict] = {} + + for period_start, period_end, period_offset in dst_periods: + hours_offset = int(period_offset / 60 / 60) + minutes_offset = int(period_offset / 60 - hours_offset * 60) + period_hour_modifier = f"{hours_offset} hour" + period_minute_modifier = f"{minutes_offset} minute" + + period_groups = ( + Event.select( + Event.camera, + Event.label, + Event.sub_label, + Event.data, + fn.strftime( + "%Y-%m-%d", + fn.datetime( + Event.start_time, + "unixepoch", + period_hour_modifier, + period_minute_modifier, + ), + ).alias("day"), + Event.zones, + fn.COUNT(Event.id).alias("count"), + ) + 
.where( + reduce(operator.and_, clauses) + & (Event.camera << allowed_cameras) + & (Event.start_time >= period_start) + & (Event.start_time <= period_end) + ) + .group_by( + Event.camera, + Event.label, + Event.sub_label, + Event.data, + (Event.start_time + period_offset).cast("int") / (3600 * 24), + Event.zones, + ) + .namedtuples() + ) + + for g in period_groups: + key = ( + g.camera, + g.label, + g.sub_label, + json.dumps(g.data, sort_keys=True) if g.data is not None else None, + g.day, + json.dumps(g.zones, sort_keys=True) if g.zones is not None else None, + ) + + if key in grouped: + grouped[key]["count"] += int(g.count or 0) + else: + grouped[key] = { + "camera": g.camera, + "label": g.label, + "sub_label": g.sub_label, + "data": g.data, + "day": g.day, + "zones": g.zones, + "count": int(g.count or 0), + } + + return JSONResponse(content=list(grouped.values())) @router.get( From 2e288109f44b59e9e7c2c5a97d77b2f05e6d711c Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Tue, 4 Nov 2025 08:45:45 -0600 Subject: [PATCH 13/21] Review tweaks (#20789) * use alerts/detections colors for dots and add back blue border * add alerts/detections colored dot next to event icons * add margin for border --- web/src/components/card/ReviewCard.tsx | 11 ++++++++++- web/src/components/timeline/DetailStream.tsx | 14 +++++++++----- 2 files changed, 19 insertions(+), 6 deletions(-) diff --git a/web/src/components/card/ReviewCard.tsx b/web/src/components/card/ReviewCard.tsx index b8b8ffa1a..6337ac4a9 100644 --- a/web/src/components/card/ReviewCard.tsx +++ b/web/src/components/card/ReviewCard.tsx @@ -37,6 +37,7 @@ import { capitalizeFirstLetter } from "@/utils/stringUtil"; import { Button, buttonVariants } from "../ui/button"; import { Trans, useTranslation } from "react-i18next"; import { cn } from "@/lib/utils"; +import { LuCircle } from "react-icons/lu"; type ReviewCardProps = { event: ReviewSegment; @@ -142,7 +143,7 @@ export default function ReviewCard({ className={cn( "size-full rounded-lg", activeReviewItem?.id == event.id && - "outline outline-[3px] outline-offset-1 outline-selected", + "outline outline-[3px] -outline-offset-[2.8px] outline-selected duration-200", imgLoaded ? "visible" : "invisible", )} src={`${baseUrl}${event.thumb_path.replace("/media/frigate/", "")}`} @@ -165,6 +166,14 @@ export default function ReviewCard({
<> + {event.data.objects.map((object) => { return getIconForLabel( object, diff --git a/web/src/components/timeline/DetailStream.tsx b/web/src/components/timeline/DetailStream.tsx index e8d609cdb..706c6c5b4 100644 --- a/web/src/components/timeline/DetailStream.tsx +++ b/web/src/components/timeline/DetailStream.tsx @@ -367,7 +367,11 @@ function ReviewGroup({ return (
From e7394d0dc13abc90e762b2843c22be49f2ec2b5a Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Tue, 4 Nov 2025 08:57:47 -0600 Subject: [PATCH 14/21] Form validation tweaks (#20790) * ensure id field is expanded on form errors * only validate id field when name field has no errors * use ref instead * all numeric is an invalid name --- web/src/components/input/NameAndIdFields.tsx | 14 ++++++++++++-- web/src/utils/stringUtil.ts | 2 +- 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/web/src/components/input/NameAndIdFields.tsx b/web/src/components/input/NameAndIdFields.tsx index ad4ebcfcc..7b988edc7 100644 --- a/web/src/components/input/NameAndIdFields.tsx +++ b/web/src/components/input/NameAndIdFields.tsx @@ -8,7 +8,7 @@ import { FormMessage, } from "@/components/ui/form"; import { Input } from "@/components/ui/input"; -import { useState, useEffect } from "react"; +import { useState, useEffect, useRef } from "react"; import { useFormContext } from "react-hook-form"; import { generateFixedHash, isValidId } from "@/utils/stringUtil"; import { useTranslation } from "react-i18next"; @@ -41,8 +41,9 @@ export default function NameAndIdFields({ placeholderId, }: NameAndIdFieldsProps) { const { t } = useTranslation(["common"]); - const { watch, setValue, trigger } = useFormContext(); + const { watch, setValue, trigger, formState } = useFormContext(); const [isIdVisible, setIsIdVisible] = useState(false); + const hasUserTypedRef = useRef(false); const defaultProcessId = (name: string) => { const normalized = name.replace(/\s+/g, "_").toLowerCase(); @@ -58,6 +59,7 @@ export default function NameAndIdFields({ useEffect(() => { const subscription = watch((value, { name }) => { if (name === nameField) { + hasUserTypedRef.current = true; const processedId = effectiveProcessId(value[nameField] || ""); setValue(idField, processedId as PathValue>); trigger(idField); @@ -66,6 +68,14 @@ export default function NameAndIdFields({ return () => subscription.unsubscribe(); }, [watch, setValue, trigger, nameField, idField, effectiveProcessId]); + // Auto-expand if there's an error on the ID field after user has typed + useEffect(() => { + const idError = formState.errors[idField]; + if (idError && hasUserTypedRef.current && !isIdVisible) { + setIsIdVisible(true); + } + }, [formState.errors, idField, isIdVisible]); + return ( <> Date: Tue, 4 Nov 2025 09:54:05 -0700 Subject: [PATCH 15/21] UI Tweaks (#20791) * Add tooltip for classification group * Don't portal upload dialog when not in fullscreen --- .../locales/en/views/classificationModel.json | 3 ++ web/public/locales/en/views/faceLibrary.json | 3 +- .../components/card/ClassificationCard.tsx | 36 ++++++++++++++----- web/src/components/player/VideoControls.tsx | 9 ++++- 4 files changed, 40 insertions(+), 11 deletions(-) diff --git a/web/public/locales/en/views/classificationModel.json b/web/public/locales/en/views/classificationModel.json index ff0fab291..291b2bdf3 100644 --- a/web/public/locales/en/views/classificationModel.json +++ b/web/public/locales/en/views/classificationModel.json @@ -1,5 +1,8 @@ { "documentTitle": "Classification Models", + "details": { + "scoreInfo": "Score represents the average classification confidence across all detections of this object." 
+ }, "button": { "deleteClassificationAttempts": "Delete Classification Images", "renameCategory": "Rename Class", diff --git a/web/public/locales/en/views/faceLibrary.json b/web/public/locales/en/views/faceLibrary.json index ce168c346..4c0d1e712 100644 --- a/web/public/locales/en/views/faceLibrary.json +++ b/web/public/locales/en/views/faceLibrary.json @@ -6,7 +6,8 @@ }, "details": { "timestamp": "Timestamp", - "unknown": "Unknown" + "unknown": "Unknown", + "scoreInfo": "Score is a weighted average of all face scores, weighted by the size of the face in each image." }, "documentTitle": "Face Library - Frigate", "uploadFaceImage": { diff --git a/web/src/components/card/ClassificationCard.tsx b/web/src/components/card/ClassificationCard.tsx index 6de418446..73be455bc 100644 --- a/web/src/components/card/ClassificationCard.tsx +++ b/web/src/components/card/ClassificationCard.tsx @@ -11,7 +11,8 @@ import { isDesktop, isMobile } from "react-device-detect"; import { useTranslation } from "react-i18next"; import TimeAgo from "../dynamic/TimeAgo"; import { Tooltip, TooltipContent, TooltipTrigger } from "../ui/tooltip"; -import { LuSearch } from "react-icons/lu"; +import { Popover, PopoverContent, PopoverTrigger } from "../ui/popover"; +import { LuSearch, LuInfo } from "react-icons/lu"; import { TooltipPortal } from "@radix-ui/react-tooltip"; import { useNavigate } from "react-router-dom"; import { HiSquare2Stack } from "react-icons/hi2"; @@ -321,14 +322,31 @@ export function GroupedClassificationCard({ ? event.sub_label : t(noClassificationLabel)} {event?.sub_label && event.sub_label !== "none" && ( -
{`${Math.round((event.data.sub_label_score || 0) * 100)}%`}
+
+
{`${Math.round((event.data.sub_label_score || 0) * 100)}%`}
+ + + + + + {t("details.scoreInfo", { ns: i18nLibrary })} + + +
)} diff --git a/web/src/components/player/VideoControls.tsx b/web/src/components/player/VideoControls.tsx index d3bb1aa04..020c54d7b 100644 --- a/web/src/components/player/VideoControls.tsx +++ b/web/src/components/player/VideoControls.tsx @@ -289,6 +289,7 @@ export default function VideoControls({ }} onUploadFrame={onUploadFrame} containerRef={containerRef} + fullscreen={fullscreen} /> )} {features.fullscreen && toggleFullscreen && ( @@ -306,6 +307,7 @@ type FrigatePlusUploadButtonProps = { onClose: () => void; onUploadFrame: () => void; containerRef?: React.MutableRefObject; + fullscreen?: boolean; }; function FrigatePlusUploadButton({ video, @@ -313,6 +315,7 @@ function FrigatePlusUploadButton({ onClose, onUploadFrame, containerRef, + fullscreen, }: FrigatePlusUploadButtonProps) { const { t } = useTranslation(["components/player"]); @@ -349,7 +352,11 @@ function FrigatePlusUploadButton({ /> From b75122847668d6c500c95178e5fc767328c2945e Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Tue, 4 Nov 2025 16:06:14 -0700 Subject: [PATCH 16/21] Various Tweaks (#20800) * Fix incorrectly picking start time when date was selected * Implement shared file locking utility * Cleanup --- frigate/api/classification.py | 2 +- frigate/api/event.py | 2 +- frigate/api/media.py | 2 +- .../post/object_descriptions.py | 2 +- .../data_processing/post/semantic_trigger.py | 2 +- frigate/detectors/plugins/memryx.py | 31 +- frigate/embeddings/embeddings.py | 2 +- frigate/embeddings/maintainer.py | 2 +- frigate/events/cleanup.py | 2 +- frigate/util/classification.py | 2 +- frigate/util/downloader.py | 40 +-- frigate/util/file.py | 276 ++++++++++++++++++ frigate/util/path.py | 62 ---- frigate/util/rknn_converter.py | 128 +------- web/src/views/events/EventView.tsx | 40 +-- 15 files changed, 330 insertions(+), 265 deletions(-) create mode 100644 frigate/util/file.py delete mode 100644 frigate/util/path.py diff --git a/frigate/api/classification.py b/frigate/api/classification.py index 90f6391c0..a167911c4 100644 --- a/frigate/api/classification.py +++ b/frigate/api/classification.py @@ -38,7 +38,7 @@ from frigate.util.classification import ( collect_object_classification_examples, collect_state_classification_examples, ) -from frigate.util.path import get_event_snapshot +from frigate.util.file import get_event_snapshot logger = logging.getLogger(__name__) diff --git a/frigate/api/event.py b/frigate/api/event.py index 111842c9c..544e58fd2 100644 --- a/frigate/api/event.py +++ b/frigate/api/event.py @@ -58,7 +58,7 @@ from frigate.const import CLIPS_DIR, TRIGGER_DIR from frigate.embeddings import EmbeddingsContext from frigate.models import Event, ReviewSegment, Timeline, Trigger from frigate.track.object_processing import TrackedObject -from frigate.util.path import get_event_thumbnail_bytes +from frigate.util.file import get_event_thumbnail_bytes from frigate.util.time import get_dst_transitions, get_tz_modifiers logger = logging.getLogger(__name__) diff --git a/frigate/api/media.py b/frigate/api/media.py index 493653a24..8d310fec8 100644 --- a/frigate/api/media.py +++ b/frigate/api/media.py @@ -44,8 +44,8 @@ from frigate.const import ( ) from frigate.models import Event, Previews, Recordings, Regions, ReviewSegment from frigate.track.object_processing import TrackedObjectProcessor +from frigate.util.file import get_event_thumbnail_bytes from frigate.util.image import get_image_from_recording -from frigate.util.path import get_event_thumbnail_bytes from frigate.util.time import get_dst_transitions logger = 
logging.getLogger(__name__) diff --git a/frigate/data_processing/post/object_descriptions.py b/frigate/data_processing/post/object_descriptions.py index 23af43548..1f4608bc3 100644 --- a/frigate/data_processing/post/object_descriptions.py +++ b/frigate/data_processing/post/object_descriptions.py @@ -20,8 +20,8 @@ from frigate.genai import GenAIClient from frigate.models import Event from frigate.types import TrackedObjectUpdateTypesEnum from frigate.util.builtin import EventsPerSecond, InferenceSpeed +from frigate.util.file import get_event_thumbnail_bytes from frigate.util.image import create_thumbnail, ensure_jpeg_bytes -from frigate.util.path import get_event_thumbnail_bytes if TYPE_CHECKING: from frigate.embeddings import Embeddings diff --git a/frigate/data_processing/post/semantic_trigger.py b/frigate/data_processing/post/semantic_trigger.py index 40eed0c10..ec9e5d220 100644 --- a/frigate/data_processing/post/semantic_trigger.py +++ b/frigate/data_processing/post/semantic_trigger.py @@ -22,7 +22,7 @@ from frigate.db.sqlitevecq import SqliteVecQueueDatabase from frigate.embeddings.util import ZScoreNormalization from frigate.models import Event, Trigger from frigate.util.builtin import cosine_distance -from frigate.util.path import get_event_thumbnail_bytes +from frigate.util.file import get_event_thumbnail_bytes from ..post.api import PostProcessorApi from ..types import DataProcessorMetrics diff --git a/frigate/detectors/plugins/memryx.py b/frigate/detectors/plugins/memryx.py index 3b424bcc0..3e1651604 100644 --- a/frigate/detectors/plugins/memryx.py +++ b/frigate/detectors/plugins/memryx.py @@ -17,6 +17,7 @@ from frigate.detectors.detector_config import ( BaseDetectorConfig, ModelTypeEnum, ) +from frigate.util.file import FileLock from frigate.util.model import post_process_yolo logger = logging.getLogger(__name__) @@ -177,29 +178,6 @@ class MemryXDetector(DetectionApi): logger.error(f"Failed to initialize MemryX model: {e}") raise - def _acquire_file_lock(self, lock_path: str, timeout: int = 60, poll: float = 0.2): - """ - Create an exclusive lock file. Blocks (with polling) until it can acquire, - or raises TimeoutError. Uses only stdlib (os.O_EXCL). 
- """ - start = time.time() - while True: - try: - fd = os.open(lock_path, os.O_CREAT | os.O_EXCL | os.O_RDWR) - os.close(fd) - return - except FileExistsError: - if time.time() - start > timeout: - raise TimeoutError(f"Timeout waiting for lock: {lock_path}") - time.sleep(poll) - - def _release_file_lock(self, lock_path: str): - """Best-effort removal of the lock file.""" - try: - os.remove(lock_path) - except FileNotFoundError: - pass - def load_yolo_constants(self): base = f"{self.cache_dir}/{self.model_folder}" # constants for yolov9 post-processing @@ -212,9 +190,9 @@ class MemryXDetector(DetectionApi): os.makedirs(self.cache_dir, exist_ok=True) lock_path = os.path.join(self.cache_dir, f".{self.model_folder}.lock") - self._acquire_file_lock(lock_path) + lock = FileLock(lock_path, timeout=60) - try: + with lock: # ---------- CASE 1: user provided a custom model path ---------- if self.memx_model_path: if not self.memx_model_path.endswith(".zip"): @@ -338,9 +316,6 @@ class MemryXDetector(DetectionApi): f"Failed to remove downloaded zip {zip_path}: {e}" ) - finally: - self._release_file_lock(lock_path) - def send_input(self, connection_id, tensor_input: np.ndarray): """Pre-process (if needed) and send frame to MemryX input queue""" if tensor_input is None: diff --git a/frigate/embeddings/embeddings.py b/frigate/embeddings/embeddings.py index 788e3e6db..5689511a8 100644 --- a/frigate/embeddings/embeddings.py +++ b/frigate/embeddings/embeddings.py @@ -29,7 +29,7 @@ from frigate.db.sqlitevecq import SqliteVecQueueDatabase from frigate.models import Event, Trigger from frigate.types import ModelStatusTypesEnum from frigate.util.builtin import EventsPerSecond, InferenceSpeed, serialize -from frigate.util.path import get_event_thumbnail_bytes +from frigate.util.file import get_event_thumbnail_bytes from .onnx.jina_v1_embedding import JinaV1ImageEmbedding, JinaV1TextEmbedding from .onnx.jina_v2_embedding import JinaV2Embedding diff --git a/frigate/embeddings/maintainer.py b/frigate/embeddings/maintainer.py index a99ef72a2..bde81522d 100644 --- a/frigate/embeddings/maintainer.py +++ b/frigate/embeddings/maintainer.py @@ -62,8 +62,8 @@ from frigate.events.types import EventTypeEnum, RegenerateDescriptionEnum from frigate.genai import get_genai_client from frigate.models import Event, Recordings, ReviewSegment, Trigger from frigate.util.builtin import serialize +from frigate.util.file import get_event_thumbnail_bytes from frigate.util.image import SharedMemoryFrameManager -from frigate.util.path import get_event_thumbnail_bytes from .embeddings import Embeddings diff --git a/frigate/events/cleanup.py b/frigate/events/cleanup.py index d5e6ca3fb..1ac03b2ed 100644 --- a/frigate/events/cleanup.py +++ b/frigate/events/cleanup.py @@ -12,7 +12,7 @@ from frigate.config import FrigateConfig from frigate.const import CLIPS_DIR from frigate.db.sqlitevecq import SqliteVecQueueDatabase from frigate.models import Event, Timeline -from frigate.util.path import delete_event_snapshot, delete_event_thumbnail +from frigate.util.file import delete_event_snapshot, delete_event_thumbnail logger = logging.getLogger(__name__) diff --git a/frigate/util/classification.py b/frigate/util/classification.py index ab17a9444..43dfd7fd7 100644 --- a/frigate/util/classification.py +++ b/frigate/util/classification.py @@ -20,8 +20,8 @@ from frigate.const import ( from frigate.log import redirect_output_to_logger from frigate.models import Event, Recordings, ReviewSegment from frigate.types import ModelStatusTypesEnum +from 
frigate.util.file import get_event_thumbnail_bytes from frigate.util.image import get_image_from_recording -from frigate.util.path import get_event_thumbnail_bytes from frigate.util.process import FrigateProcess BATCH_SIZE = 16 diff --git a/frigate/util/downloader.py b/frigate/util/downloader.py index 49b05dd05..ee80b3816 100644 --- a/frigate/util/downloader.py +++ b/frigate/util/downloader.py @@ -1,7 +1,6 @@ import logging import os import threading -import time from pathlib import Path from typing import Callable, List @@ -10,40 +9,11 @@ import requests from frigate.comms.inter_process import InterProcessRequestor from frigate.const import UPDATE_MODEL_STATE from frigate.types import ModelStatusTypesEnum +from frigate.util.file import FileLock logger = logging.getLogger(__name__) -class FileLock: - def __init__(self, path): - self.path = path - self.lock_file = f"{path}.lock" - - # we have not acquired the lock yet so it should not exist - if os.path.exists(self.lock_file): - try: - os.remove(self.lock_file) - except Exception: - pass - - def acquire(self): - parent_dir = os.path.dirname(self.lock_file) - os.makedirs(parent_dir, exist_ok=True) - - while True: - try: - with open(self.lock_file, "x"): - return - except FileExistsError: - time.sleep(0.1) - - def release(self): - try: - os.remove(self.lock_file) - except FileNotFoundError: - pass - - class ModelDownloader: def __init__( self, @@ -81,15 +51,13 @@ class ModelDownloader: def _download_models(self): for file_name in self.file_names: path = os.path.join(self.download_path, file_name) - lock = FileLock(path) + lock_path = f"{path}.lock" + lock = FileLock(lock_path, cleanup_stale_on_init=True) if not os.path.exists(path): - lock.acquire() - try: + with lock: if not os.path.exists(path): self.download_func(path) - finally: - lock.release() self.requestor.send_data( UPDATE_MODEL_STATE, diff --git a/frigate/util/file.py b/frigate/util/file.py new file mode 100644 index 000000000..22be3e511 --- /dev/null +++ b/frigate/util/file.py @@ -0,0 +1,276 @@ +"""Path and file utilities.""" + +import base64 +import fcntl +import logging +import os +import time +from pathlib import Path +from typing import Optional + +import cv2 +from numpy import ndarray + +from frigate.const import CLIPS_DIR, THUMB_DIR +from frigate.models import Event + +logger = logging.getLogger(__name__) + + +def get_event_thumbnail_bytes(event: Event) -> bytes | None: + if event.thumbnail: + return base64.b64decode(event.thumbnail) + else: + try: + with open( + os.path.join(THUMB_DIR, event.camera, f"{event.id}.webp"), "rb" + ) as f: + return f.read() + except Exception: + return None + + +def get_event_snapshot(event: Event) -> ndarray: + media_name = f"{event.camera}-{event.id}" + return cv2.imread(f"{os.path.join(CLIPS_DIR, media_name)}.jpg") + + +### Deletion + + +def delete_event_images(event: Event) -> bool: + return delete_event_snapshot(event) and delete_event_thumbnail(event) + + +def delete_event_snapshot(event: Event) -> bool: + media_name = f"{event.camera}-{event.id}" + media_path = Path(f"{os.path.join(CLIPS_DIR, media_name)}.jpg") + + try: + media_path.unlink(missing_ok=True) + media_path = Path(f"{os.path.join(CLIPS_DIR, media_name)}-clean.webp") + media_path.unlink(missing_ok=True) + # also delete clean.png (legacy) for backward compatibility + media_path = Path(f"{os.path.join(CLIPS_DIR, media_name)}-clean.png") + media_path.unlink(missing_ok=True) + return True + except OSError: + return False + + +def delete_event_thumbnail(event: Event) -> bool: + if 
event.thumbnail: + return True + else: + Path(os.path.join(THUMB_DIR, event.camera, f"{event.id}.webp")).unlink( + missing_ok=True + ) + return True + + +### File Locking + + +class FileLock: + """ + A file-based lock for coordinating access to resources across processes. + + Uses fcntl.flock() for proper POSIX file locking on Linux. Supports timeouts, + stale lock detection, and can be used as a context manager. + + Example: + ```python + # Using as a context manager (recommended) + with FileLock("/path/to/resource.lock", timeout=60): + # Critical section + do_something() + + # Manual acquisition and release + lock = FileLock("/path/to/resource.lock") + if lock.acquire(timeout=60): + try: + do_something() + finally: + lock.release() + ``` + + Attributes: + lock_path: Path to the lock file + timeout: Maximum time to wait for lock acquisition (seconds) + poll_interval: Time to wait between lock acquisition attempts (seconds) + stale_timeout: Time after which a lock is considered stale (seconds) + """ + + def __init__( + self, + lock_path: str | Path, + timeout: int = 300, + poll_interval: float = 1.0, + stale_timeout: int = 600, + cleanup_stale_on_init: bool = False, + ): + """ + Initialize a FileLock. + + Args: + lock_path: Path to the lock file + timeout: Maximum time to wait for lock acquisition in seconds (default: 300) + poll_interval: Time to wait between lock attempts in seconds (default: 1.0) + stale_timeout: Time after which a lock is considered stale in seconds (default: 600) + cleanup_stale_on_init: Whether to clean up stale locks on initialization (default: False) + """ + self.lock_path = Path(lock_path) + self.timeout = timeout + self.poll_interval = poll_interval + self.stale_timeout = stale_timeout + self._fd: Optional[int] = None + self._acquired = False + + if cleanup_stale_on_init: + self._cleanup_stale_lock() + + def _cleanup_stale_lock(self) -> bool: + """ + Clean up a stale lock file if it exists and is old. + + Returns: + True if lock was cleaned up, False otherwise + """ + try: + if self.lock_path.exists(): + # Check if lock file is older than stale_timeout + lock_age = time.time() - self.lock_path.stat().st_mtime + if lock_age > self.stale_timeout: + logger.warning( + f"Removing stale lock file: {self.lock_path} (age: {lock_age:.1f}s)" + ) + self.lock_path.unlink() + return True + except Exception as e: + logger.error(f"Error cleaning up stale lock: {e}") + + return False + + def is_stale(self) -> bool: + """ + Check if the lock file is stale (older than stale_timeout). + + Returns: + True if lock is stale, False otherwise + """ + try: + if self.lock_path.exists(): + lock_age = time.time() - self.lock_path.stat().st_mtime + return lock_age > self.stale_timeout + except Exception: + pass + + return False + + def acquire(self, timeout: Optional[int] = None) -> bool: + """ + Acquire the file lock using fcntl.flock(). 
+ + Args: + timeout: Maximum time to wait for lock in seconds (uses instance timeout if None) + + Returns: + True if lock acquired, False if timeout or error + """ + if self._acquired: + logger.warning(f"Lock already acquired: {self.lock_path}") + return True + + if timeout is None: + timeout = self.timeout + + # Ensure parent directory exists + self.lock_path.parent.mkdir(parents=True, exist_ok=True) + + # Clean up stale lock before attempting to acquire + self._cleanup_stale_lock() + + try: + self._fd = os.open(self.lock_path, os.O_CREAT | os.O_RDWR) + + start_time = time.time() + while time.time() - start_time < timeout: + try: + fcntl.flock(self._fd, fcntl.LOCK_EX | fcntl.LOCK_NB) + self._acquired = True + logger.debug(f"Acquired lock: {self.lock_path}") + return True + except (OSError, IOError): + # Lock is held by another process + if time.time() - start_time >= timeout: + logger.warning(f"Timeout waiting for lock: {self.lock_path}") + os.close(self._fd) + self._fd = None + return False + + time.sleep(self.poll_interval) + + # Timeout reached + if self._fd is not None: + os.close(self._fd) + self._fd = None + return False + + except Exception as e: + logger.error(f"Error acquiring lock: {e}") + if self._fd is not None: + try: + os.close(self._fd) + except Exception: + pass + self._fd = None + return False + + def release(self) -> None: + """ + Release the file lock. + + This closes the file descriptor and removes the lock file. + """ + if not self._acquired: + return + + try: + # Close file descriptor and release fcntl lock + if self._fd is not None: + try: + fcntl.flock(self._fd, fcntl.LOCK_UN) + os.close(self._fd) + except Exception as e: + logger.warning(f"Error closing lock file descriptor: {e}") + finally: + self._fd = None + + # Remove lock file + if self.lock_path.exists(): + self.lock_path.unlink() + logger.debug(f"Released lock: {self.lock_path}") + + except FileNotFoundError: + # Lock file already removed, that's fine + pass + except Exception as e: + logger.error(f"Error releasing lock: {e}") + finally: + self._acquired = False + + def __enter__(self): + """Context manager entry - acquire the lock.""" + if not self.acquire(): + raise TimeoutError(f"Failed to acquire lock: {self.lock_path}") + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + """Context manager exit - release the lock.""" + self.release() + return False + + def __del__(self): + """Destructor - ensure lock is released.""" + if self._acquired: + self.release() diff --git a/frigate/util/path.py b/frigate/util/path.py deleted file mode 100644 index 6a62bd44c..000000000 --- a/frigate/util/path.py +++ /dev/null @@ -1,62 +0,0 @@ -"""Path utilities.""" - -import base64 -import os -from pathlib import Path - -import cv2 -from numpy import ndarray - -from frigate.const import CLIPS_DIR, THUMB_DIR -from frigate.models import Event - - -def get_event_thumbnail_bytes(event: Event) -> bytes | None: - if event.thumbnail: - return base64.b64decode(event.thumbnail) - else: - try: - with open( - os.path.join(THUMB_DIR, event.camera, f"{event.id}.webp"), "rb" - ) as f: - return f.read() - except Exception: - return None - - -def get_event_snapshot(event: Event) -> ndarray: - media_name = f"{event.camera}-{event.id}" - return cv2.imread(f"{os.path.join(CLIPS_DIR, media_name)}.jpg") - - -### Deletion - - -def delete_event_images(event: Event) -> bool: - return delete_event_snapshot(event) and delete_event_thumbnail(event) - - -def delete_event_snapshot(event: Event) -> bool: - media_name = 
f"{event.camera}-{event.id}" - media_path = Path(f"{os.path.join(CLIPS_DIR, media_name)}.jpg") - - try: - media_path.unlink(missing_ok=True) - media_path = Path(f"{os.path.join(CLIPS_DIR, media_name)}-clean.webp") - media_path.unlink(missing_ok=True) - # also delete clean.png (legacy) for backward compatibility - media_path = Path(f"{os.path.join(CLIPS_DIR, media_name)}-clean.png") - media_path.unlink(missing_ok=True) - return True - except OSError: - return False - - -def delete_event_thumbnail(event: Event) -> bool: - if event.thumbnail: - return True - else: - Path(os.path.join(THUMB_DIR, event.camera, f"{event.id}.webp")).unlink( - missing_ok=True - ) - return True diff --git a/frigate/util/rknn_converter.py b/frigate/util/rknn_converter.py index 48fc0139e..f9a1a86d1 100644 --- a/frigate/util/rknn_converter.py +++ b/frigate/util/rknn_converter.py @@ -1,6 +1,5 @@ """RKNN model conversion utility for Frigate.""" -import fcntl import logging import os import subprocess @@ -9,6 +8,8 @@ import time from pathlib import Path from typing import Optional +from frigate.util.file import FileLock + logger = logging.getLogger(__name__) MODEL_TYPE_CONFIGS = { @@ -245,112 +246,6 @@ def convert_onnx_to_rknn( logger.warning(f"Failed to remove temporary ONNX file: {e}") -def cleanup_stale_lock(lock_file_path: Path) -> bool: - """ - Clean up a stale lock file if it exists and is old. - - Args: - lock_file_path: Path to the lock file - - Returns: - True if lock was cleaned up, False otherwise - """ - try: - if lock_file_path.exists(): - # Check if lock file is older than 10 minutes (stale) - lock_age = time.time() - lock_file_path.stat().st_mtime - if lock_age > 600: # 10 minutes - logger.warning( - f"Removing stale lock file: {lock_file_path} (age: {lock_age:.1f}s)" - ) - lock_file_path.unlink() - return True - except Exception as e: - logger.error(f"Error cleaning up stale lock: {e}") - - return False - - -def acquire_conversion_lock(lock_file_path: Path, timeout: int = 300) -> bool: - """ - Acquire a file-based lock for model conversion. - - Args: - lock_file_path: Path to the lock file - timeout: Maximum time to wait for lock in seconds - - Returns: - True if lock acquired, False if timeout or error - """ - try: - lock_file_path.parent.mkdir(parents=True, exist_ok=True) - cleanup_stale_lock(lock_file_path) - lock_fd = os.open(lock_file_path, os.O_CREAT | os.O_RDWR) - - # Try to acquire exclusive lock - start_time = time.time() - while time.time() - start_time < timeout: - try: - fcntl.flock(lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB) - # Lock acquired successfully - logger.debug(f"Acquired conversion lock: {lock_file_path}") - return True - except (OSError, IOError): - # Lock is held by another process, wait and retry - if time.time() - start_time >= timeout: - logger.warning( - f"Timeout waiting for conversion lock: {lock_file_path}" - ) - os.close(lock_fd) - return False - - logger.debug("Waiting for conversion lock to be released...") - time.sleep(1) - - os.close(lock_fd) - return False - - except Exception as e: - logger.error(f"Error acquiring conversion lock: {e}") - return False - - -def release_conversion_lock(lock_file_path: Path) -> None: - """ - Release the conversion lock. 
- - Args: - lock_file_path: Path to the lock file - """ - try: - if lock_file_path.exists(): - lock_file_path.unlink() - logger.debug(f"Released conversion lock: {lock_file_path}") - except Exception as e: - logger.error(f"Error releasing conversion lock: {e}") - - -def is_lock_stale(lock_file_path: Path, max_age: int = 600) -> bool: - """ - Check if a lock file is stale (older than max_age seconds). - - Args: - lock_file_path: Path to the lock file - max_age: Maximum age in seconds before considering lock stale - - Returns: - True if lock is stale, False otherwise - """ - try: - if lock_file_path.exists(): - lock_age = time.time() - lock_file_path.stat().st_mtime - return lock_age > max_age - except Exception: - pass - - return False - - def wait_for_conversion_completion( model_type: str, rknn_path: Path, lock_file_path: Path, timeout: int = 300 ) -> bool: @@ -358,6 +253,7 @@ def wait_for_conversion_completion( Wait for another process to complete the conversion. Args: + model_type: Type of model being converted rknn_path: Path to the expected RKNN model lock_file_path: Path to the lock file to monitor timeout: Maximum time to wait in seconds @@ -366,6 +262,8 @@ def wait_for_conversion_completion( True if RKNN model appears, False if timeout """ start_time = time.time() + lock = FileLock(lock_file_path, stale_timeout=600) + while time.time() - start_time < timeout: # Check if RKNN model appeared if rknn_path.exists(): @@ -385,11 +283,14 @@ def wait_for_conversion_completion( return False # Check if lock is stale - if is_lock_stale(lock_file_path): + if lock.is_stale(): logger.warning("Lock file is stale, attempting to clean up and retry...") - cleanup_stale_lock(lock_file_path) + lock._cleanup_stale_lock() # Try to acquire lock again - if acquire_conversion_lock(lock_file_path, timeout=60): + retry_lock = FileLock( + lock_file_path, timeout=60, cleanup_stale_on_init=True + ) + if retry_lock.acquire(): try: # Check if RKNN file appeared while waiting if rknn_path.exists(): @@ -415,7 +316,7 @@ def wait_for_conversion_completion( return False finally: - release_conversion_lock(lock_file_path) + retry_lock.release() logger.debug("Waiting for RKNN model to appear...") time.sleep(1) @@ -452,8 +353,9 @@ def auto_convert_model( return str(rknn_path) lock_file_path = base_path.parent / f"{base_name}.conversion.lock" + lock = FileLock(lock_file_path, timeout=300, cleanup_stale_on_init=True) - if acquire_conversion_lock(lock_file_path): + if lock.acquire(): try: if rknn_path.exists(): logger.info( @@ -476,7 +378,7 @@ def auto_convert_model( return None finally: - release_conversion_lock(lock_file_path) + lock.release() else: logger.info( f"Another process is converting {model_path}, waiting for completion..." 
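A note on the flow above: with the old lock helpers removed, auto_convert_model reduces to a single acquire-or-wait pattern. A minimal sketch of that pattern, assuming the FileLock semantics added in frigate/util/file.py by this patch (run_conversion below is a placeholder for the real ONNX-to-RKNN step, not a function in the codebase):

```python
import time
from pathlib import Path

from frigate.util.file import FileLock


def run_conversion(rknn_path: Path) -> None:
    # Placeholder: the patch calls convert_onnx_to_rknn() at this point.
    rknn_path.touch()


def convert_or_wait(rknn_path: Path, timeout: int = 300) -> str | None:
    lock_path = rknn_path.parent / f"{rknn_path.stem}.conversion.lock"
    lock = FileLock(lock_path, timeout=timeout, cleanup_stale_on_init=True)

    if lock.acquire():
        try:
            # Re-check after acquiring: another process may have finished
            # the conversion while this one was waiting on the lock.
            if not rknn_path.exists():
                run_conversion(rknn_path)
            return str(rknn_path) if rknn_path.exists() else None
        finally:
            lock.release()

    # Lock held elsewhere: poll for the converted model instead of converting
    # twice. wait_for_conversion_completion above does the same, and also
    # retries acquisition if the holder's lock goes stale.
    deadline = time.time() + timeout
    while time.time() < deadline:
        if rknn_path.exists():
            return str(rknn_path)
        time.sleep(1)
    return None
```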
diff --git a/web/src/views/events/EventView.tsx b/web/src/views/events/EventView.tsx index 9b4b0bdab..082a2c0ee 100644 --- a/web/src/views/events/EventView.tsx +++ b/web/src/views/events/EventView.tsx @@ -136,7 +136,7 @@ export default function EventView({ const [selectedReviews, setSelectedReviews] = useState([]); const onSelectReview = useCallback( - (review: ReviewSegment, ctrl: boolean) => { + (review: ReviewSegment, ctrl: boolean, detail: boolean) => { if (selectedReviews.length > 0 || ctrl) { const index = selectedReviews.findIndex((r) => r.id === review.id); @@ -156,17 +156,31 @@ export default function EventView({ setSelectedReviews(copy); } } else { + // If a specific date is selected in the calendar and it's after the event start, + // use the selected date instead of the event start time + const effectiveStartTime = + timeRange.after > review.start_time + ? timeRange.after + : review.start_time; + onOpenRecording({ camera: review.camera, - startTime: review.start_time - REVIEW_PADDING, + startTime: effectiveStartTime - REVIEW_PADDING, severity: review.severity, + timelineType: detail ? "detail" : undefined, }); review.has_been_reviewed = true; markItemAsReviewed(review); } }, - [selectedReviews, setSelectedReviews, onOpenRecording, markItemAsReviewed], + [ + selectedReviews, + setSelectedReviews, + onOpenRecording, + markItemAsReviewed, + timeRange.after, + ], ); const onSelectAllReviews = useCallback(() => { if (!currentReviewItems || currentReviewItems.length == 0) { @@ -402,7 +416,6 @@ export default function EventView({ onSelectAllReviews={onSelectAllReviews} setSelectedReviews={setSelectedReviews} pullLatestData={pullLatestData} - onOpenRecording={onOpenRecording} /> )} {severity == "significant_motion" && ( @@ -442,11 +455,14 @@ type DetectionReviewProps = { loading: boolean; markItemAsReviewed: (review: ReviewSegment) => void; markAllItemsAsReviewed: (currentItems: ReviewSegment[]) => void; - onSelectReview: (review: ReviewSegment, ctrl: boolean) => void; + onSelectReview: ( + review: ReviewSegment, + ctrl: boolean, + detail: boolean, + ) => void; onSelectAllReviews: () => void; setSelectedReviews: (reviews: ReviewSegment[]) => void; pullLatestData: () => void; - onOpenRecording: (recordingInfo: RecordingStartingPoint) => void; }; function DetectionReview({ contentRef, @@ -466,7 +482,6 @@ function DetectionReview({ onSelectAllReviews, setSelectedReviews, pullLatestData, - onOpenRecording, }: DetectionReviewProps) { const { t } = useTranslation(["views/events"]); @@ -758,16 +773,7 @@ function DetectionReview({ ctrl: boolean, detail: boolean, ) => { - if (detail) { - onOpenRecording({ - camera: review.camera, - startTime: review.start_time - REVIEW_PADDING, - severity: review.severity, - timelineType: "detail", - }); - } else { - onSelectReview(review, ctrl); - } + onSelectReview(review, ctrl, detail); }} />
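For reference, the downloader change in this patch applies the same primitive as a double-checked guard. A minimal sketch, assuming frigate.util.file.FileLock as introduced above (ensure_downloaded and download_func are illustrative names, not part of the patch):

```python
import os

from frigate.util.file import FileLock


def ensure_downloaded(path: str, download_func) -> None:
    lock = FileLock(f"{path}.lock", cleanup_stale_on_init=True)

    if not os.path.exists(path):  # cheap check before taking the lock
        with lock:  # __enter__ raises TimeoutError if acquisition fails
            if not os.path.exists(path):  # re-check under the lock
                download_func(path)
```

The existence test is repeated inside the lock on purpose: another process may have completed the download while this one was blocked in acquire(). Because FileLock is a context manager, release() runs even if download_func raises, and __exit__ returns False so the exception still propagates.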
From 9f0b6004f20b5cde891380fed1c8198a963395ec Mon Sep 17 00:00:00 2001 From: Artem Vladimirov Date: Wed, 5 Nov 2025 17:02:54 +0500 Subject: [PATCH 17/21] fix: add pluralization for deletedModel toast message (#20803) * fix: add pluralization for deletedModel toast message * revert ru translation --------- Co-authored-by: Artem Vladimirov --- web/public/locales/en/views/classificationModel.json | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/web/public/locales/en/views/classificationModel.json b/web/public/locales/en/views/classificationModel.json index 291b2bdf3..59c8e53d4 100644 --- a/web/public/locales/en/views/classificationModel.json +++ b/web/public/locales/en/views/classificationModel.json @@ -16,7 +16,8 @@ "success": { "deletedCategory": "Deleted Class", "deletedImage": "Deleted Images", - "deletedModel": "Successfully deleted {{count}} model(s)", + "deletedModel_one": "Successfully deleted {{count}} model", + "deletedModel_other": "Successfully deleted {{count}} models", "categorizedImage": "Successfully Classified Image", "trainedModel": "Successfully trained model.", "trainingModel": "Successfully started model training." From 043bd9e6ee2e6a9a2e4ed5e144430a41308a8376 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Wed, 5 Nov 2025 07:10:56 -0700 Subject: [PATCH 18/21] Fix jetson build (#20808) * Fix jetson build * Set numpy version in model wheels * Use constraint instead * Simplify --- docker/main/build_pysqlite3.sh | 12 +++++++++--- docker/tensorrt/Dockerfile.arm64 | 5 +++-- 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/docker/main/build_pysqlite3.sh b/docker/main/build_pysqlite3.sh index c84c6fcf7..14d0cde44 100755 --- a/docker/main/build_pysqlite3.sh +++ b/docker/main/build_pysqlite3.sh @@ -5,21 +5,27 @@ set -euxo pipefail SQLITE3_VERSION="3.46.1" PYSQLITE3_VERSION="0.5.3" +# Install libsqlite3-dev if not present (needed for some base images like NVIDIA TensorRT) +if ! dpkg -l | grep -q libsqlite3-dev; then + echo "Installing libsqlite3-dev for compilation..." + apt-get update && apt-get install -y libsqlite3-dev && rm -rf /var/lib/apt/lists/* +fi + # Fetch the pre-built sqlite amalgamation instead of building from source if [[ ! -d "sqlite" ]]; then mkdir sqlite cd sqlite - + # Download the pre-built amalgamation from sqlite.org # For SQLite 3.46.1, the amalgamation version is 3460100 SQLITE_AMALGAMATION_VERSION="3460100" - + wget https://www.sqlite.org/2024/sqlite-amalgamation-${SQLITE_AMALGAMATION_VERSION}.zip -O sqlite-amalgamation.zip unzip sqlite-amalgamation.zip mv sqlite-amalgamation-${SQLITE_AMALGAMATION_VERSION}/* . 
rmdir sqlite-amalgamation-${SQLITE_AMALGAMATION_VERSION} rm sqlite-amalgamation.zip - + cd ../ fi diff --git a/docker/tensorrt/Dockerfile.arm64 b/docker/tensorrt/Dockerfile.arm64 index 0ae9c38e9..dd3c5de5e 100644 --- a/docker/tensorrt/Dockerfile.arm64 +++ b/docker/tensorrt/Dockerfile.arm64 @@ -112,7 +112,7 @@ RUN apt-get update \ && apt-get install -y protobuf-compiler libprotobuf-dev \ && rm -rf /var/lib/apt/lists/* RUN --mount=type=bind,source=docker/tensorrt/requirements-models-arm64.txt,target=/requirements-tensorrt-models.txt \ - pip3 wheel --wheel-dir=/trt-model-wheels -r /requirements-tensorrt-models.txt + pip3 wheel --wheel-dir=/trt-model-wheels --no-deps -r /requirements-tensorrt-models.txt FROM wget AS jetson-ffmpeg ARG DEBIAN_FRONTEND @@ -145,7 +145,8 @@ COPY --from=trt-wheels /etc/TENSORRT_VER /etc/TENSORRT_VER RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \ --mount=type=bind,from=trt-model-wheels,source=/trt-model-wheels,target=/deps/trt-model-wheels \ pip3 uninstall -y onnxruntime \ - && pip3 install -U /deps/trt-wheels/*.whl /deps/trt-model-wheels/*.whl \ + && pip3 install -U /deps/trt-wheels/*.whl \ + && pip3 install -U /deps/trt-model-wheels/*.whl \ && ldconfig WORKDIR /opt/frigate/ From 81faa8899d45dc2ff5d0bc0f7fadfa5c500c814c Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Wed, 5 Nov 2025 07:11:12 -0700 Subject: [PATCH 19/21] Classification Improvements (#20807) * Don't show model selection or back button when in multi select mode * Add dialog to edit classification models * Fix header spacing * Cleanup desktop * Incrase max number of object classifications * fix iOS mobile card * Cleanup --- .../real_time/custom_classification.py | 4 +- .../locales/en/views/classificationModel.json | 15 +- .../components/card/ClassificationCard.tsx | 26 +- .../ClassificationModelEditDialog.tsx | 477 ++++++++++++++++++ web/src/types/frigateConfig.ts | 1 + .../classification/ModelSelectionView.tsx | 24 +- .../classification/ModelTrainingView.tsx | 56 +- 7 files changed, 560 insertions(+), 43 deletions(-) create mode 100644 web/src/components/classification/ClassificationModelEditDialog.tsx diff --git a/frigate/data_processing/real_time/custom_classification.py b/frigate/data_processing/real_time/custom_classification.py index 65a62b568..e4048b6ec 100644 --- a/frigate/data_processing/real_time/custom_classification.py +++ b/frigate/data_processing/real_time/custom_classification.py @@ -466,6 +466,7 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi): now, self.labelmap[best_id], score, + max_files=200, ) if score < self.model_config.threshold: @@ -529,6 +530,7 @@ def write_classification_attempt( timestamp: float, label: str, score: float, + max_files: int = 100, ) -> None: if "-" in label: label = label.replace("-", "_") @@ -544,5 +546,5 @@ def write_classification_attempt( ) # delete oldest face image if maximum is reached - if len(files) > 100: + if len(files) > max_files: os.unlink(os.path.join(folder, files[-1])) diff --git a/web/public/locales/en/views/classificationModel.json b/web/public/locales/en/views/classificationModel.json index 59c8e53d4..ebc819e4e 100644 --- a/web/public/locales/en/views/classificationModel.json +++ b/web/public/locales/en/views/classificationModel.json @@ -10,7 +10,8 @@ "deleteImages": "Delete Images", "trainModel": "Train Model", "addClassification": "Add Classification", - "deleteModels": "Delete Models" + "deleteModels": "Delete Models", + "editModel": "Edit Model" }, "toast": { "success": { @@ -20,14 
+21,16 @@ "deletedModel_other": "Successfully deleted {{count}} models", "categorizedImage": "Successfully Classified Image", "trainedModel": "Successfully trained model.", - "trainingModel": "Successfully started model training." + "trainingModel": "Successfully started model training.", + "updatedModel": "Successfully updated model configuration" }, "error": { "deleteImageFailed": "Failed to delete: {{errorMessage}}", "deleteCategoryFailed": "Failed to delete class: {{errorMessage}}", "deleteModelFailed": "Failed to delete model: {{errorMessage}}", "categorizeFailed": "Failed to categorize image: {{errorMessage}}", - "trainingFailed": "Failed to start model training: {{errorMessage}}" + "trainingFailed": "Failed to start model training: {{errorMessage}}", + "updateModelFailed": "Failed to update model: {{errorMessage}}" } }, "deleteCategory": { @@ -39,6 +42,12 @@ "single": "Are you sure you want to delete {{name}}? This will permanently delete all associated data including images and training data. This action cannot be undone.", "desc": "Are you sure you want to delete {{count}} model(s)? This will permanently delete all associated data including images and training data. This action cannot be undone." }, + "edit": { + "title": "Edit Classification Model", + "descriptionState": "Edit the classes for this state classification model. Changes will require retraining the model.", + "descriptionObject": "Edit the object type and classification type for this object classification model.", + "stateClassesInfo": "Note: Changing state classes requires retraining the model with the updated classes." + }, "deleteDatasetImages": { "title": "Delete Dataset Images", "desc": "Are you sure you want to delete {{count}} images from {{dataset}}? This action cannot be undone and will require re-training the model." diff --git a/web/src/components/card/ClassificationCard.tsx b/web/src/components/card/ClassificationCard.tsx index 73be455bc..bde452770 100644 --- a/web/src/components/card/ClassificationCard.tsx +++ b/web/src/components/card/ClassificationCard.tsx @@ -7,7 +7,7 @@ import { } from "@/types/classification"; import { Event } from "@/types/event"; import { forwardRef, useMemo, useRef, useState } from "react"; -import { isDesktop, isMobile } from "react-device-detect"; +import { isDesktop, isMobile, isMobileOnly } from "react-device-detect"; import { useTranslation } from "react-i18next"; import TimeAgo from "../dynamic/TimeAgo"; import { Tooltip, TooltipContent, TooltipTrigger } from "../ui/tooltip"; @@ -264,8 +264,8 @@ export function GroupedClassificationCard({ const Overlay = isDesktop ? Dialog : MobilePage; const Trigger = isDesktop ? DialogTrigger : MobilePageTrigger; - const Header = isDesktop ? DialogHeader : MobilePageHeader; const Content = isDesktop ? DialogContent : MobilePageContent; + const Header = isDesktop ? DialogHeader : MobilePageHeader; const ContentTitle = isDesktop ? DialogTitle : MobilePageTitle; const ContentDescription = isDesktop ? DialogDescription @@ -298,9 +298,9 @@ export function GroupedClassificationCard({ e.preventDefault()} > @@ -308,16 +308,16 @@ export function GroupedClassificationCard({
-
- +
+ {event?.sub_label && event.sub_label !== "none" ? event.sub_label : t(noClassificationLabel)} @@ -390,7 +390,7 @@ export function GroupedClassificationCard({ className={cn( "grid w-full auto-rows-min grid-cols-2 gap-2 sm:grid-cols-3 md:grid-cols-4 lg:grid-cols-6 xl:grid-cols-6 2xl:grid-cols-8", isDesktop && "p-2", - isMobile && "scrollbar-container flex-1 overflow-y-auto", + isMobile && "px-4 pb-4", )} > {group.map((data: ClassificationItemData) => ( diff --git a/web/src/components/classification/ClassificationModelEditDialog.tsx b/web/src/components/classification/ClassificationModelEditDialog.tsx new file mode 100644 index 000000000..ff80a1a29 --- /dev/null +++ b/web/src/components/classification/ClassificationModelEditDialog.tsx @@ -0,0 +1,477 @@ +import { Button } from "@/components/ui/button"; +import { + Dialog, + DialogContent, + DialogDescription, + DialogHeader, + DialogTitle, +} from "@/components/ui/dialog"; +import { + Form, + FormControl, + FormField, + FormItem, + FormLabel, + FormMessage, +} from "@/components/ui/form"; +import { Input } from "@/components/ui/input"; +import { Label } from "@/components/ui/label"; +import { RadioGroup, RadioGroupItem } from "@/components/ui/radio-group"; +import { + Select, + SelectContent, + SelectItem, + SelectTrigger, + SelectValue, +} from "@/components/ui/select"; +import { + CustomClassificationModelConfig, + FrigateConfig, +} from "@/types/frigateConfig"; +import { getTranslatedLabel } from "@/utils/i18n"; +import { zodResolver } from "@hookform/resolvers/zod"; +import axios from "axios"; +import { useCallback, useEffect, useMemo, useState } from "react"; +import { useForm } from "react-hook-form"; +import { useTranslation } from "react-i18next"; +import { LuPlus, LuX } from "react-icons/lu"; +import { toast } from "sonner"; +import useSWR from "swr"; +import { z } from "zod"; + +type ClassificationModelEditDialogProps = { + open: boolean; + model: CustomClassificationModelConfig; + onClose: () => void; + onSuccess: () => void; +}; + +type ObjectClassificationType = "sub_label" | "attribute"; + +type ObjectFormData = { + objectLabel: string; + objectType: ObjectClassificationType; +}; + +type StateFormData = { + classes: string[]; +}; + +export default function ClassificationModelEditDialog({ + open, + model, + onClose, + onSuccess, +}: ClassificationModelEditDialogProps) { + const { t } = useTranslation(["views/classificationModel"]); + const { data: config } = useSWR("config"); + const [isSaving, setIsSaving] = useState(false); + + const isStateModel = model.state_config !== undefined; + const isObjectModel = model.object_config !== undefined; + + const objectLabels = useMemo(() => { + if (!config) return []; + + const labels = new Set(); + + Object.values(config.cameras).forEach((cameraConfig) => { + if (!cameraConfig.enabled || !cameraConfig.enabled_in_config) { + return; + } + + cameraConfig.objects.track.forEach((label) => { + if (!config.model.all_attributes.includes(label)) { + labels.add(label); + } + }); + }); + + return [...labels].sort(); + }, [config]); + + // Define form schema based on model type + const formSchema = useMemo(() => { + if (isObjectModel) { + return z.object({ + objectLabel: z + .string() + .min(1, t("wizard.step1.errors.objectLabelRequired")), + objectType: z.enum(["sub_label", "attribute"]), + }); + } else { + // State model + return z.object({ + classes: z + .array(z.string()) + .min(1, t("wizard.step1.errors.classRequired")) + .refine( + (classes) => { + const nonEmpty = classes.filter((c) => 
c.trim().length > 0); + return nonEmpty.length >= 2; + }, + { message: t("wizard.step1.errors.stateRequiresTwoClasses") }, + ) + .refine( + (classes) => { + const nonEmpty = classes.filter((c) => c.trim().length > 0); + const unique = new Set(nonEmpty.map((c) => c.toLowerCase())); + return unique.size === nonEmpty.length; + }, + { message: t("wizard.step1.errors.classesUnique") }, + ), + }); + } + }, [isObjectModel, t]); + + const form = useForm({ + resolver: zodResolver(formSchema), + defaultValues: isObjectModel + ? ({ + objectLabel: model.object_config?.objects?.[0] || "", + objectType: + (model.object_config + ?.classification_type as ObjectClassificationType) || "sub_label", + } as ObjectFormData) + : ({ + classes: [""], // Will be populated from dataset + } as StateFormData), + mode: "onChange", + }); + + // Fetch dataset to get current classes for state models + const { data: dataset } = useSWR<{ + [id: string]: string[]; + }>(isStateModel ? `classification/${model.name}/dataset` : null, { + revalidateOnFocus: false, + }); + + // Update form with classes from dataset when loaded + useEffect(() => { + if (isStateModel && dataset) { + const classes = Object.keys(dataset).filter((key) => key !== "none"); + if (classes.length > 0) { + (form as ReturnType>).setValue( + "classes", + classes, + ); + } + } + }, [dataset, isStateModel, form]); + + const watchedClasses = isStateModel + ? (form as ReturnType>).watch("classes") + : undefined; + const watchedObjectType = isObjectModel + ? (form as ReturnType>).watch("objectType") + : undefined; + + const handleAddClass = useCallback(() => { + const currentClasses = ( + form as ReturnType> + ).getValues("classes"); + (form as ReturnType>).setValue( + "classes", + [...currentClasses, ""], + { + shouldValidate: true, + }, + ); + }, [form]); + + const handleRemoveClass = useCallback( + (index: number) => { + const currentClasses = ( + form as ReturnType> + ).getValues("classes"); + const newClasses = currentClasses.filter((_, i) => i !== index); + + // Ensure at least one field remains (even if empty) + if (newClasses.length === 0) { + (form as ReturnType>).setValue( + "classes", + [""], + { shouldValidate: true }, + ); + } else { + (form as ReturnType>).setValue( + "classes", + newClasses, + { shouldValidate: true }, + ); + } + }, + [form], + ); + + const onSubmit = useCallback( + async (data: ObjectFormData | StateFormData) => { + setIsSaving(true); + try { + if (isObjectModel) { + const objectData = data as ObjectFormData; + + // Update the config + await axios.put("/config/set", { + requires_restart: 0, + update_topic: `config/classification/custom/${model.name}`, + config_data: { + classification: { + custom: { + [model.name]: { + enabled: model.enabled, + name: model.name, + threshold: model.threshold, + object_config: { + objects: [objectData.objectLabel], + classification_type: objectData.objectType, + }, + }, + }, + }, + }, + }); + + toast.success(t("toast.success.updatedModel"), { + position: "top-center", + }); + } else { + // State model - update classes + // Note: For state models, updating classes requires renaming categories + // which is handled through the dataset API, not the config API + // We'll need to implement this by calling the rename endpoint for each class + // For now, we just show a message that this requires retraining + + toast.info(t("edit.stateClassesInfo"), { + position: "top-center", + }); + } + + onSuccess(); + onClose(); + } catch (err) { + const error = err as { + response?: { data?: { message?: string; detail?: 
string } }; + }; + const errorMessage = + error.response?.data?.message || + error.response?.data?.detail || + "Unknown error"; + toast.error(t("toast.error.updateModelFailed", { errorMessage }), { + position: "top-center", + }); + } finally { + setIsSaving(false); + } + }, + [isObjectModel, model, t, onSuccess, onClose], + ); + + const handleCancel = useCallback(() => { + form.reset(); + onClose(); + }, [form, onClose]); + + return ( + !open && handleCancel()}> + + + {t("edit.title")} + + {isStateModel + ? t("edit.descriptionState") + : t("edit.descriptionObject")} + + + +
+
+ + {isObjectModel && ( + <> + ( + + + {t("wizard.step1.objectLabel")} + + + + + )} + /> + + ( + + + {t("wizard.step1.classificationType")} + + + +
+ + +
+
+ + +
+
+
+ +
+ )} + /> + + )} + + {isStateModel && ( +
+
+ + {t("wizard.step1.states")} + + +
+
+ {watchedClasses?.map((_: string, index: number) => ( + >) + .control + } + name={`classes.${index}` as const} + render={({ field }) => ( + + +
+ + {watchedClasses && + watchedClasses.length > 1 && ( + + )} +
+
+
+ )} + /> + ))} +
+ {isStateModel && + "classes" in form.formState.errors && + form.formState.errors.classes && ( +

+ {form.formState.errors.classes.message} +

+ )} +
+ )} + +
+ + +
+ + +
+
+
+ ); +} diff --git a/web/src/types/frigateConfig.ts b/web/src/types/frigateConfig.ts index ffe4cc14d..f10563379 100644 --- a/web/src/types/frigateConfig.ts +++ b/web/src/types/frigateConfig.ts @@ -306,6 +306,7 @@ export type CustomClassificationModelConfig = { threshold: number; object_config?: { objects: string[]; + classification_type: string; }; state_config?: { cameras: { diff --git a/web/src/views/classification/ModelSelectionView.tsx b/web/src/views/classification/ModelSelectionView.tsx index b1b462497..c5e65e0e5 100644 --- a/web/src/views/classification/ModelSelectionView.tsx +++ b/web/src/views/classification/ModelSelectionView.tsx @@ -1,5 +1,6 @@ import { baseUrl } from "@/api/baseUrl"; import ClassificationModelWizardDialog from "@/components/classification/ClassificationModelWizardDialog"; +import ClassificationModelEditDialog from "@/components/classification/ClassificationModelEditDialog"; import ActivityIndicator from "@/components/indicators/activity-indicator"; import { ImageShadowOverlay } from "@/components/overlay/ImageShadowOverlay"; import { Button, buttonVariants } from "@/components/ui/button"; @@ -14,7 +15,7 @@ import { useCallback, useEffect, useMemo, useState } from "react"; import { useTranslation } from "react-i18next"; import { FaFolderPlus } from "react-icons/fa"; import { MdModelTraining } from "react-icons/md"; -import { LuTrash2 } from "react-icons/lu"; +import { LuPencil, LuTrash2 } from "react-icons/lu"; import { FiMoreVertical } from "react-icons/fi"; import useSWR from "swr"; import Heading from "@/components/ui/heading"; @@ -163,6 +164,7 @@ export default function ModelSelectionView({ key={config.name} config={config} onClick={() => onClick(config)} + onUpdate={() => refreshConfig()} onDelete={() => refreshConfig()} /> ))} @@ -201,9 +203,10 @@ function NoModelsView({ type ModelCardProps = { config: CustomClassificationModelConfig; onClick: () => void; + onUpdate: () => void; onDelete: () => void; }; -function ModelCard({ config, onClick, onDelete }: ModelCardProps) { +function ModelCard({ config, onClick, onUpdate, onDelete }: ModelCardProps) { const { t } = useTranslation(["views/classificationModel"]); const { data: dataset } = useSWR<{ @@ -211,6 +214,7 @@ function ModelCard({ config, onClick, onDelete }: ModelCardProps) { }>(`classification/${config.name}/dataset`, { revalidateOnFocus: false }); const [deleteDialogOpen, setDeleteDialogOpen] = useState(false); + const [editDialogOpen, setEditDialogOpen] = useState(false); const handleDelete = useCallback(async () => { try { @@ -250,6 +254,11 @@ function ModelCard({ config, onClick, onDelete }: ModelCardProps) { setDeleteDialogOpen(true); }, []); + const handleEditClick = useCallback((e: React.MouseEvent) => { + e.stopPropagation(); + setEditDialogOpen(true); + }, []); + const coverImage = useMemo(() => { if (!dataset) { return undefined; @@ -270,6 +279,13 @@ function ModelCard({ config, onClick, onDelete }: ModelCardProps) { return ( <> + setEditDialogOpen(false)} + onSuccess={() => onUpdate()} + /> + setDeleteDialogOpen(!deleteDialogOpen)} @@ -320,6 +336,10 @@ function ModelCard({ config, onClick, onDelete }: ModelCardProps) { align="end" onClick={(e) => e.stopPropagation()} > + + + {t("button.edit", { ns: "common" })} + {t("button.delete", { ns: "common" })} diff --git a/web/src/views/classification/ModelTrainingView.tsx b/web/src/views/classification/ModelTrainingView.tsx index 92a4adcdf..a27a06a9e 100644 --- a/web/src/views/classification/ModelTrainingView.tsx +++ 
b/web/src/views/classification/ModelTrainingView.tsx @@ -327,31 +327,39 @@ export default function ModelTrainingView({ model }: ModelTrainingViewProps) {
-
- - {}} - /> -
+ {(isDesktop || !selectedImages?.length) && ( +
+ + + {}} + /> +
+ )} {selectedImages?.length > 0 ? ( -
-
+
+
{`${selectedImages.length} selected`}
{"|"}
Date: Wed, 5 Nov 2025 08:49:31 -0600 Subject: [PATCH 20/21] UI tweaks (#20811) * camera wizard input mobile font zooming * ensure the selected page is visible when navigating via url on mobile * Filter detail stream to only show items from within the review item * remove incorrect classes causing extra scroll in detail stream * change button label * fix mobile menu button highlight issue --------- Co-authored-by: Nicolas Mowen --- .../settings/wizard/Step1NameCamera.tsx | 10 ++++----- web/src/components/timeline/DetailStream.tsx | 22 ++++++++++++++++++- web/src/pages/Settings.tsx | 16 ++++++++++---- web/src/views/recording/RecordingView.tsx | 1 - web/src/views/settings/CameraSettingsView.tsx | 4 ++-- 5 files changed, 40 insertions(+), 13 deletions(-) diff --git a/web/src/components/settings/wizard/Step1NameCamera.tsx b/web/src/components/settings/wizard/Step1NameCamera.tsx index 6eeb2f91c..8895f82f6 100644 --- a/web/src/components/settings/wizard/Step1NameCamera.tsx +++ b/web/src/components/settings/wizard/Step1NameCamera.tsx @@ -385,7 +385,7 @@ export default function Step1NameCamera({ @@ -495,7 +495,7 @@ export default function Step1NameCamera({
diff --git a/web/src/components/timeline/DetailStream.tsx b/web/src/components/timeline/DetailStream.tsx index 706c6c5b4..4b152aadb 100644 --- a/web/src/components/timeline/DetailStream.tsx +++ b/web/src/components/timeline/DetailStream.tsx @@ -458,6 +458,7 @@ function ReviewGroup({ void; @@ -499,6 +501,7 @@ type EventListProps = { }; function EventList({ event, + review, effectiveTime, annotationOffset, onSeek, @@ -617,6 +620,7 @@ function EventList({
void; effectiveTime?: number; @@ -780,13 +786,27 @@ function ObjectTimeline({ endTime?: number; }) { const { t } = useTranslation("views/events"); - const { data: timeline, isValidating } = useSWR([ + const { data: fullTimeline, isValidating } = useSWR< + TrackingDetailsSequence[] + >([ "timeline", { source_id: eventId, }, ]); + const timeline = useMemo(() => { + if (!fullTimeline) { + return fullTimeline; + } + + return fullTimeline.filter( + (t) => + t.timestamp >= review.start_time && + (review.end_time == undefined || t.timestamp <= review.end_time), + ); + }, [fullTimeline, review]); + if (isValidating && (!timeline || timeline.length === 0)) { return ; } diff --git a/web/src/pages/Settings.tsx b/web/src/pages/Settings.tsx index 844329fc7..19fd63f5c 100644 --- a/web/src/pages/Settings.tsx +++ b/web/src/pages/Settings.tsx @@ -157,9 +157,11 @@ function MobileMenuItem({ const { t } = useTranslation(["views/settings"]); return ( - +
); } @@ -273,6 +275,9 @@ export default function Settings() { } else { setPageToggle(page as SettingsType); } + if (isMobile) { + setContentMobileOpen(true); + } } // don't clear url params if we're creating a new object mask return !(searchParams.has("object_mask") || searchParams.has("event_id")); @@ -282,6 +287,9 @@ export default function Settings() { const cameraNames = cameras.map((c) => c.name); if (cameraNames.includes(camera)) { setSelectedCamera(camera); + if (isMobile) { + setContentMobileOpen(true); + } } // don't clear url params if we're creating a new object mask or trigger return !(searchParams.has("object_mask") || searchParams.has("event_id")); diff --git a/web/src/views/recording/RecordingView.tsx b/web/src/views/recording/RecordingView.tsx index 00e46411e..149237b63 100644 --- a/web/src/views/recording/RecordingView.tsx +++ b/web/src/views/recording/RecordingView.tsx @@ -970,7 +970,6 @@ function Timeline({ "relative overflow-hidden", isDesktop ? cn( - "no-scrollbar overflow-y-auto", timelineType == "timeline" ? "w-[100px] flex-shrink-0" : timelineType == "detail" diff --git a/web/src/views/settings/CameraSettingsView.tsx b/web/src/views/settings/CameraSettingsView.tsx index f42ec84fe..dae7c3365 100644 --- a/web/src/views/settings/CameraSettingsView.tsx +++ b/web/src/views/settings/CameraSettingsView.tsx @@ -717,11 +717,11 @@ export default function CameraSettingsView({
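One closing note on the DetailStream change above: an in-progress review item has no end_time yet, so the filter enforces only the lower bound in that case. The shipped code is the TypeScript useMemo shown in the diff; the windowing rule itself is sketched here in Python for consistency with the earlier examples (TimelineEntry is a stand-in for the API's timeline payload):

```python
from dataclasses import dataclass
from typing import Optional


@dataclass
class TimelineEntry:
    timestamp: float  # epoch seconds


def within_review_window(
    entries: list[TimelineEntry],
    start_time: float,
    end_time: Optional[float],
) -> list[TimelineEntry]:
    # Keep only timeline entries inside the review item's window; an
    # ongoing review has end_time=None, so only the lower bound applies.
    return [
        e
        for e in entries
        if e.timestamp >= start_time
        and (end_time is None or e.timestamp <= end_time)
    ]
```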