From fad62b996ad9b66420128265f65ce1d035c25db0 Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Mon, 17 Mar 2025 13:44:57 -0500 Subject: [PATCH 01/97] Add Frigate+ pane to Settings UI (#17208) * add plus data to config api response * add fields to frontend type * add frigate+ page in settings * add docs * fix label in explore detail dialog --- docs/docs/plus/faq.md | 10 + frigate/api/app.py | 13 + web/public/locales/en/views/settings.json | 37 ++- .../overlay/detail/SearchDetailDialog.tsx | 2 +- web/src/pages/Settings.tsx | 3 + web/src/types/frigateConfig.ts | 6 + .../settings/FrigatePlusSettingsView.tsx | 229 ++++++++++++++++++ 7 files changed, 297 insertions(+), 3 deletions(-) create mode 100644 web/src/views/settings/FrigatePlusSettingsView.tsx diff --git a/docs/docs/plus/faq.md b/docs/docs/plus/faq.md index fb0cd2512..151eb3f60 100644 --- a/docs/docs/plus/faq.md +++ b/docs/docs/plus/faq.md @@ -22,3 +22,13 @@ Yes. Models and metadata are stored in the `model_cache` directory within the co ### Can I keep using my Frigate+ models even if I do not renew my subscription? Yes. Subscriptions to Frigate+ provide access to the infrastructure used to train the models. Models trained with your subscription are yours to keep and use forever. However, do note that the terms and conditions prohibit you from sharing, reselling, or creating derivative products from the models. + +### Why can't I submit images to Frigate+? + +If you've configured your API key and the Frigate+ Settings page in the UI shows that the key is active, you need to ensure that you've enabled both snapshots and `clean_copy` snapshots for the cameras you'd like to submit images for. Note that `clean_copy` is enabled by default when snapshots are enabled. 
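These settings can also be applied per camera instead of globally. A minimal per-camera sketch, assuming an illustrative camera named `front_door`:

```yaml
cameras:
  front_door: # illustrative camera name
    snapshots:
      enabled: true
      clean_copy: true
```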
+ +```yaml +snapshots: + enabled: true + clean_copy: true +``` diff --git a/frigate/api/app.py b/frigate/api/app.py index 05013ed12..9d7b3768f 100644 --- a/frigate/api/app.py +++ b/frigate/api/app.py @@ -9,6 +9,7 @@ import traceback from datetime import datetime, timedelta from functools import reduce from io import StringIO +from pathlib import Path as FilePath from typing import Any, Optional import aiofiles @@ -174,6 +175,18 @@ def config(request: Request): config["model"]["all_attributes"] = config_obj.model.all_attributes config["model"]["non_logo_attributes"] = config_obj.model.non_logo_attributes + # Add model plus data if plus is enabled + if config["plus"]["enabled"]: + model_json_path = FilePath(config["model"]["path"]).with_suffix(".json") + try: + with open(model_json_path, "r") as f: + model_plus_data = json.load(f) + config["model"]["plus"] = model_plus_data + except FileNotFoundError: + config["model"]["plus"] = None + except json.JSONDecodeError: + config["model"]["plus"] = None + # use merged labelamp for detector_config in config["detectors"].values(): detector_config["model"]["labelmap"] = ( diff --git a/web/public/locales/en/views/settings.json b/web/public/locales/en/views/settings.json index f19ac5ee6..3d25b92c1 100644 --- a/web/public/locales/en/views/settings.json +++ b/web/public/locales/en/views/settings.json @@ -7,7 +7,8 @@ "masksAndZones": "Mask and Zone Editor - Frigate", "motionTuner": "Motion Tuner - Frigate", "object": "Object Settings - Frigate", - "general": "General Settings - Frigate" + "general": "General Settings - Frigate", + "frigatePlus": "Frigate+ Settings - Frigate" }, "menu": { "uiSettings": "UI Settings", @@ -17,7 +18,8 @@ "motionTuner": "Motion Tuner", "debug": "Debug", "users": "Users", - "notifications": "Notifications" + "notifications": "Notifications", + "frigateplus": "Frigate+" }, "dialog": { "unsavedChanges": { @@ -515,5 +517,36 @@ "registerFailed": "Failed to save notification registration." } } + }, + "frigatePlus": { + "title": "Frigate+ Settings", + "apiKey": { + "title": "Frigate+ API Key", + "validated": "Frigate+ API key is detected and validated", + "notValidated": "Frigate+ API key is not detected or not validated", + "desc": "The Frigate+ API key enables integration with the Frigate+ service.", + "plusLink": "Read more about Frigate+" + }, + "snapshotConfig": { + "title": "Snapshot Configuration", + "desc": "Submitting to Frigate+ requires both snapshots and clean_copy snapshots to be enabled in your config.", + "documentation": "Read the documentation", + "cleanCopyWarning": "Some cameras have snapshots enabled but have the clean copy disabled. You need to enable clean_copy in your snapshot config to be able to submit images from these cameras to Frigate+.", + "table": { + "camera": "Camera", + "snapshots": "Snapshots", + "cleanCopySnapshots": "clean_copy Snapshots" + } + }, + "modelInfo": { + "title": "Model Information", + "modelType": "Model Type", + "trainDate": "Train Date", + "baseModel": "Base Model", + "supportedDetectors": "Supported Detectors", + "cameras": "Cameras", + "loading": "Loading model information...", + "error": "Failed to load model information" + } } } diff --git a/web/src/components/overlay/detail/SearchDetailDialog.tsx b/web/src/components/overlay/detail/SearchDetailDialog.tsx index d74efdf6d..891ce88b1 100644 --- a/web/src/components/overlay/detail/SearchDetailDialog.tsx +++ b/web/src/components/overlay/detail/SearchDetailDialog.tsx @@ -563,7 +563,7 @@ function ObjectDetailsTab({
{t("details.label")}
{getIconForLabel(search.label, "size-4 text-primary")} - {t("{search.label}", { ns: "objects" })} + {t(search.label, { ns: "objects" })} {search.sub_label && ` (${search.sub_label})`} diff --git a/web/src/pages/Settings.tsx b/web/src/pages/Settings.tsx index 353d0dbf8..6ccda34f3 100644 --- a/web/src/pages/Settings.tsx +++ b/web/src/pages/Settings.tsx @@ -37,6 +37,7 @@ import AuthenticationView from "@/views/settings/AuthenticationView"; import NotificationView from "@/views/settings/NotificationsSettingsView"; import ClassificationSettingsView from "@/views/settings/ClassificationSettingsView"; import UiSettingsView from "@/views/settings/UiSettingsView"; +import FrigatePlusSettingsView from "@/views/settings/FrigatePlusSettingsView"; import { useSearchEffect } from "@/hooks/use-overlay-state"; import { useSearchParams } from "react-router-dom"; import { useInitialCameraState } from "@/api/ws"; @@ -54,6 +55,7 @@ const allSettingsViews = [ "debug", "users", "notifications", + "frigateplus", ] as const; type SettingsType = (typeof allSettingsViews)[number]; @@ -279,6 +281,7 @@ export default function Settings() { {page == "notifications" && ( )} + {page == "frigateplus" && }
{confirmationDialogOpen && ( | null; diff --git a/web/src/views/settings/FrigatePlusSettingsView.tsx b/web/src/views/settings/FrigatePlusSettingsView.tsx new file mode 100644 index 000000000..965843f09 --- /dev/null +++ b/web/src/views/settings/FrigatePlusSettingsView.tsx @@ -0,0 +1,229 @@ +import Heading from "@/components/ui/heading"; +import { Label } from "@/components/ui/label"; +import { useEffect } from "react"; +import { Toaster } from "sonner"; +import { Separator } from "../../components/ui/separator"; +import useSWR from "swr"; +import { FrigateConfig } from "@/types/frigateConfig"; +import { CheckCircle2, XCircle } from "lucide-react"; +import { Trans, useTranslation } from "react-i18next"; +import { IoIosWarning } from "react-icons/io"; +import { Link } from "react-router-dom"; +import { LuExternalLink } from "react-icons/lu"; + +export default function FrigatePlusSettingsView() { + const { data: config } = useSWR("config"); + const { t } = useTranslation("views/settings"); + + useEffect(() => { + document.title = t("documentTitle.frigatePlus"); + }, [t]); + + const needCleanSnapshots = () => { + if (!config) { + return false; + } + return Object.values(config.cameras).some( + (camera) => camera.snapshots.enabled && !camera.snapshots.clean_copy, + ); + }; + + return ( + <> +
+ +
+ + {t("frigatePlus.title")} + + + + + + {t("frigatePlus.apiKey.title")} + + +
+
+
+ {config?.plus?.enabled ? ( + + ) : ( + + )} + +
+
+

{t("frigatePlus.apiKey.desc")}

+ {!config?.model.plus && ( + <> +
+ + {t("frigatePlus.apiKey.plusLink")} + + +
+ + )} +
+
+ + {config?.model.plus && ( + <> + +
+ + {t("frigatePlus.modelInfo.title")} + +
+ {!config?.model?.plus && ( +

+ {t("frigatePlus.modelInfo.loading")} +

+ )} + {config?.model?.plus === null && ( +

+ {t("frigatePlus.modelInfo.error")} +

+ )} + {config?.model?.plus && ( +
+
+ +

{config.model.plus.name}

+
+
+ +

+ {new Date( + config.model.plus.trainDate, + ).toLocaleString()} +

+
+
+ +

{config.model.plus.baseModel}

+
+
+ +

+ {config.model.plus.supportedDetectors.join(", ")} +

+
+
+ )} +
+
+ + )} + + + +
+ + {t("frigatePlus.snapshotConfig.title")} + +
+
+

+ + frigatePlus.snapshotConfig.desc + +

+
+ + {t("frigatePlus.snapshotConfig.documentation")} + + +
+
+ {config && ( +
+ + + + + + + + + + {Object.entries(config.cameras).map( + ([name, camera]) => ( + + + + + + ), + )} + +
+ {t("frigatePlus.snapshotConfig.table.camera")} + + {t("frigatePlus.snapshotConfig.table.snapshots")} + + + frigatePlus.snapshotConfig.table.cleanCopySnapshots + +
{name} + {camera.snapshots.enabled ? ( + + ) : ( + + )} + + {camera.snapshots?.enabled && + camera.snapshots?.clean_copy ? ( + + ) : ( + + )} +
+
+ )} + {needCleanSnapshots() && ( +
+
+ +
+ + frigatePlus.snapshotConfig.cleanCopyWarning + +
+
+
+ )} +
+
+
+
+
+ + ); +} From ff8e145b906d742d43c66c2f18d98704968e4698 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Mon, 17 Mar 2025 13:50:13 -0600 Subject: [PATCH 02/97] Face setup wizard (#17203) * Fix login page * Increase face image size and add time ago * Add component for indicating steps in a wizard * Split out form inputs from dialog * Add wizard for adding new face to library * Simplify dialog * Translations * Fix scaling bug * Fix key missing * Improve multi select * Adjust wording and spacing * Add tip for face training * Fix padding * Remove text for buttons on mobile --- frigate/data_processing/real_time/face.py | 10 +- web/public/locales/en/common.json | 4 +- web/public/locales/en/views/faceLibrary.json | 9 +- .../components/indicators/StepIndicator.tsx | 28 +++ web/src/components/input/ImageEntry.tsx | 58 ++++++ web/src/components/input/TextEntry.tsx | 68 +++++++ .../overlay/detail/FaceCreateWizardDialog.tsx | 168 ++++++++++++++++++ .../overlay/dialog/TextEntryDialog.tsx | 79 ++------ .../overlay/dialog/UploadImageDialog.tsx | 63 ++----- web/src/pages/FaceLibrary.tsx | 160 +++++++---------- web/src/pages/LoginPage.tsx | 18 +- 11 files changed, 442 insertions(+), 223 deletions(-) create mode 100644 web/src/components/indicators/StepIndicator.tsx create mode 100644 web/src/components/input/ImageEntry.tsx create mode 100644 web/src/components/input/TextEntry.tsx create mode 100644 web/src/components/overlay/detail/FaceCreateWizardDialog.tsx diff --git a/frigate/data_processing/real_time/face.py b/frigate/data_processing/real_time/face.py index e70801812..b51b7a20f 100644 --- a/frigate/data_processing/real_time/face.py +++ b/frigate/data_processing/real_time/face.py @@ -227,6 +227,8 @@ class FaceRealTimeProcessor(RealTimeProcessorApi): scale_factor = MAX_DETECTION_HEIGHT / input.shape[0] new_width = int(scale_factor * input.shape[1]) input = cv2.resize(input, (new_width, MAX_DETECTION_HEIGHT)) + else: + scale_factor = 1 self.face_detector.setInputSize((input.shape[1], input.shape[0])) faces = self.face_detector.detect(input) @@ -241,10 +243,10 @@ class FaceRealTimeProcessor(RealTimeProcessorApi): continue raw_bbox = potential_face[0:4].astype(np.uint16) - x: int = max(raw_bbox[0], 0) - y: int = max(raw_bbox[1], 0) - w: int = raw_bbox[2] - h: int = raw_bbox[3] + x: int = int(max(raw_bbox[0], 0) / scale_factor) + y: int = int(max(raw_bbox[1], 0) / scale_factor) + w: int = int(raw_bbox[2] / scale_factor) + h: int = int(raw_bbox[3] / scale_factor) bbox = (x, y, x + w, y + h) if face is None or area(bbox) > area(face): diff --git a/web/public/locales/en/common.json b/web/public/locales/en/common.json index 4ddd9244e..14b88f707 100644 --- a/web/public/locales/en/common.json +++ b/web/public/locales/en/common.json @@ -64,6 +64,7 @@ "button": { "apply": "Apply", "reset": "Reset", + "done": "Done", "enabled": "Enabled", "enable": "Enable", "disabled": "Disabled", @@ -94,7 +95,8 @@ "play": "Play", "unselect": "Unselect", "export": "Export", - "deleteNow": "Delete Now" + "deleteNow": "Delete Now", + "next": "Next" }, "menu": { "system": "System", diff --git a/web/public/locales/en/views/faceLibrary.json b/web/public/locales/en/views/faceLibrary.json index b95f744d7..4028690e3 100644 --- a/web/public/locales/en/views/faceLibrary.json +++ b/web/public/locales/en/views/faceLibrary.json @@ -1,4 +1,7 @@ { + "description": { + "addFace": "Walk through adding a new face to the Face Library." 
+ }, "documentTitle": "Face Library - Frigate", "uploadFaceImage": { "title": "Upload Face Image", @@ -6,7 +9,8 @@ }, "createFaceLibrary": { "title": "Create Face Library", - "desc": "Create a new face library" + "desc": "Create a new face library", + "nextSteps": "It is recommended to use the Train tab to select and train images for each person as they are detected. When building a strong foundation it is strongly recommended to only train on images that are straight-on. Ignore images from cameras that recognize faces from an angle." }, "train": { "title": "Train", @@ -19,12 +23,13 @@ "uploadImage": "Upload Image", "reprocessFace": "Reprocess Face" }, + "readTheDocs": "Read the documentation to view more details on refining images for the Face Library", "trainFaceAs": "Train Face as:", "trainFaceAsPerson": "Train Face as Person", "toast": { "success": { "uploadedImage": "Successfully uploaded image.", - "addFaceLibrary": "Successfully add face library.", + "addFaceLibrary": "{{name}} has successfully been added to the Face Library!", "deletedFace": "Successfully deleted face.", "trainedFace": "Successfully trained face.", "updatedFaceScore": "Successfully updated face score." diff --git a/web/src/components/indicators/StepIndicator.tsx b/web/src/components/indicators/StepIndicator.tsx new file mode 100644 index 000000000..641ae32ca --- /dev/null +++ b/web/src/components/indicators/StepIndicator.tsx @@ -0,0 +1,28 @@ +import { cn } from "@/lib/utils"; + +type StepIndicatorProps = { + steps: string[]; + currentStep: number; +}; +export default function StepIndicator({ + steps, + currentStep, +}: StepIndicatorProps) { + return ( +
+ {steps.map((name, idx) => ( +
+
+ {idx + 1} +
+
{name}
+
+ ))} +
+ ); +} diff --git a/web/src/components/input/ImageEntry.tsx b/web/src/components/input/ImageEntry.tsx new file mode 100644 index 000000000..afb399177 --- /dev/null +++ b/web/src/components/input/ImageEntry.tsx @@ -0,0 +1,58 @@ +import { Form, FormControl, FormField, FormItem } from "@/components/ui/form"; +import { Input } from "@/components/ui/input"; +import { zodResolver } from "@hookform/resolvers/zod"; +import React, { useCallback } from "react"; +import { useForm } from "react-hook-form"; + +import { z } from "zod"; + +type ImageEntryProps = { + onSave: (file: File) => void; + children?: React.ReactNode; +}; +export default function ImageEntry({ onSave, children }: ImageEntryProps) { + const formSchema = z.object({ + file: z.instanceof(FileList, { message: "Please select an image file." }), + }); + + const form = useForm>({ + resolver: zodResolver(formSchema), + }); + const fileRef = form.register("file"); + + // upload handler + + const onSubmit = useCallback( + (data: z.infer) => { + if (!data["file"] || Object.keys(data.file).length == 0) { + return; + } + + onSave(data["file"]["0"]); + }, + [onSave], + ); + + return ( +
+ + ( + + + + + + )} + /> + {children} + + + ); +} diff --git a/web/src/components/input/TextEntry.tsx b/web/src/components/input/TextEntry.tsx new file mode 100644 index 000000000..c9fa8a8a9 --- /dev/null +++ b/web/src/components/input/TextEntry.tsx @@ -0,0 +1,68 @@ +import { Form, FormControl, FormField, FormItem } from "@/components/ui/form"; +import { Input } from "@/components/ui/input"; +import { zodResolver } from "@hookform/resolvers/zod"; +import React, { useCallback } from "react"; +import { useForm } from "react-hook-form"; + +import { z } from "zod"; + +type TextEntryProps = { + defaultValue?: string; + placeholder?: string; + allowEmpty?: boolean; + onSave: (text: string) => void; + children?: React.ReactNode; +}; +export default function TextEntry({ + defaultValue, + placeholder, + allowEmpty, + onSave, + children, +}: TextEntryProps) { + const formSchema = z.object({ + text: z.string(), + }); + + const form = useForm>({ + resolver: zodResolver(formSchema), + defaultValues: { text: defaultValue }, + }); + const fileRef = form.register("text"); + + // upload handler + + const onSubmit = useCallback( + (data: z.infer) => { + if (!allowEmpty && !data["text"]) { + return; + } + onSave(data["text"]); + }, + [onSave, allowEmpty], + ); + + return ( +
+ + ( + + + + + + )} + /> + {children} + + + ); +} diff --git a/web/src/components/overlay/detail/FaceCreateWizardDialog.tsx b/web/src/components/overlay/detail/FaceCreateWizardDialog.tsx new file mode 100644 index 000000000..659ac4c88 --- /dev/null +++ b/web/src/components/overlay/detail/FaceCreateWizardDialog.tsx @@ -0,0 +1,168 @@ +import StepIndicator from "@/components/indicators/StepIndicator"; +import ImageEntry from "@/components/input/ImageEntry"; +import TextEntry from "@/components/input/TextEntry"; +import { + MobilePage, + MobilePageContent, + MobilePageDescription, + MobilePageHeader, + MobilePageTitle, +} from "@/components/mobile/MobilePage"; +import { Button } from "@/components/ui/button"; +import { + Dialog, + DialogContent, + DialogDescription, + DialogHeader, + DialogTitle, +} from "@/components/ui/dialog"; +import { cn } from "@/lib/utils"; +import axios from "axios"; +import { useCallback, useState } from "react"; +import { isDesktop } from "react-device-detect"; +import { useTranslation } from "react-i18next"; +import { LuExternalLink } from "react-icons/lu"; +import { Link } from "react-router-dom"; +import { toast } from "sonner"; + +const STEPS = ["Enter Face Name", "Upload Face Image", "Next Steps"]; + +type CreateFaceWizardDialogProps = { + open: boolean; + setOpen: (open: boolean) => void; + onFinish: () => void; +}; +export default function CreateFaceWizardDialog({ + open, + setOpen, + onFinish, +}: CreateFaceWizardDialogProps) { + const { t } = useTranslation("views/faceLibrary"); + + // wizard + + const [step, setStep] = useState(0); + const [name, setName] = useState(""); + + const handleReset = useCallback(() => { + setStep(0); + setName(""); + setOpen(false); + }, [setOpen]); + + // data handling + + const onUploadImage = useCallback( + (file: File) => { + const formData = new FormData(); + formData.append("file", file); + axios + .post(`faces/${name}/register`, formData, { + headers: { + "Content-Type": "multipart/form-data", + }, + }) + .then((resp) => { + if (resp.status == 200) { + setStep(2); + toast.success(t("toast.success.uploadedImage"), { + position: "top-center", + }); + } + }) + .catch((error) => { + const errorMessage = + error.response?.data?.message || + error.response?.data?.detail || + "Unknown error"; + toast.error(t("toast.error.uploadingImageFailed", { errorMessage }), { + position: "top-center", + }); + }); + }, + [name, t], + ); + + // layout + + const Overlay = isDesktop ? Dialog : MobilePage; + const Content = isDesktop ? DialogContent : MobilePageContent; + const Header = isDesktop ? DialogHeader : MobilePageHeader; + const Title = isDesktop ? DialogTitle : MobilePageTitle; + const Description = isDesktop ? DialogDescription : MobilePageDescription; + + return ( + { + if (!open) { + handleReset(); + } + }} + > + +
+ {t("button.addFace")} + {isDesktop && {t("description.addFace")}} +
+ + {step == 0 && ( + { + setName(name); + setStep(1); + }} + > +
+ +
+
+ )} + {step == 1 && ( + +
+ +
+
+ )} + {step == 2 && ( +
+ {t("toast.success.addFaceLibrary", { name })} +

+ {t("createFaceLibrary.nextSteps")} +

+
+ + {t("readTheDocs")} + + +
+
+ +
+
+ )} +
+
+ ); +} diff --git a/web/src/components/overlay/dialog/TextEntryDialog.tsx b/web/src/components/overlay/dialog/TextEntryDialog.tsx index a25c023ea..6fc1f9ad3 100644 --- a/web/src/components/overlay/dialog/TextEntryDialog.tsx +++ b/web/src/components/overlay/dialog/TextEntryDialog.tsx @@ -1,3 +1,4 @@ +import TextEntry from "@/components/input/TextEntry"; import { Button } from "@/components/ui/button"; import { Dialog, @@ -7,15 +8,8 @@ import { DialogHeader, DialogTitle, } from "@/components/ui/dialog"; -import { Form, FormControl, FormField, FormItem } from "@/components/ui/form"; -import { Input } from "@/components/ui/input"; -import { zodResolver } from "@hookform/resolvers/zod"; -import { useCallback, useEffect } from "react"; -import { useForm } from "react-hook-form"; import { useTranslation } from "react-i18next"; -import { z } from "zod"; - type TextEntryDialogProps = { open: boolean; title: string; @@ -35,35 +29,7 @@ export default function TextEntryDialog({ defaultValue = "", allowEmpty = false, }: TextEntryDialogProps) { - const formSchema = z.object({ - text: z.string(), - }); - - const { t } = useTranslation("components/dialog"); - - const form = useForm>({ - resolver: zodResolver(formSchema), - defaultValues: { text: defaultValue }, - }); - const fileRef = form.register("text"); - - // upload handler - - const onSubmit = useCallback( - (data: z.infer) => { - if (!allowEmpty && !data["text"]) { - return; - } - onSave(data["text"]); - }, - [onSave, allowEmpty], - ); - - useEffect(() => { - if (open) { - form.reset({ text: defaultValue }); - } - }, [open, defaultValue, form]); + const { t } = useTranslation("common"); return ( @@ -72,33 +38,20 @@ export default function TextEntryDialog({ {title} {description && {description}} -
- - ( - - - - - - )} - /> - - - - - - + + + + + +
); diff --git a/web/src/components/overlay/dialog/UploadImageDialog.tsx b/web/src/components/overlay/dialog/UploadImageDialog.tsx index 6a01a7fab..7fab82eea 100644 --- a/web/src/components/overlay/dialog/UploadImageDialog.tsx +++ b/web/src/components/overlay/dialog/UploadImageDialog.tsx @@ -1,3 +1,4 @@ +import ImageEntry from "@/components/input/ImageEntry"; import { Button } from "@/components/ui/button"; import { Dialog, @@ -7,12 +8,7 @@ import { DialogHeader, DialogTitle, } from "@/components/ui/dialog"; -import { Form, FormControl, FormField, FormItem } from "@/components/ui/form"; -import { Input } from "@/components/ui/input"; -import { zodResolver } from "@hookform/resolvers/zod"; -import { useCallback } from "react"; -import { useForm } from "react-hook-form"; -import { z } from "zod"; +import { useTranslation } from "react-i18next"; type UploadImageDialogProps = { open: boolean; @@ -28,27 +24,7 @@ export default function UploadImageDialog({ setOpen, onSave, }: UploadImageDialogProps) { - const formSchema = z.object({ - file: z.instanceof(FileList, { message: "Please select an image file." }), - }); - - const form = useForm>({ - resolver: zodResolver(formSchema), - }); - const fileRef = form.register("file"); - - // upload handler - - const onSubmit = useCallback( - (data: z.infer) => { - if (!data["file"] || Object.keys(data.file).length == 0) { - return; - } - - onSave(data["file"]["0"]); - }, - [onSave], - ); + const { t } = useTranslation("common"); return ( @@ -57,31 +33,14 @@ export default function UploadImageDialog({ {title} {description && {description}} -
- - ( - - - - - - )} - /> - - - - - - + + + + + +
); diff --git a/web/src/pages/FaceLibrary.tsx b/web/src/pages/FaceLibrary.tsx index 33fbb69d1..afa196f35 100644 --- a/web/src/pages/FaceLibrary.tsx +++ b/web/src/pages/FaceLibrary.tsx @@ -1,7 +1,8 @@ import { baseUrl } from "@/api/baseUrl"; +import TimeAgo from "@/components/dynamic/TimeAgo"; import AddFaceIcon from "@/components/icons/AddFaceIcon"; import ActivityIndicator from "@/components/indicators/activity-indicator"; -import TextEntryDialog from "@/components/overlay/dialog/TextEntryDialog"; +import CreateFaceWizardDialog from "@/components/overlay/detail/FaceCreateWizardDialog"; import UploadImageDialog from "@/components/overlay/dialog/UploadImageDialog"; import { Button } from "@/components/ui/button"; import { @@ -25,6 +26,7 @@ import { cn } from "@/lib/utils"; import { FrigateConfig } from "@/types/frigateConfig"; import axios from "axios"; import { useCallback, useEffect, useMemo, useRef, useState } from "react"; +import { isDesktop } from "react-device-detect"; import { useTranslation } from "react-i18next"; import { LuImagePlus, LuRefreshCw, LuScanFace, LuTrash2 } from "react-icons/lu"; import { toast } from "sonner"; @@ -115,42 +117,16 @@ export default function FaceLibrary() { [pageToggle, refreshFaces, t], ); - const onAddName = useCallback( - (name: string) => { - axios - .post(`faces/${name}/create`, { - headers: { - "Content-Type": "multipart/form-data", - }, - }) - .then((resp) => { - if (resp.status == 200) { - setAddFace(false); - refreshFaces(); - toast.success(t("toast.success.addFaceLibrary"), { - position: "top-center", - }); - } - }) - .catch((error) => { - const errorMessage = - error.response?.data?.message || - error.response?.data?.detail || - "Unknown error"; - toast.error(t("toast.error.addFaceLibraryFailed", { errorMessage }), { - position: "top-center", - }); - }); - }, - [refreshFaces, t], - ); - // face multiselect const [selectedFaces, setSelectedFaces] = useState([]); const onClickFace = useCallback( - (imageId: string) => { + (imageId: string, ctrl: boolean) => { + if (selectedFaces.length == 0 && !ctrl) { + return; + } + const index = selectedFaces.indexOf(imageId); if (index != -1) { @@ -172,33 +148,42 @@ export default function FaceLibrary() { [selectedFaces, setSelectedFaces], ); - const onDelete = useCallback(() => { - axios - .post(`/faces/train/delete`, { ids: selectedFaces }) - .then((resp) => { - setSelectedFaces([]); + const onDelete = useCallback( + (name: string, ids: string[]) => { + axios + .post(`/faces/${name}/delete`, { ids }) + .then((resp) => { + setSelectedFaces([]); - if (resp.status == 200) { - toast.success(t("toast.success.deletedFace"), { + if (resp.status == 200) { + toast.success(t("toast.success.deletedFace"), { + position: "top-center", + }); + + if (faceImages.length == 1) { + // face has been deleted + setPageToggle(""); + } + + refreshFaces(); + } + }) + .catch((error) => { + const errorMessage = + error.response?.data?.message || + error.response?.data?.detail || + "Unknown error"; + toast.error(t("toast.error.deleteFaceFailed", { errorMessage }), { position: "top-center", }); - refreshFaces(); - } - }) - .catch((error) => { - const errorMessage = - error.response?.data?.message || - error.response?.data?.detail || - "Unknown error"; - toast.error(t("toast.error.deleteFaceFailed", { errorMessage }), { - position: "top-center", }); - }); - }, [selectedFaces, refreshFaces, t]); + }, + [faceImages, refreshFaces, setPageToggle, t], + ); // keyboard - useKeyboardListener(["a"], (key, modifiers) => { + 
useKeyboardListener(["a", "Escape"], (key, modifiers) => { if (modifiers.repeat || !modifiers.down) { return; } @@ -209,6 +194,9 @@ export default function FaceLibrary() { setSelectedFaces([...trainImages]); } break; + case "Escape": + setSelectedFaces([]); + break; } }); @@ -228,12 +216,10 @@ export default function FaceLibrary() { onSave={onUploadImage} /> -
@@ -283,21 +269,24 @@ export default function FaceLibrary() { {selectedFaces?.length > 0 ? (
-
) : (
{pageToggle != "train" && ( )}
@@ -317,7 +306,7 @@ export default function FaceLibrary() { ))}
@@ -329,7 +318,7 @@ type TrainingGridProps = { attemptImages: string[]; faceNames: string[]; selectedFaces: string[]; - onClickFace: (image: string) => void; + onClickFace: (image: string, ctrl: boolean) => void; onRefresh: () => void; }; function TrainingGrid({ @@ -349,7 +338,7 @@ function TrainingGrid({ faceNames={faceNames} threshold={config.face_recognition.recognition_threshold} selected={selectedFaces.includes(image)} - onClick={() => onClickFace(image)} + onClick={(meta) => onClickFace(image, meta)} onRefresh={onRefresh} /> ))} @@ -362,7 +351,7 @@ type FaceAttemptProps = { faceNames: string[]; threshold: number; selected: boolean; - onClick: () => void; + onClick: (meta: boolean) => void; onRefresh: () => void; }; function FaceAttempt({ @@ -378,6 +367,7 @@ function FaceAttempt({ const parts = image.split("-"); return { + timestamp: Number.parseFloat(parts[0]), eventId: `${parts[0]}-${parts[1]}`, name: parts[2], score: parts[3], @@ -439,10 +429,13 @@ function FaceAttempt({ ? "shadow-selected outline-selected" : "outline-transparent duration-500", )} - onClick={onClick} + onClick={(e) => onClick(e.metaKey || e.ctrlKey)} > -
- +
+ +
+ +
@@ -500,9 +493,9 @@ function FaceAttempt({ type FaceGridProps = { faceImages: string[]; pageToggle: string; - onRefresh: () => void; + onDelete: (name: string, ids: string[]) => void; }; -function FaceGrid({ faceImages, pageToggle, onRefresh }: FaceGridProps) { +function FaceGrid({ faceImages, pageToggle, onDelete }: FaceGridProps) { return (
{faceImages.map((image: string) => ( @@ -510,7 +503,7 @@ function FaceGrid({ faceImages, pageToggle, onRefresh }: FaceGridProps) { key={image} name={pageToggle} image={image} - onRefresh={onRefresh} + onDelete={onDelete} /> ))}
@@ -520,31 +513,10 @@ function FaceGrid({ faceImages, pageToggle, onRefresh }: FaceGridProps) { type FaceImageProps = { name: string; image: string; - onRefresh: () => void; + onDelete: (name: string, ids: string[]) => void; }; -function FaceImage({ name, image, onRefresh }: FaceImageProps) { +function FaceImage({ name, image, onDelete }: FaceImageProps) { const { t } = useTranslation(["views/faceLibrary"]); - const onDelete = useCallback(() => { - axios - .post(`/faces/${name}/delete`, { ids: [image] }) - .then((resp) => { - if (resp.status == 200) { - toast.success(t("toast.success.deletedFace"), { - position: "top-center", - }); - onRefresh(); - } - }) - .catch((error) => { - const errorMessage = - error.response?.data?.message || - error.response?.data?.detail || - "Unknown error"; - toast.error(t("toast.error.deleteFaceFailed", { errorMessage }), { - position: "top-center", - }); - }); - }, [name, image, onRefresh, t]); return (
@@ -561,7 +533,7 @@ function FaceImage({ name, image, onRefresh }: FaceImageProps) { onDelete(name, [image])} /> {t("button.deleteFaceAttempts")} diff --git a/web/src/pages/LoginPage.tsx b/web/src/pages/LoginPage.tsx index d79a7c953..8cf87f206 100644 --- a/web/src/pages/LoginPage.tsx +++ b/web/src/pages/LoginPage.tsx @@ -1,20 +1,24 @@ import { UserAuthForm } from "@/components/auth/AuthForm"; import Logo from "@/components/Logo"; import { ThemeProvider } from "@/context/theme-provider"; +import "@/utils/i18n"; +import { LanguageProvider } from "@/context/language-provider"; function LoginPage() { return ( -
-
-
-
- + +
+
+
+
+ +
+
-
-
+ ); } From bf22d89f67d68f1b10dab94c472b62509ff5fede Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Mon, 17 Mar 2025 15:57:46 -0600 Subject: [PATCH 03/97] Improve Face Library Management (#17213) * Set maximum number of face images to be kept * Fix vertical camera scaling * adjust wording * Add attributes to search data * Add button to train face from event * Handle event id saving in API --- frigate/api/classification.py | 57 ++++++++-- frigate/api/event.py | 1 + frigate/data_processing/real_time/face.py | 11 ++ frigate/util/path.py | 8 ++ web/public/locales/en/views/faceLibrary.json | 2 +- .../overlay/detail/SearchDetailDialog.tsx | 107 +++++++++++++++--- web/src/pages/FaceLibrary.tsx | 2 +- web/src/types/search.ts | 1 + web/src/views/settings/MotionTunerView.tsx | 2 +- web/src/views/settings/ObjectSettingsView.tsx | 2 +- 10 files changed, 167 insertions(+), 26 deletions(-) diff --git a/frigate/api/classification.py b/frigate/api/classification.py index 85b604379..df804f34a 100644 --- a/frigate/api/classification.py +++ b/frigate/api/classification.py @@ -6,6 +6,7 @@ import random import shutil import string +import cv2 from fastapi import APIRouter, Depends, Request, UploadFile from fastapi.responses import JSONResponse from pathvalidate import sanitize_filename @@ -14,9 +15,11 @@ from playhouse.shortcuts import model_to_dict from frigate.api.auth import require_role from frigate.api.defs.tags import Tags +from frigate.config.camera import DetectConfig from frigate.const import FACE_DIR from frigate.embeddings import EmbeddingsContext from frigate.models import Event +from frigate.util.path import get_event_snapshot logger = logging.getLogger(__name__) @@ -87,16 +90,27 @@ def train_face(request: Request, name: str, body: dict = None): ) json: dict[str, any] = body or {} - training_file = os.path.join( - FACE_DIR, f"train/{sanitize_filename(json.get('training_file', ''))}" - ) + training_file_name = sanitize_filename(json.get("training_file", "")) + training_file = os.path.join(FACE_DIR, f"train/{training_file_name}") + event_id = json.get("event_id") - if not training_file or not os.path.isfile(training_file): + if not training_file_name and not event_id: return JSONResponse( content=( { "success": False, - "message": f"Invalid filename or no file exists: {training_file}", + "message": "A training file or event_id must be passed.", + } + ), + status_code=400, + ) + + if training_file_name and not os.path.isfile(training_file): + return JSONResponse( + content=( + { + "success": False, + "message": f"Invalid filename or no file exists: {training_file_name}", } ), status_code=404, @@ -106,7 +120,36 @@ def train_face(request: Request, name: str, body: dict = None): rand_id = "".join(random.choices(string.ascii_lowercase + string.digits, k=6)) new_name = f"{sanitized_name}-{rand_id}.webp" new_file = os.path.join(FACE_DIR, f"{sanitized_name}/{new_name}") - shutil.move(training_file, new_file) + + if training_file_name: + shutil.move(training_file, new_file) + else: + try: + event: Event = Event.get(Event.id == event_id) + except DoesNotExist: + return JSONResponse( + content=( + { + "success": False, + "message": f"Invalid event_id or no event exists: {event_id}", + } + ), + status_code=404, + ) + + snapshot = get_event_snapshot(event) + face_box = event.data["attributes"][0]["box"] + detect_config: DetectConfig = request.app.frigate_config.cameras[ + event.camera + ].detect + + # crop onto the face box minus the bounding box itself + x1 = int(face_box[0] * detect_config.width) + 2 + y1 
= int(face_box[1] * detect_config.height) + 2 + x2 = x1 + int(face_box[2] * detect_config.width) - 4 + y2 = y1 + int(face_box[3] * detect_config.height) - 4 + face = snapshot[y1:y2, x1:x2] + cv2.imwrite(new_file, face) context: EmbeddingsContext = request.app.embeddings context.clear_face_classifier() @@ -115,7 +158,7 @@ def train_face(request: Request, name: str, body: dict = None): content=( { "success": True, - "message": f"Successfully saved {training_file} as {new_name}.", + "message": f"Successfully saved {training_file_name} as {new_name}.", } ), status_code=200, diff --git a/frigate/api/event.py b/frigate/api/event.py index 88a865318..c4c763bf7 100644 --- a/frigate/api/event.py +++ b/frigate/api/event.py @@ -701,6 +701,7 @@ def events_search(request: Request, params: EventsSearchQueryParams = Depends()) for k, v in event["data"].items() if k in [ + "attributes", "type", "score", "top_score", diff --git a/frigate/data_processing/real_time/face.py b/frigate/data_processing/real_time/face.py index b51b7a20f..acb891449 100644 --- a/frigate/data_processing/real_time/face.py +++ b/frigate/data_processing/real_time/face.py @@ -28,6 +28,7 @@ logger = logging.getLogger(__name__) MAX_DETECTION_HEIGHT = 1080 +MAX_FACE_ATTEMPTS = 100 MIN_MATCHING_FACES = 2 @@ -482,6 +483,16 @@ class FaceRealTimeProcessor(RealTimeProcessorApi): ) shutil.move(current_file, new_file) + files = sorted( + os.listdir(folder), + key=lambda f: os.path.getctime(os.path.join(folder, f)), + reverse=True, + ) + + # delete oldest face image if maximum is reached + if len(files) > MAX_FACE_ATTEMPTS: + os.unlink(os.path.join(folder, files[-1])) + def expire_object(self, object_id: str): if object_id in self.detected_faces: self.detected_faces.pop(object_id) diff --git a/frigate/util/path.py b/frigate/util/path.py index dbe51abe5..565f5a357 100644 --- a/frigate/util/path.py +++ b/frigate/util/path.py @@ -4,6 +4,9 @@ import base64 import os from pathlib import Path +import cv2 +from numpy import ndarray + from frigate.const import CLIPS_DIR, THUMB_DIR from frigate.models import Event @@ -21,6 +24,11 @@ def get_event_thumbnail_bytes(event: Event) -> bytes | None: return None +def get_event_snapshot(event: Event) -> ndarray: + media_name = f"{event.camera}-{event.id}" + return cv2.imread(f"{os.path.join(CLIPS_DIR, media_name)}.jpg") + + ### Deletion diff --git a/web/public/locales/en/views/faceLibrary.json b/web/public/locales/en/views/faceLibrary.json index 4028690e3..46842b7ea 100644 --- a/web/public/locales/en/views/faceLibrary.json +++ b/web/public/locales/en/views/faceLibrary.json @@ -25,7 +25,7 @@ }, "readTheDocs": "Read the documentation to view more details on refining images for the Face Library", "trainFaceAs": "Train Face as:", - "trainFaceAsPerson": "Train Face as Person", + "trainFace": "Train Face", "toast": { "success": { "uploadedImage": "Successfully uploaded image.", diff --git a/web/src/components/overlay/detail/SearchDetailDialog.tsx b/web/src/components/overlay/detail/SearchDetailDialog.tsx index 891ce88b1..b22eb9a4c 100644 --- a/web/src/components/overlay/detail/SearchDetailDialog.tsx +++ b/web/src/components/overlay/detail/SearchDetailDialog.tsx @@ -57,6 +57,7 @@ import { DropdownMenu, DropdownMenuContent, DropdownMenuItem, + DropdownMenuLabel, DropdownMenuTrigger, } from "@/components/ui/dropdown-menu"; import { TransformComponent, TransformWrapper } from "react-zoom-pan-pinch"; @@ -69,11 +70,12 @@ import { PopoverContent, PopoverTrigger, } from "@/components/ui/popover"; -import { LuInfo } from 
"react-icons/lu"; +import { LuInfo, LuSearch } from "react-icons/lu"; import { TooltipPortal } from "@radix-ui/react-tooltip"; import { FaPencilAlt } from "react-icons/fa"; import TextEntryDialog from "@/components/overlay/dialog/TextEntryDialog"; import { useTranslation } from "react-i18next"; +import { TbFaceId } from "react-icons/tb"; const SEARCH_TABS = [ "details", @@ -99,7 +101,7 @@ export default function SearchDetailDialog({ setSimilarity, setInputFocused, }: SearchDetailDialogProps) { - const { t } = useTranslation(["views/explore"]); + const { t } = useTranslation(["views/explore", "views/faceLibrary"]); const { data: config } = useSWR("config", { revalidateOnFocus: false, }); @@ -555,6 +557,48 @@ function ObjectDetailsTab({ [search, apiHost, mutate, setSearch, t], ); + // face training + + const hasFace = useMemo(() => { + if (!config?.face_recognition.enabled || !search) { + return false; + } + + return search.data.attributes?.find((attr) => attr.label == "face"); + }, [config, search]); + + const { data: faceData } = useSWR(hasFace ? "faces" : null); + + const faceNames = useMemo( + () => + faceData ? Object.keys(faceData).filter((face) => face != "train") : [], + [faceData], + ); + + const onTrainFace = useCallback( + (trainName: string) => { + axios + .post(`/faces/train/${trainName}/classify`, { event_id: search.id }) + .then((resp) => { + if (resp.status == 200) { + toast.success(t("toast.success.trainedFace"), { + position: "top-center", + }); + } + }) + .catch((error) => { + const errorMessage = + error.response?.data?.message || + error.response?.data?.detail || + "Unknown error"; + toast.error(t("toast.error.trainFailed", { errorMessage }), { + position: "top-center", + }); + }); + }, + [search, t], + ); + return (
@@ -673,20 +717,53 @@ function ObjectDetailsTab({ draggable={false} src={`${apiHost}api/events/${search.id}/thumbnail.webp`} /> - {config?.semantic_search.enabled && search.data.type == "object" && ( - - )} + if (setSimilarity) { + setSimilarity(); + } + }} + > +
+ + {t("itemMenu.findSimilar.label")} +
+ + )} + {hasFace && ( + + + + + + + {t("trainFaceAs", { ns: "views/faceLibrary" })} + + {faceNames.map((faceName) => ( + onTrainFace(faceName)} + > + {faceName} + + ))} + + + )} +
diff --git a/web/src/pages/FaceLibrary.tsx b/web/src/pages/FaceLibrary.tsx index afa196f35..94a7f6947 100644 --- a/web/src/pages/FaceLibrary.tsx +++ b/web/src/pages/FaceLibrary.tsx @@ -472,7 +472,7 @@ function FaceAttempt({ ))} - {t("trainFaceAsPerson")} + {t("trainFace")} diff --git a/web/src/types/search.ts b/web/src/types/search.ts index 5dca11973..2a57385f7 100644 --- a/web/src/types/search.ts +++ b/web/src/types/search.ts @@ -50,6 +50,7 @@ export type SearchResult = { score: number; sub_label_score?: number; region: number[]; + attributes?: [{ box: number[]; label: string; score: number }]; box: number[]; area: number; ratio: number; diff --git a/web/src/views/settings/MotionTunerView.tsx b/web/src/views/settings/MotionTunerView.tsx index d1027a14d..98169b4f8 100644 --- a/web/src/views/settings/MotionTunerView.tsx +++ b/web/src/views/settings/MotionTunerView.tsx @@ -323,7 +323,7 @@ export default function MotionTunerView({
{cameraConfig ? ( -
+
{cameraConfig ? ( -
+
Date: Mon, 17 Mar 2025 23:01:40 -0400 Subject: [PATCH 04/97] Fix key error when model path key doesn't exist. (#17217) * fixed metrics race condition * ruff formatting * adjust for default path config * ruff * check for model too --- frigate/api/app.py | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/frigate/api/app.py b/frigate/api/app.py index 9d7b3768f..f19070a3a 100644 --- a/frigate/api/app.py +++ b/frigate/api/app.py @@ -177,14 +177,18 @@ def config(request: Request): # Add model plus data if plus is enabled if config["plus"]["enabled"]: - model_json_path = FilePath(config["model"]["path"]).with_suffix(".json") - try: - with open(model_json_path, "r") as f: - model_plus_data = json.load(f) - config["model"]["plus"] = model_plus_data - except FileNotFoundError: - config["model"]["plus"] = None - except json.JSONDecodeError: + model_path = config.get("model", {}).get("path") + if model_path: + model_json_path = FilePath(model_path).with_suffix(".json") + try: + with open(model_json_path, "r") as f: + model_plus_data = json.load(f) + config["model"]["plus"] = model_plus_data + except FileNotFoundError: + config["model"]["plus"] = None + except json.JSONDecodeError: + config["model"]["plus"] = None + else: config["model"]["plus"] = None # use merged labelamp From dcaaae9a4c4aea46c8de46ab814bcb97a1b5d972 Mon Sep 17 00:00:00 2001 From: GuoQing Liu <842607283@qq.com> Date: Tue, 18 Mar 2025 18:53:22 +0800 Subject: [PATCH 05/97] add chinese frigatePlus settings i18n keys (#17218) --- web/public/locales/zh-CN/common.json | 4 +- .../locales/zh-CN/views/faceLibrary.json | 9 ++++- web/public/locales/zh-CN/views/settings.json | 37 +++++++++++++++++-- 3 files changed, 44 insertions(+), 6 deletions(-) diff --git a/web/public/locales/zh-CN/common.json b/web/public/locales/zh-CN/common.json index 0da7064b1..aa2d9a825 100644 --- a/web/public/locales/zh-CN/common.json +++ b/web/public/locales/zh-CN/common.json @@ -76,6 +76,7 @@ "button": { "apply": "应用", "reset": "重置", + "done": "完成", "enabled": "启用", "enable": "启用", "disabled": "禁用", @@ -104,7 +105,8 @@ "play": "播放", "unselect": "取消选择", "export": "导出", - "deleteNow": "立即删除" + "deleteNow": "立即删除", + "next": "下一个" }, "menu": { "system": "系统", diff --git a/web/public/locales/zh-CN/views/faceLibrary.json b/web/public/locales/zh-CN/views/faceLibrary.json index 72b3cba3d..e8a86a446 100644 --- a/web/public/locales/zh-CN/views/faceLibrary.json +++ b/web/public/locales/zh-CN/views/faceLibrary.json @@ -1,4 +1,7 @@ { + "description": { + "addFace": "我们将指导如何将新面孔添加到人脸库中。" + }, "documentTitle": "人脸库 - Frigate", "uploadFaceImage": { "title": "上传人脸图片", @@ -6,7 +9,8 @@ }, "createFaceLibrary": { "title": "创建人脸库", - "desc": "创建一个新的人脸库" + "desc": "创建一个新的人脸库", + "nextSteps": "建议使用“训练”选项卡为每个检测到的人选择并训练图像。在打好基础前,强烈建议训练仅使用正面图像。而不是从摄像机中识别到的角度拍摄的人脸图像。" }, "train": { "title": "训练", @@ -19,13 +23,14 @@ "uploadImage": "上传图片", "reprocessFace": "重新处理人脸" }, + "readTheDocs": "阅读文档查看更多有关为人脸库优化图像的详细信息", "trainFaceAs": "将人脸训练为:", "trainFaceAsPerson": "将人脸训练为人物", "toast": { "success": { "uploadedImage": "图片上传成功。", - "addFaceLibrary": "人脸库添加成功。", + "addFaceLibrary": "{{name}} 成功添加至人脸库。", "deletedFace": "人脸删除成功。", "trainedFace": "人脸训练成功。", "updatedFaceScore": "人脸分数更新成功。" diff --git a/web/public/locales/zh-CN/views/settings.json b/web/public/locales/zh-CN/views/settings.json index 8fcfb869e..707e6c9f0 100644 --- a/web/public/locales/zh-CN/views/settings.json +++ b/web/public/locales/zh-CN/views/settings.json @@ -25,6 +25,10 @@ "users": "用户", 
"notifications": "通知" }, + "cameraSetting": { + "camera": "相机", + "noCamera": "没有相机" + }, "general": { "title": "常规设置", "liveDashboard": { @@ -512,8 +516,35 @@ } } }, - "cameraSetting": { - "camera": "相机", - "noCamera": "没有相机" + "frigatePlus": { + "title": "Frigate+ 设置", + "apiKey": { + "title": "Frigate+ API 密钥", + "validated": "Frigate+ API 密钥已检测并验证通过", + "notValidated": "未检测到 Frigate+ API 密钥或验证未通过", + "desc": "Frigate+ API 密钥用于启用与 Frigate+ 服务的集成。", + "plusLink": "了解更多关于 Frigate+" + }, + "snapshotConfig": { + "title": "快照配置", + "desc": "提交到 Frigate+ 需要同时在配置中启用快照和 clean_copy 快照。", + "documentation": "阅读文档", + "cleanCopyWarning": "部分摄像头已启用快照但未启用 clean_copy。您需要在快照配置中启用 clean_copy,才能将这些摄像头的图像提交到 Frigate+。", + "table": { + "camera": "摄像头", + "snapshots": "快照", + "cleanCopySnapshots": "clean_copy 快照" + } + }, + "modelInfo": { + "title": "模型信息", + "modelType": "模型类型", + "trainDate": "训练日期", + "baseModel": "基础模型", + "supportedDetectors": "支持的检测器", + "cameras": "摄像头", + "loading": "正在加载模型信息...", + "error": "加载模型信息失败" + } } } From 9f7ba51f393c2ebf366c7a8d88453c26ba567379 Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Tue, 18 Mar 2025 09:08:44 -0500 Subject: [PATCH 06/97] Proxy i18n locales dir for ingress (#17223) --- docker/main/rootfs/usr/local/nginx/conf/nginx.conf | 1 + 1 file changed, 1 insertion(+) diff --git a/docker/main/rootfs/usr/local/nginx/conf/nginx.conf b/docker/main/rootfs/usr/local/nginx/conf/nginx.conf index 64d6396b2..137d0ec3d 100644 --- a/docker/main/rootfs/usr/local/nginx/conf/nginx.conf +++ b/docker/main/rootfs/usr/local/nginx/conf/nginx.conf @@ -317,6 +317,7 @@ http { sub_filter '"/BASE_PATH/dist/' '"$http_x_ingress_path/dist/'; sub_filter '"/BASE_PATH/js/' '"$http_x_ingress_path/js/'; sub_filter '"/BASE_PATH/assets/' '"$http_x_ingress_path/assets/'; + sub_filter '"/BASE_PATH/locales/' '"$http_x_ingress_path/locales/'; sub_filter '"/BASE_PATH/monacoeditorwork/' '"$http_x_ingress_path/assets/'; sub_filter 'return"/BASE_PATH/"' 'return window.baseUrl'; sub_filter '' ''; From 5514fc11b9ba2a24805b08e731fcc8e59a869ccb Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Tue, 18 Mar 2025 08:32:15 -0600 Subject: [PATCH 07/97] Face tweaks (#17225) * Always use white text * Add right click as well * Add face details dialog * Clenaup --- web/public/locales/en/views/faceLibrary.json | 6 + web/src/pages/FaceLibrary.tsx | 124 +++++++++++++++---- web/src/types/face.ts | 6 + 3 files changed, 113 insertions(+), 23 deletions(-) create mode 100644 web/src/types/face.ts diff --git a/web/public/locales/en/views/faceLibrary.json b/web/public/locales/en/views/faceLibrary.json index 46842b7ea..57d78b18a 100644 --- a/web/public/locales/en/views/faceLibrary.json +++ b/web/public/locales/en/views/faceLibrary.json @@ -2,6 +2,12 @@ "description": { "addFace": "Walk through adding a new face to the Face Library." 
}, + "details": { + "confidence": "Confidence", + "face": "Face Details", + "faceDesc": "Details for the face and associated object", + "timestamp": "Timestamp" + }, "documentTitle": "Face Library - Frigate", "uploadFaceImage": { "title": "Upload Face Image", diff --git a/web/src/pages/FaceLibrary.tsx b/web/src/pages/FaceLibrary.tsx index 94a7f6947..a9b7dc230 100644 --- a/web/src/pages/FaceLibrary.tsx +++ b/web/src/pages/FaceLibrary.tsx @@ -5,6 +5,13 @@ import ActivityIndicator from "@/components/indicators/activity-indicator"; import CreateFaceWizardDialog from "@/components/overlay/detail/FaceCreateWizardDialog"; import UploadImageDialog from "@/components/overlay/dialog/UploadImageDialog"; import { Button } from "@/components/ui/button"; +import { + Dialog, + DialogContent, + DialogDescription, + DialogHeader, + DialogTitle, +} from "@/components/ui/dialog"; import { DropdownMenu, DropdownMenuContent, @@ -20,9 +27,12 @@ import { TooltipContent, TooltipTrigger, } from "@/components/ui/tooltip"; +import useContextMenu from "@/hooks/use-contextmenu"; +import { useFormattedTimestamp } from "@/hooks/use-date-utils"; import useKeyboardListener from "@/hooks/use-keyboard-listener"; import useOptimisticState from "@/hooks/use-optimistic-state"; import { cn } from "@/lib/utils"; +import { RecognizedFaceData } from "@/types/face"; import { FrigateConfig } from "@/types/frigateConfig"; import axios from "axios"; import { useCallback, useEffect, useMemo, useRef, useState } from "react"; @@ -329,20 +339,76 @@ function TrainingGrid({ onClickFace, onRefresh, }: TrainingGridProps) { + const { t } = useTranslation(["views/faceLibrary"]); + + // face data + + const [selectedEvent, setSelectedEvent] = useState(); + + const formattedDate = useFormattedTimestamp( + selectedEvent?.timestamp ?? 0, + config?.ui.time_format == "24hour" + ? t("time.formattedTimestampWithYear.24hour", { ns: "common" }) + : t("time.formattedTimestampWithYear.12hour", { ns: "common" }), + config?.ui.timezone, + ); + return ( -
- {attemptImages.map((image: string) => ( - onClickFace(image, meta)} - onRefresh={onRefresh} - /> - ))} -
+ <> + { + if (!open) { + setSelectedEvent(undefined); + } + }} + > + + + {t("details.face")} + {t("details.faceDesc")} + +
+
+ {t("details.confidence")} +
+
+ {(selectedEvent?.score || 0) * 100}% +
+
+
+
+ {t("details.timestamp")} +
+
{formattedDate}
+
+ +
+
+ +
+ {attemptImages.map((image: string) => ( + { + if (meta) { + onClickFace(image, meta); + } else { + setSelectedEvent(data); + } + }} + onRefresh={onRefresh} + /> + ))} +
+ ); } @@ -351,7 +417,7 @@ type FaceAttemptProps = { faceNames: string[]; threshold: number; selected: boolean; - onClick: (meta: boolean) => void; + onClick: (data: RecognizedFaceData, meta: boolean) => void; onRefresh: () => void; }; function FaceAttempt({ @@ -363,17 +429,27 @@ function FaceAttempt({ onRefresh, }: FaceAttemptProps) { const { t } = useTranslation(["views/faceLibrary"]); - const data = useMemo(() => { + const data = useMemo(() => { const parts = image.split("-"); return { timestamp: Number.parseFloat(parts[0]), eventId: `${parts[0]}-${parts[1]}`, name: parts[2], - score: parts[3], + score: Number.parseFloat(parts[3]), }; }, [image]); + // interaction + + const imgRef = useRef(null); + + useContextMenu(imgRef, () => { + onClick(data, true); + }); + + // api calls + const onTrainAttempt = useCallback( (trainName: string) => { axios @@ -429,12 +505,16 @@ function FaceAttempt({ ? "shadow-selected outline-selected" : "outline-transparent duration-500", )} - onClick={(e) => onClick(e.metaKey || e.ctrlKey)} >
- + onClick(data, e.metaKey || e.ctrlKey)} + />
- +
@@ -443,12 +523,10 @@ function FaceAttempt({
{data.name}
= threshold - ? "text-success" - : "text-danger", + data.score >= threshold ? "text-success" : "text-danger", )} > - {Number.parseFloat(data.score) * 100}% + {data.score * 100}%
diff --git a/web/src/types/face.ts b/web/src/types/face.ts new file mode 100644 index 000000000..e8f426a5b --- /dev/null +++ b/web/src/types/face.ts @@ -0,0 +1,6 @@ +export type RecognizedFaceData = { + timestamp: number; + eventId: string; + name: string; + score: number; +}; From 125c2665857c6ae38d63b584da42e6d869a4198c Mon Sep 17 00:00:00 2001 From: GuoQing Liu <842607283@qq.com> Date: Wed, 19 Mar 2025 18:56:48 +0800 Subject: [PATCH 08/97] chore: use better translation (zh-CN) (#17239) --- web/public/locales/zh-CN/views/faceLibrary.json | 7 ++++++- web/public/locales/zh-CN/views/settings.json | 4 ++-- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/web/public/locales/zh-CN/views/faceLibrary.json b/web/public/locales/zh-CN/views/faceLibrary.json index e8a86a446..76a98bf31 100644 --- a/web/public/locales/zh-CN/views/faceLibrary.json +++ b/web/public/locales/zh-CN/views/faceLibrary.json @@ -2,6 +2,12 @@ "description": { "addFace": "我们将指导如何将新面孔添加到人脸库中。" }, + "details": { + "confidence": "置信度", + "face": "人脸详情", + "faceDesc": "人脸及相关对象的详细信息", + "timestamp": "时间戳" + }, "documentTitle": "人脸库 - Frigate", "uploadFaceImage": { "title": "上传人脸图片", @@ -26,7 +32,6 @@ "readTheDocs": "阅读文档查看更多有关为人脸库优化图像的详细信息", "trainFaceAs": "将人脸训练为:", "trainFaceAsPerson": "将人脸训练为人物", - "toast": { "success": { "uploadedImage": "图片上传成功。", diff --git a/web/public/locales/zh-CN/views/settings.json b/web/public/locales/zh-CN/views/settings.json index 707e6c9f0..ccef8f151 100644 --- a/web/public/locales/zh-CN/views/settings.json +++ b/web/public/locales/zh-CN/views/settings.json @@ -26,8 +26,8 @@ "notifications": "通知" }, "cameraSetting": { - "camera": "相机", - "noCamera": "没有相机" + "camera": "摄像头", + "noCamera": "没有摄像头" }, "general": { "title": "常规设置", From e340c9aaba6b71136e142620d386c0ff9f24d9ff Mon Sep 17 00:00:00 2001 From: Jason Hunter Date: Wed, 19 Mar 2025 08:23:24 -0400 Subject: [PATCH 09/97] Add openvino support for the DFINE model (#17238) * add openvino support for the dfine model * update docs to show DFINE support for openvino * remove warning about OpenVINO for DFINE --- docs/docs/configuration/object_detectors.md | 45 +++++++++++++++------ frigate/detectors/plugins/openvino.py | 18 ++++++++- 2 files changed, 49 insertions(+), 14 deletions(-) diff --git a/docs/docs/configuration/object_detectors.md b/docs/docs/configuration/object_detectors.md index a4f4c7c20..8cc6b2f1e 100644 --- a/docs/docs/configuration/object_detectors.md +++ b/docs/docs/configuration/object_detectors.md @@ -129,8 +129,8 @@ detectors: type: edgetpu device: pci ``` ---- +--- ## Hailo-8 @@ -140,12 +140,13 @@ See the [installation docs](../frigate/installation.md#hailo-8l) for information ### Configuration -When configuring the Hailo detector, you have two options to specify the model: a local **path** or a **URL**. +When configuring the Hailo detector, you have two options to specify the model: a local **path** or a **URL**. If both are provided, the detector will first check for the model at the given local path. If the file is not found, it will download the model from the specified URL. The model file is cached under `/config/model_cache/hailo`. -#### YOLO +#### YOLO Use this configuration for YOLO-based models. 
When no custom model path or URL is provided, the detector automatically downloads the default model based on the detected hardware: + - **Hailo-8 hardware:** Uses **YOLOv6n** (default: `yolov6n.hef`) - **Hailo-8L hardware:** Uses **YOLOv6n** (default: `yolov6n.hef`) @@ -224,17 +225,16 @@ model: # Alternatively, or as a fallback, provide a custom URL: # path: https://custom-model-url.com/path/to/model.hef ``` + For additional ready-to-use models, please visit: https://github.com/hailo-ai/hailo_model_zoo -Hailo8 supports all models in the Hailo Model Zoo that include HailoRT post-processing. You're welcome to choose any of these pre-configured models for your implementation. +Hailo8 supports all models in the Hailo Model Zoo that include HailoRT post-processing. You're welcome to choose any of these pre-configured models for your implementation. -> **Note:** +> **Note:** > The config.path parameter can accept either a local file path or a URL ending with .hef. When provided, the detector will first check if the path is a local file path. If the file exists locally, it will use it directly. If the file is not found locally or if a URL was provided, it will attempt to download the model from the specified URL. --- - - ## OpenVINO Detector The OpenVINO detector type runs an OpenVINO IR model on AMD and Intel CPUs, Intel GPUs and Intel VPU hardware. To configure an OpenVINO detector, set the `"type"` attribute to `"openvino"`. @@ -340,6 +340,30 @@ model: Note that the labelmap uses a subset of the complete COCO label set that has only 80 objects. +#### D-FINE + +[D-FINE](https://github.com/Peterande/D-FINE) is the [current state of the art](https://paperswithcode.com/sota/real-time-object-detection-on-coco?p=d-fine-redefine-regression-task-in-detrs-as) at the time of writing. The ONNX exported models are supported, but not included by default. See [the models section](#downloading-d-fine-model) for more information on downloading the D-FINE model for use in Frigate. + +After placing the downloaded onnx model in your config/model_cache folder, you can use the following configuration: + +```yaml +detectors: + ov: + type: openvino + device: GPU + +model: + model_type: dfine + width: 640 + height: 640 + input_tensor: nchw + input_dtype: float + path: /config/model_cache/dfine_s_obj2coco.onnx + labelmap_path: /labelmap/coco-80.txt +``` + +Note that the labelmap uses a subset of the complete COCO label set that has only 80 objects. + ## NVidia TensorRT Detector Nvidia GPUs may be used for object detection using the TensorRT libraries. Due to the size of the additional libraries, this detector is only provided in images with the `-tensorrt` tag suffix, e.g. `ghcr.io/blakeblackshear/frigate:stable-tensorrt`. This detector is designed to work with Yolo models for object detection. @@ -529,6 +553,7 @@ $ docker exec -it frigate /bin/bash -c '(unset HSA_OVERRIDE_GFX_VERSION && /opt/ ### Supported Models See [ONNX supported models](#supported-models) for supported models, there are some caveats: + - D-FINE models are not supported - YOLO-NAS models are known to not run well on integrated GPUs @@ -626,12 +651,6 @@ Note that the labelmap uses a subset of the complete COCO label set that has onl [D-FINE](https://github.com/Peterande/D-FINE) is the [current state of the art](https://paperswithcode.com/sota/real-time-object-detection-on-coco?p=d-fine-redefine-regression-task-in-detrs-as) at the time of writing. The ONNX exported models are supported, but not included by default. 
See [the models section](#downloading-d-fine-model) for more information on downloading the D-FINE model for use in Frigate. -:::warning - -D-FINE is currently not supported on OpenVINO - -::: - After placing the downloaded onnx model in your config/model_cache folder, you can use the following configuration: ```yaml diff --git a/frigate/detectors/plugins/openvino.py b/frigate/detectors/plugins/openvino.py index 0f0b99a1f..75d956500 100644 --- a/frigate/detectors/plugins/openvino.py +++ b/frigate/detectors/plugins/openvino.py @@ -10,7 +10,7 @@ from typing_extensions import Literal from frigate.const import MODEL_CACHE_DIR from frigate.detectors.detection_api import DetectionApi from frigate.detectors.detector_config import BaseDetectorConfig, ModelTypeEnum -from frigate.util.model import post_process_yolov9 +from frigate.util.model import post_process_dfine, post_process_yolov9 logger = logging.getLogger(__name__) @@ -29,6 +29,7 @@ class OvDetector(DetectionApi): ModelTypeEnum.yolonas, ModelTypeEnum.yolov9, ModelTypeEnum.yolox, + ModelTypeEnum.dfine, ] def __init__(self, detector_config: OvDetectorConfig): @@ -163,6 +164,21 @@ class OvDetector(DetectionApi): infer_request = self.interpreter.create_infer_request() # TODO: see if we can use shared_memory=True input_tensor = ov.Tensor(array=tensor_input) + + if self.ov_model_type == ModelTypeEnum.dfine: + infer_request.set_tensor("images", input_tensor) + target_sizes_tensor = ov.Tensor( + np.array([[self.h, self.w]], dtype=np.int64) + ) + infer_request.set_tensor("orig_target_sizes", target_sizes_tensor) + infer_request.infer() + tensor_output = ( + infer_request.get_output_tensor(0).data, + infer_request.get_output_tensor(1).data, + infer_request.get_output_tensor(2).data, + ) + return post_process_dfine(tensor_output, self.w, self.h) + infer_request.infer(input_tensor) detections = np.zeros((20, 6), np.float32) From ac9e24e2ed5bddd1cc8cac92e241abd11fc195f8 Mon Sep 17 00:00:00 2001 From: leccelecce <24962424+leccelecce@users.noreply.github.com> Date: Wed, 19 Mar 2025 12:23:44 +0000 Subject: [PATCH 10/97] Face Library: show name on details (#17245) --- web/public/locales/en/views/faceLibrary.json | 1 + web/src/pages/FaceLibrary.tsx | 4 ++++ 2 files changed, 5 insertions(+) diff --git a/web/public/locales/en/views/faceLibrary.json b/web/public/locales/en/views/faceLibrary.json index 57d78b18a..fd5f50825 100644 --- a/web/public/locales/en/views/faceLibrary.json +++ b/web/public/locales/en/views/faceLibrary.json @@ -3,6 +3,7 @@ "addFace": "Walk through adding a new face to the Face Library." }, "details": { + "person": "Person", "confidence": "Confidence", "face": "Face Details", "faceDesc": "Details for the face and associated object", diff --git a/web/src/pages/FaceLibrary.tsx b/web/src/pages/FaceLibrary.tsx index a9b7dc230..73fd54c7a 100644 --- a/web/src/pages/FaceLibrary.tsx +++ b/web/src/pages/FaceLibrary.tsx @@ -368,6 +368,10 @@ function TrainingGrid({ {t("details.face")} {t("details.faceDesc")} +
+          <div className="flex flex-col gap-1.5">
+            <div className="text-sm text-primary/40">{t("details.person")}</div>
+            <div className="text-sm">{selectedEvent?.name}</div>
+          </div>
{t("details.confidence")} From dda7be99ebe65970d2143b1b87db24516c538b61 Mon Sep 17 00:00:00 2001 From: leccelecce <24962424+leccelecce@users.noreply.github.com> Date: Wed, 19 Mar 2025 12:44:07 +0000 Subject: [PATCH 11/97] Face rec: only consider webp files in /faces and handle_request (#17244) * Face rec: only consider webp files * Allow png/jpg/jpeg as well as webp --- frigate/api/classification.py | 5 ++++- frigate/data_processing/real_time/face.py | 2 +- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/frigate/api/classification.py b/frigate/api/classification.py index df804f34a..b9d714ca3 100644 --- a/frigate/api/classification.py +++ b/frigate/api/classification.py @@ -39,7 +39,10 @@ def get_faces(): face_dict[name] = [] for file in sorted( - os.listdir(face_dir), + filter( + lambda f: (f.lower().endswith((".webp", ".png", ".jpg", ".jpeg"))), + os.listdir(face_dir), + ), key=lambda f: os.path.getctime(os.path.join(face_dir, f)), reverse=True, ): diff --git a/frigate/data_processing/real_time/face.py b/frigate/data_processing/real_time/face.py index acb891449..102913442 100644 --- a/frigate/data_processing/real_time/face.py +++ b/frigate/data_processing/real_time/face.py @@ -484,7 +484,7 @@ class FaceRealTimeProcessor(RealTimeProcessorApi): shutil.move(current_file, new_file) files = sorted( - os.listdir(folder), + filter(lambda f: (f.endswith(".webp")), os.listdir(folder)), key=lambda f: os.path.getctime(os.path.join(folder, f)), reverse=True, ) From 7f966df5a450b7f6244a24e1890bf10a6e4c7ff1 Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Wed, 19 Mar 2025 07:50:36 -0500 Subject: [PATCH 12/97] Nginx fix: make locales public (#17248) * make locales public * remove cache --- docker/main/rootfs/usr/local/nginx/conf/nginx.conf | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/docker/main/rootfs/usr/local/nginx/conf/nginx.conf b/docker/main/rootfs/usr/local/nginx/conf/nginx.conf index 137d0ec3d..6c60019c7 100644 --- a/docker/main/rootfs/usr/local/nginx/conf/nginx.conf +++ b/docker/main/rootfs/usr/local/nginx/conf/nginx.conf @@ -300,6 +300,11 @@ http { add_header Cache-Control "public"; } + location /locales/ { + access_log off; + add_header Cache-Control "public"; + } + location ~ ^/.*-([A-Za-z0-9]+)\.webmanifest$ { access_log off; expires 1y; From e33fa9659927d732d795632a217adaa033d4a29d Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Wed, 19 Mar 2025 09:02:25 -0600 Subject: [PATCH 13/97] Face recognize api (#17233) * Add api to run face recognition on image * Rework save attempts option * Cleanup mobile object pane buttons * Adjust api signature * Remove param * Cleanup --- frigate/api/classification.py | 16 ++++++++++++ frigate/comms/embeddings_updater.py | 1 + frigate/config/classification.py | 4 +-- frigate/data_processing/real_time/face.py | 25 +++++++++++++++++-- frigate/embeddings/__init__.py | 8 ++++++ .../overlay/detail/SearchDetailDialog.tsx | 4 ++- 6 files changed, 53 insertions(+), 5 deletions(-) diff --git a/frigate/api/classification.py b/frigate/api/classification.py index b9d714ca3..498158ff2 100644 --- a/frigate/api/classification.py +++ b/frigate/api/classification.py @@ -201,6 +201,22 @@ async def register_face(request: Request, name: str, file: UploadFile): ) +@router.post("/faces/recognize") +async def recognize_face(request: Request, file: UploadFile): + if not request.app.frigate_config.face_recognition.enabled: + return JSONResponse( + status_code=400, + content={"message": "Face recognition is not 
enabled.", "success": False}, + ) + + context: EmbeddingsContext = request.app.embeddings + result = context.recognize_face(await file.read()) + return JSONResponse( + status_code=200 if result.get("success", True) else 400, + content=result, + ) + + @router.post("/faces/{name}/delete", dependencies=[Depends(require_role(["admin"]))]) def deregister_faces(request: Request, name: str, body: dict = None): if not request.app.frigate_config.face_recognition.enabled: diff --git a/frigate/comms/embeddings_updater.py b/frigate/comms/embeddings_updater.py index 61c2331cf..fc35c4665 100644 --- a/frigate/comms/embeddings_updater.py +++ b/frigate/comms/embeddings_updater.py @@ -13,6 +13,7 @@ class EmbeddingsRequestEnum(Enum): embed_description = "embed_description" embed_thumbnail = "embed_thumbnail" generate_search = "generate_search" + recognize_face = "recognize_face" register_face = "register_face" reprocess_face = "reprocess_face" reprocess_plate = "reprocess_plate" diff --git a/frigate/config/classification.py b/frigate/config/classification.py index 30cd12b7c..cbe4880a1 100644 --- a/frigate/config/classification.py +++ b/frigate/config/classification.py @@ -70,8 +70,8 @@ class FaceRecognitionConfig(FrigateBaseModel): min_area: int = Field( default=500, title="Min area of face box to consider running face recognition." ) - save_attempts: bool = Field( - default=True, title="Save images of face detections for training." + save_attempts: int = Field( + default=100, ge=0, title="Number of face attempts to save in the train tab." ) blur_confidence_filter: bool = Field( default=True, title="Apply blur quality filter to face confidence." diff --git a/frigate/data_processing/real_time/face.py b/frigate/data_processing/real_time/face.py index 102913442..ac6fa0f80 100644 --- a/frigate/data_processing/real_time/face.py +++ b/frigate/data_processing/real_time/face.py @@ -28,7 +28,6 @@ logger = logging.getLogger(__name__) MAX_DETECTION_HEIGHT = 1080 -MAX_FACE_ATTEMPTS = 100 MIN_MATCHING_FACES = 2 @@ -407,6 +406,28 @@ class FaceRealTimeProcessor(RealTimeProcessorApi): def handle_request(self, topic, request_data) -> dict[str, any] | None: if topic == EmbeddingsRequestEnum.clear_face_classifier.value: self.__clear_classifier() + elif topic == EmbeddingsRequestEnum.recognize_face.value: + img = cv2.imdecode( + np.frombuffer(base64.b64decode(request_data["image"]), dtype=np.uint8), + cv2.IMREAD_COLOR, + ) + + # detect faces with lower confidence since we expect the face + # to be visible in uploaded images + face_box = self.__detect_face(img, 0.5) + + if not face_box: + return {"message": "No face was detected.", "success": False} + + face = img[face_box[1] : face_box[3], face_box[0] : face_box[2]] + res = self.__classify_face(face) + + if not res: + return {"success": False, "message": "No face was recognized."} + + sub_label, score = res + + return {"success": True, "score": score, "face_name": sub_label} elif topic == EmbeddingsRequestEnum.register_face.value: rand_id = "".join( random.choices(string.ascii_lowercase + string.digits, k=6) @@ -490,7 +511,7 @@ class FaceRealTimeProcessor(RealTimeProcessorApi): ) # delete oldest face image if maximum is reached - if len(files) > MAX_FACE_ATTEMPTS: + if len(files) > self.config.face_recognition.save_attempts: os.unlink(os.path.join(folder, files[-1])) def expire_object(self, object_id: str): diff --git a/frigate/embeddings/__init__.py b/frigate/embeddings/__init__.py index 0a0d7200a..c593a6c0d 100644 --- a/frigate/embeddings/__init__.py +++ 
b/frigate/embeddings/__init__.py @@ -197,6 +197,14 @@ class EmbeddingsContext: }, ) + def recognize_face(self, image_data: bytes) -> dict[str, any]: + return self.requestor.send_data( + EmbeddingsRequestEnum.recognize_face.value, + { + "image": base64.b64encode(image_data).decode("ASCII"), + }, + ) + def get_face_ids(self, name: str) -> list[str]: sql_query = f""" SELECT diff --git a/web/src/components/overlay/detail/SearchDetailDialog.tsx b/web/src/components/overlay/detail/SearchDetailDialog.tsx index b22eb9a4c..afa428eda 100644 --- a/web/src/components/overlay/detail/SearchDetailDialog.tsx +++ b/web/src/components/overlay/detail/SearchDetailDialog.tsx @@ -717,7 +717,9 @@ function ObjectDetailsTab({ draggable={false} src={`${apiHost}api/events/${search.id}/thumbnail.webp`} /> -
+
{config?.semantic_search.enabled && search.data.type == "object" && (
+
+ +

{config.model.plus.id}

+
-
+
From d84fd324b28e28fdd93d2c53b676a3afdedc7829 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Thu, 20 Mar 2025 07:48:50 -0600 Subject: [PATCH 22/97] Actually set the configured face and lpr settings (#17272) --- web/src/views/settings/ClassificationSettingsView.tsx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/web/src/views/settings/ClassificationSettingsView.tsx b/web/src/views/settings/ClassificationSettingsView.tsx index 9ecb844ee..3ff466f13 100644 --- a/web/src/views/settings/ClassificationSettingsView.tsx +++ b/web/src/views/settings/ClassificationSettingsView.tsx @@ -136,7 +136,7 @@ export default function ClassificationSettingsView({ axios .put( - `config/set?semantic_search.enabled=${classificationSettings.search.enabled ? "True" : "False"}&semantic_search.reindex=${classificationSettings.search.reindex ? "True" : "False"}&semantic_search.model_size=${classificationSettings.search.model_size}`, + `config/set?semantic_search.enabled=${classificationSettings.search.enabled ? "True" : "False"}&semantic_search.reindex=${classificationSettings.search.reindex ? "True" : "False"}&semantic_search.model_size=${classificationSettings.search.model_size}&face_recognition.enabled=${classificationSettings.face.enabled}&lpr.enabled=${classificationSettings.lpr.enabled}`, { requires_restart: 0, }, @@ -172,7 +172,7 @@ export default function ClassificationSettingsView({ .finally(() => { setIsLoading(false); }); - }, [updateConfig, classificationSettings.search, t]); + }, [updateConfig, classificationSettings, t]); const onCancel = useCallback(() => { setClassificationSettings(origSearchSettings); From d4d5c4aac8496b2646ad13f1bf6a1132c215502a Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Thu, 20 Mar 2025 10:20:44 -0600 Subject: [PATCH 23/97] Disabled cameras fixing (#17273) * Fix case where objects are returned as null * Fix enabled status not being persisted * Use config as source of truth when refreshed * Ensure camera always have config object updated * Cleanup typing --- frigate/camera/state.py | 1 - frigate/comms/dispatcher.py | 6 +++++- frigate/video.py | 4 +++- web/src/api/ws.tsx | 9 ++++----- web/src/components/player/LivePlayer.tsx | 5 ++++- web/src/hooks/use-camera-activity.ts | 17 ++++++++--------- web/src/types/ws.ts | 13 ++++++++++++- 7 files changed, 36 insertions(+), 19 deletions(-) diff --git a/frigate/camera/state.py b/frigate/camera/state.py index 0e02c6c14..f2469dffd 100644 --- a/frigate/camera/state.py +++ b/frigate/camera/state.py @@ -306,7 +306,6 @@ class CameraState: # TODO: can i switch to looking this up and only changing when an event ends? 
# maintain best objects camera_activity: dict[str, list[any]] = { - "enabled": True, "motion": len(motion_boxes) > 0, "objects": [], } diff --git a/frigate/comms/dispatcher.py b/frigate/comms/dispatcher.py index 586b70cbb..4c0b0a8ff 100644 --- a/frigate/comms/dispatcher.py +++ b/frigate/comms/dispatcher.py @@ -164,8 +164,12 @@ class Dispatcher: def handle_on_connect(): camera_status = self.camera_activity.last_camera_activity.copy() + cameras_with_status = camera_status.keys() + + for camera in self.config.cameras.keys(): + if camera not in cameras_with_status: + camera_status[camera] = {} - for camera in camera_status.keys(): camera_status[camera]["config"] = { "detect": self.config.cameras[camera].detect.enabled, "enabled": self.config.cameras[camera].enabled, diff --git a/frigate/video.py b/frigate/video.py index abf490a72..91e92fee1 100755 --- a/frigate/video.py +++ b/frigate/video.py @@ -113,8 +113,10 @@ def capture_frames( def get_enabled_state(): """Fetch the latest enabled state from ZMQ.""" _, config_data = config_subscriber.check_for_update() + if config_data: - return config_data.enabled + config.enabled = config_data.enabled + return config.enabled while not stop_event.is_set(): diff --git a/web/src/api/ws.tsx b/web/src/api/ws.tsx index 5eedcdbcd..3e9c8c14f 100644 --- a/web/src/api/ws.tsx +++ b/web/src/api/ws.tsx @@ -44,7 +44,8 @@ function useValue(): useValueReturn { return; } - const cameraActivity: { [key: string]: object } = JSON.parse(activityValue); + const cameraActivity: { [key: string]: FrigateCameraState } = + JSON.parse(activityValue); if (Object.keys(cameraActivity).length === 0) { return; @@ -64,9 +65,7 @@ function useValue(): useValueReturn { autotracking, alerts, detections, - } = - // @ts-expect-error we know this is correct - state["config"]; + } = state["config"]; cameraStates[`${name}/recordings/state`] = record ? "ON" : "OFF"; cameraStates[`${name}/enabled/state`] = enabled ? "ON" : "OFF"; cameraStates[`${name}/detect/state`] = detect ? "ON" : "OFF"; @@ -174,7 +173,7 @@ export function useEnabledState(camera: string): { value: { payload }, send, } = useWs(`${camera}/enabled/state`, `${camera}/enabled/set`); - return { payload: (payload ?? "ON") as ToggleableSetting, send }; + return { payload: payload as ToggleableSetting, send }; } export function useDetectState(camera: string): { diff --git a/web/src/components/player/LivePlayer.tsx b/web/src/components/player/LivePlayer.tsx index b73e7991c..51af9877d 100644 --- a/web/src/components/player/LivePlayer.tsx +++ b/web/src/components/player/LivePlayer.tsx @@ -385,7 +385,10 @@ export default function LivePlayer({
([]); + const [objects, setObjects] = useState([]); // init camera activity @@ -54,7 +54,7 @@ export function useCameraActivity( // handle camera activity const hasActiveObjects = useMemo( - () => objects?.filter((obj) => !obj?.stationary)?.length > 0, + () => (objects || []).filter((obj) => !obj?.stationary)?.length > 0, [objects], ); @@ -81,11 +81,10 @@ export function useCameraActivity( return; } - const updatedEventIndex = objects.findIndex( - (obj) => obj.id === updatedEvent.after.id, - ); + const updatedEventIndex = + objects?.findIndex((obj) => obj.id === updatedEvent.after.id) ?? -1; - let newObjects: ObjectType[] = [...objects]; + let newObjects: ObjectType[] = [...(objects ?? [])]; if (updatedEvent.type === "end") { if (updatedEventIndex !== -1) { @@ -104,10 +103,10 @@ export function useCameraActivity( score: updatedEvent.after.score, sub_label: updatedEvent.after.sub_label?.[0] ?? "", }; - newObjects = [...objects, newActiveObject]; + newObjects = [...(objects ?? []), newActiveObject]; } } else { - const newObjects = [...objects]; + const newObjects = [...(objects ?? [])]; let label = updatedEvent.after.label; @@ -158,7 +157,7 @@ export function useCameraActivity( ? detectingMotion === "ON" : updatedCameraState?.motion === true : false, - objects: isCameraEnabled ? objects : [], + objects: isCameraEnabled ? (objects ?? []) : [], offline, }; } diff --git a/web/src/types/ws.ts b/web/src/types/ws.ts index 2590d45a7..3badd961d 100644 --- a/web/src/types/ws.ts +++ b/web/src/types/ws.ts @@ -52,7 +52,18 @@ export type ObjectType = { }; export interface FrigateCameraState { - enabled: boolean; + config: { + enabled: boolean; + detect: boolean; + snapshots: boolean; + record: boolean; + audio: boolean; + notifications: boolean; + notifications_suspended: number; + autotracking: boolean; + alerts: boolean; + detections: boolean; + }; motion: boolean; objects: ObjectType[]; } From be56305b4e4cb79162f6d0c5e75969d5a48f8483 Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Thu, 20 Mar 2025 11:37:40 -0500 Subject: [PATCH 24/97] Small tweaks (#17275) * Fix missing i18n key and make small UI tweak for disabled cameras * simplify colors --- web/src/components/player/LivePlayer.tsx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/web/src/components/player/LivePlayer.tsx b/web/src/components/player/LivePlayer.tsx index 51af9877d..1989a55ab 100644 --- a/web/src/components/player/LivePlayer.tsx +++ b/web/src/components/player/LivePlayer.tsx @@ -410,9 +410,9 @@ export default function LivePlayer({

streamOffline.desc @@ -423,7 +423,7 @@ export default function LivePlayer({ )} {!cameraEnabled && ( -

+

From 0308a88111974dbf3df5eccda84afa6ce26f54f8 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Thu, 20 Mar 2025 10:51:08 -0600 Subject: [PATCH 25/97] Add MQTT topic to expose current camera review status (#17276) * Add MQTT topic to expose current camera review status * Formatting --- docs/docs/integrations/mqtt.md | 4 ++++ frigate/review/maintainer.py | 7 +++++++ 2 files changed, 11 insertions(+) diff --git a/docs/docs/integrations/mqtt.md b/docs/docs/integrations/mqtt.md index abbc12974..4139efaf3 100644 --- a/docs/docs/integrations/mqtt.md +++ b/docs/docs/integrations/mqtt.md @@ -305,6 +305,10 @@ Topic to adjust motion contour area for a camera. Expected value is an integer. Topic with current motion contour area for a camera. Published value is an integer. +### `frigate//review_status` + +Topic with current activity status of the camera. Possible values are `NONE`, `DETECTION`, or `ALERT`. + ### `frigate//ptz` Topic to send PTZ commands to camera. diff --git a/frigate/review/maintainer.py b/frigate/review/maintainer.py index 3819f4cb4..d9e8bdaa9 100644 --- a/frigate/review/maintainer.py +++ b/frigate/review/maintainer.py @@ -181,6 +181,9 @@ class ReviewSegmentMaintainer(threading.Thread): } ), ) + self.requestor.send_data( + f"{segment.camera}/review_status", segment.severity.value.upper() + ) def _publish_segment_update( self, @@ -206,6 +209,9 @@ class ReviewSegmentMaintainer(threading.Thread): } ), ) + self.requestor.send_data( + f"{segment.camera}/review_status", segment.severity.value.upper() + ) def _publish_segment_end( self, @@ -225,6 +231,7 @@ class ReviewSegmentMaintainer(threading.Thread): } ), ) + self.requestor.send_data(f"{segment.camera}/review_status", "NONE") self.active_review_segments[segment.camera] = None def end_segment(self, camera: str) -> None: From e396043f794bbd6e6fdf06eb8fa178a98b1392bb Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Thu, 20 Mar 2025 12:38:18 -0600 Subject: [PATCH 26/97] Don't log go2rtc failure when camera is disabled (#17277) --- frigate/api/app.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/frigate/api/app.py b/frigate/api/app.py index f19070a3a..4f45678d0 100644 --- a/frigate/api/app.py +++ b/frigate/api/app.py @@ -80,12 +80,14 @@ def go2rtc_streams(): @router.get("/go2rtc/streams/{camera_name}") -def go2rtc_camera_stream(camera_name: str): +def go2rtc_camera_stream(request: Request, camera_name: str): r = requests.get( f"http://127.0.0.1:1984/api/streams?src={camera_name}&video=all&audio=allµphone" ) if not r.ok: - logger.error("Failed to fetch streams from go2rtc") + if request.app.frigate_config.cameras.get(camera_name, {}).get("enabled", True): + logger.error("Failed to fetch streams from go2rtc") + return JSONResponse( content=({"success": False, "message": "Error fetching stream data"}), status_code=500, From 060659044ee6fb91ddd5c314170e33170d187a39 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Thu, 20 Mar 2025 14:09:57 -0600 Subject: [PATCH 27/97] Quick fixes (#17278) * Fix check * Fix default loading state --- frigate/api/app.py | 4 +++- web/src/hooks/use-camera-activity.ts | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/frigate/api/app.py b/frigate/api/app.py index 4f45678d0..0d391035e 100644 --- a/frigate/api/app.py +++ b/frigate/api/app.py @@ -85,7 +85,9 @@ def go2rtc_camera_stream(request: Request, camera_name: str): f"http://127.0.0.1:1984/api/streams?src={camera_name}&video=all&audio=allµphone" ) if not r.ok: - if request.app.frigate_config.cameras.get(camera_name, 
{}).get("enabled", True): + camera_config = request.app.frigate_config.cameras.get(camera_name) + + if camera_config and camera_config.enabled: logger.error("Failed to fetch streams from go2rtc") return JSONResponse( diff --git a/web/src/hooks/use-camera-activity.ts b/web/src/hooks/use-camera-activity.ts index 96b5f6ea5..b81ad54b0 100644 --- a/web/src/hooks/use-camera-activity.ts +++ b/web/src/hooks/use-camera-activity.ts @@ -147,7 +147,7 @@ export function useCameraActivity( return cameras[camera.name].camera_fps == 0 && stats["service"].uptime > 60; }, [camera, stats]); - const isCameraEnabled = cameraEnabled ? cameraEnabled === "ON" : undefined; + const isCameraEnabled = cameraEnabled ? cameraEnabled === "ON" : true; return { enabled: isCameraEnabled, From 08cf0def6e50b8101233ad49dd97075e17500b3e Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Fri, 21 Mar 2025 11:47:32 -0600 Subject: [PATCH 28/97] Face tweaks (#17290) * Ensure doesn't fail due to missing dir * Remove redundant settings from tabs * Adjust selection method for mobile * Fix button descendent error * Ensure train is option on mobile * Cleanup face images * Cleanup --- frigate/api/classification.py | 3 + frigate/data_processing/real_time/face.py | 1 + web/src/pages/FaceLibrary.tsx | 164 +++++++++++++++------- web/src/pages/Settings.tsx | 35 +++-- web/src/types/face.ts | 4 + 5 files changed, 137 insertions(+), 70 deletions(-) diff --git a/frigate/api/classification.py b/frigate/api/classification.py index 498158ff2..d3ee9c3d9 100644 --- a/frigate/api/classification.py +++ b/frigate/api/classification.py @@ -30,6 +30,9 @@ router = APIRouter(tags=[Tags.events]) def get_faces(): face_dict: dict[str, list[str]] = {} + if not os.path.exists(FACE_DIR): + return JSONResponse(status_code=200, content={}) + for name in os.listdir(FACE_DIR): face_dir = os.path.join(FACE_DIR, name) diff --git a/frigate/data_processing/real_time/face.py b/frigate/data_processing/real_time/face.py index 61baba98e..ce2ea85c1 100644 --- a/frigate/data_processing/real_time/face.py +++ b/frigate/data_processing/real_time/face.py @@ -506,6 +506,7 @@ class FaceRealTimeProcessor(RealTimeProcessorApi): if self.config.face_recognition.save_attempts: # write face to library folder = os.path.join(FACE_DIR, "train") + os.makedirs(folder, exist_ok=True) new_file = os.path.join( folder, f"{id}-{sub_label}-{score}-{face_score}.webp" ) diff --git a/web/src/pages/FaceLibrary.tsx b/web/src/pages/FaceLibrary.tsx index 73fd54c7a..0ac937283 100644 --- a/web/src/pages/FaceLibrary.tsx +++ b/web/src/pages/FaceLibrary.tsx @@ -32,11 +32,11 @@ import { useFormattedTimestamp } from "@/hooks/use-date-utils"; import useKeyboardListener from "@/hooks/use-keyboard-listener"; import useOptimisticState from "@/hooks/use-optimistic-state"; import { cn } from "@/lib/utils"; -import { RecognizedFaceData } from "@/types/face"; +import { FaceLibraryData, RecognizedFaceData } from "@/types/face"; import { FrigateConfig } from "@/types/frigateConfig"; import axios from "axios"; import { useCallback, useEffect, useMemo, useRef, useState } from "react"; -import { isDesktop } from "react-device-detect"; +import { isDesktop, isMobile } from "react-device-detect"; import { useTranslation } from "react-i18next"; import { LuImagePlus, LuRefreshCw, LuScanFace, LuTrash2 } from "react-icons/lu"; import { toast } from "sonner"; @@ -55,11 +55,11 @@ export default function FaceLibrary() { const [page, setPage] = useState(); const [pageToggle, setPageToggle] = useOptimisticState(page, setPage, 100); - 
const tabsRef = useRef(null); // face data - const { data: faceData, mutate: refreshFaces } = useSWR("faces"); + const { data: faceData, mutate: refreshFaces } = + useSWR("faces"); const faces = useMemo( () => @@ -233,50 +233,13 @@ export default function FaceLibrary() { />

- -
- { - if (value) { - setPageToggle(value); - } - }} - > - {trainImages.length > 0 && ( - <> - -
{t("train.title")}
-
-
|
- - )} - - {Object.values(faces).map((item) => ( - -
- {item} ({faceData[item].length}) -
-
- ))} -
- -
-
+ {selectedFaces?.length > 0 ? (
+ + + {trainImages.length > 0 && ( + setPageToggle("train")} + > +
{t("train.title")}
+
+ )} + {Object.values(faces).map((face) => ( + setPageToggle(face)} + > + {face} ({faceData?.[face].length}) + + ))} +
+ + ); +} + type TrainingGridProps = { config: FrigateConfig; attemptImages: string[]; @@ -536,7 +588,7 @@ function FaceAttempt({
- + @@ -579,7 +631,12 @@ type FaceGridProps = { }; function FaceGrid({ faceImages, pageToggle, onDelete }: FaceGridProps) { return ( -
+
{faceImages.map((image: string) => ( -
+
diff --git a/web/src/pages/Settings.tsx b/web/src/pages/Settings.tsx index 6ccda34f3..b00d3255c 100644 --- a/web/src/pages/Settings.tsx +++ b/web/src/pages/Settings.tsx @@ -47,9 +47,9 @@ import { useIsAdmin } from "@/hooks/use-is-admin"; import { useTranslation } from "react-i18next"; const allSettingsViews = [ - "uiSettings", - "classificationSettings", - "cameraSettings", + "ui", + "classification", + "cameras", "masksAndZones", "motionTuner", "debug", @@ -61,7 +61,7 @@ type SettingsType = (typeof allSettingsViews)[number]; export default function Settings() { const { t } = useTranslation(["views/settings"]); - const [page, setPage] = useState("uiSettings"); + const [page, setPage] = useState("ui"); const [pageToggle, setPageToggle] = useOptimisticState(page, setPage, 100); const tabsRef = useRef(null); @@ -73,7 +73,7 @@ export default function Settings() { const isAdmin = useIsAdmin(); - const allowedViewsForViewer: SettingsType[] = ["uiSettings", "debug"]; + const allowedViewsForViewer: SettingsType[] = ["ui", "debug"]; const visibleSettingsViews = !isAdmin ? allowedViewsForViewer : allSettingsViews; @@ -135,10 +135,7 @@ export default function Settings() { const firstEnabledCamera = cameras.find((cam) => cameraEnabledStates[cam.name]) || cameras[0]; setSelectedCamera(firstEnabledCamera.name); - } else if ( - !cameraEnabledStates[selectedCamera] && - page !== "cameraSettings" - ) { + } else if (!cameraEnabledStates[selectedCamera] && page !== "cameras") { // Switch to first enabled camera if current one is disabled, unless on "camera settings" page const firstEnabledCamera = cameras.find((cam) => cameraEnabledStates[cam.name]) || cameras[0]; @@ -167,8 +164,8 @@ export default function Settings() { useSearchEffect("page", (page: string) => { if (allSettingsViews.includes(page as SettingsType)) { // Restrict viewer to UI settings - if (!isAdmin && !["uiSettings", "debug"].includes(page)) { - setPage("uiSettings"); + if (!isAdmin && !["ui", "debug"].includes(page)) { + setPage("ui"); } else { setPage(page as SettingsType); } @@ -203,8 +200,8 @@ export default function Settings() { onValueChange={(value: SettingsType) => { if (value) { // Restrict viewer navigation - if (!isAdmin && !["uiSettings", "debug"].includes(value)) { - setPageToggle("uiSettings"); + if (!isAdmin && !["ui", "debug"].includes(value)) { + setPageToggle("ui"); } else { setPageToggle(value); } @@ -214,7 +211,7 @@ export default function Settings() { {visibleSettingsViews.map((item) => ( {(page == "debug" || - page == "cameraSettings" || + page == "cameras" || page == "masksAndZones" || page == "motionTuner") && (
@@ -251,14 +248,14 @@ export default function Settings() { )}
- {page == "uiSettings" && } - {page == "classificationSettings" && ( + {page == "ui" && } + {page == "classification" && ( )} {page == "debug" && ( )} - {page == "cameraSettings" && ( + {page == "cameras" && ( {allCameras.map((item) => { const isEnabled = cameraEnabledStates[item.name]; - const isCameraSettingsPage = currentPage === "cameraSettings"; + const isCameraSettingsPage = currentPage === "cameras"; return ( Date: Fri, 21 Mar 2025 16:37:09 -0600 Subject: [PATCH 29/97] Sub label snapshot (#17296) * Publish sub label for logos * Cleanup check --- frigate/track/object_processing.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/frigate/track/object_processing.py b/frigate/track/object_processing.py index b18ad97fa..4593da5a4 100644 --- a/frigate/track/object_processing.py +++ b/frigate/track/object_processing.py @@ -172,6 +172,16 @@ class TrackedObjectProcessor(threading.Thread): retain=True, ) + if obj.obj_data.get("sub_label"): + sub_label = obj.obj_data["sub_label"][0] + + if sub_label in self.config.model.all_attribute_logos: + self.dispatcher.publish( + f"{camera}/{sub_label}/snapshot", + jpg_bytes, + retain=True, + ) + def camera_activity(camera, activity): last_activity = self.camera_activity.get(camera) From 48e4c44b32619c01d683ff2ca3e742057e355b03 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Fri, 21 Mar 2025 18:55:46 -0600 Subject: [PATCH 30/97] Add support for RF-DETR models (#17298) * Add support for rf-detr models * Add docs for rf-detr model * Cleanup --- docs/docs/configuration/object_detectors.md | 34 +++++++++++- frigate/detectors/detector_config.py | 3 +- frigate/detectors/plugins/onnx.py | 5 +- frigate/util/model.py | 60 ++++++++++++++++++++- 4 files changed, 97 insertions(+), 5 deletions(-) diff --git a/docs/docs/configuration/object_detectors.md b/docs/docs/configuration/object_detectors.md index 8cc6b2f1e..71716de6a 100644 --- a/docs/docs/configuration/object_detectors.md +++ b/docs/docs/configuration/object_detectors.md @@ -342,7 +342,7 @@ Note that the labelmap uses a subset of the complete COCO label set that has onl #### D-FINE -[D-FINE](https://github.com/Peterande/D-FINE) is the [current state of the art](https://paperswithcode.com/sota/real-time-object-detection-on-coco?p=d-fine-redefine-regression-task-in-detrs-as) at the time of writing. The ONNX exported models are supported, but not included by default. See [the models section](#downloading-d-fine-model) for more information on downloading the D-FINE model for use in Frigate. +[D-FINE](https://github.com/Peterande/D-FINE) is a DETR based model. The ONNX exported models are supported, but not included by default. See [the models section](#downloading-d-fine-model) for more information on downloading the D-FINE model for use in Frigate. After placing the downloaded onnx model in your config/model_cache folder, you can use the following configuration: @@ -647,9 +647,29 @@ model: Note that the labelmap uses a subset of the complete COCO label set that has only 80 objects. +#### RF-DETR + +[RF-DETR](https://github.com/roboflow/rf-detr) is a DETR based model. The ONNX exported models are supported, but not included by default. See [the models section](#downloading-rf-detr-model) for more informatoin on downloading the RF-DETR model for use in Frigate. 
+ +After placing the downloaded onnx model in your `config/model_cache` folder, you can use the following configuration: + +``` +detectors: + onnx: + type: onnx + +model: + model_type: rfdetr + width: 560 + height: 560 + input_tensor: nchw + input_dtype: float + path: /config/model_cache/rfdetr.onnx +``` + #### D-FINE -[D-FINE](https://github.com/Peterande/D-FINE) is the [current state of the art](https://paperswithcode.com/sota/real-time-object-detection-on-coco?p=d-fine-redefine-regression-task-in-detrs-as) at the time of writing. The ONNX exported models are supported, but not included by default. See [the models section](#downloading-d-fine-model) for more information on downloading the D-FINE model for use in Frigate. +[D-FINE](https://github.com/Peterande/D-FINE) is a DETR based model. The ONNX exported models are supported, but not included by default. See [the models section](#downloading-d-fine-model) for more information on downloading the D-FINE model for use in Frigate. After placing the downloaded onnx model in your config/model_cache folder, you can use the following configuration: @@ -873,6 +893,16 @@ Make sure you change the batch size to 1 before exporting. ::: +### Download RF-DETR Model + +To export as ONNX: + +1. `pip3 install rfdetr` +2. `python` +3. `from rfdetr import RFDETRBase` +4. `x = RFDETRBase()` +5. `x.export()` + ### Downloading YOLO-NAS Model You can build and download a compatible model with pre-trained weights using [this notebook](https://github.com/blakeblackshear/frigate/blob/dev/notebooks/YOLO_NAS_Pretrained_Export.ipynb) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/blakeblackshear/frigate/blob/dev/notebooks/YOLO_NAS_Pretrained_Export.ipynb). 
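Returning to the RF-DETR export steps above: before pointing Frigate at the exported file, it can be worth confirming the ONNX graph has the signature the `rfdetr` post-processing expects (a single `nchw` image input plus box and per-class score outputs). A minimal sketch — assuming `onnxruntime` is installed and the export produced a file named `rfdetr.onnx`; both the package choice and the filename are assumptions, not part of this PR:

```python
# Sanity-check an exported RF-DETR ONNX file before copying it into
# config/model_cache. Assumes `pip3 install onnxruntime numpy`.
import numpy as np
import onnxruntime as ort

session = ort.InferenceSession("rfdetr.onnx", providers=["CPUExecutionProvider"])

for i in session.get_inputs():
    print("input:", i.name, i.shape, i.type)   # expect something like [1, 3, 560, 560], float32
for o in session.get_outputs():
    print("output:", o.name, o.shape, o.type)  # expect box and per-class score tensors

# Run one dummy frame through the graph to confirm it executes end to end.
dummy = np.zeros((1, 3, 560, 560), dtype=np.float32)
outputs = session.run(None, {session.get_inputs()[0].name: dummy})
print([t.shape for t in outputs])
```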
diff --git a/frigate/detectors/detector_config.py b/frigate/detectors/detector_config.py index be12e7fcc..ce7738493 100644 --- a/frigate/detectors/detector_config.py +++ b/frigate/detectors/detector_config.py @@ -33,11 +33,12 @@ class InputDTypeEnum(str, Enum): class ModelTypeEnum(str, Enum): + dfine = "dfine" + rfdetr = "rfdetr" ssd = "ssd" yolox = "yolox" yolov9 = "yolov9" yolonas = "yolonas" - dfine = "dfine" yologeneric = "yolo-generic" diff --git a/frigate/detectors/plugins/onnx.py b/frigate/detectors/plugins/onnx.py index d94b4660f..2679185a9 100644 --- a/frigate/detectors/plugins/onnx.py +++ b/frigate/detectors/plugins/onnx.py @@ -12,6 +12,7 @@ from frigate.detectors.detector_config import ( from frigate.util.model import ( get_ort_providers, post_process_dfine, + post_process_rfdetr, post_process_yolov9, ) @@ -73,7 +74,9 @@ class ONNXDetector(DetectionApi): model_input_name = self.model.get_inputs()[0].name tensor_output = self.model.run(None, {model_input_name: tensor_input}) - if self.onnx_model_type == ModelTypeEnum.yolonas: + if self.onnx_model_type == ModelTypeEnum.rfdetr: + return post_process_rfdetr(tensor_output) + elif self.onnx_model_type == ModelTypeEnum.yolonas: predictions = tensor_output[0] detections = np.zeros((20, 6), np.float32) diff --git a/frigate/util/model.py b/frigate/util/model.py index d96493ee6..19b3b1bf5 100644 --- a/frigate/util/model.py +++ b/frigate/util/model.py @@ -13,7 +13,11 @@ logger = logging.getLogger(__name__) ### Post Processing -def post_process_dfine(tensor_output: np.ndarray, width, height) -> np.ndarray: + + +def post_process_dfine( + tensor_output: np.ndarray, width: int, height: int +) -> np.ndarray: class_ids = tensor_output[0][tensor_output[2] > 0.4] boxes = tensor_output[1][tensor_output[2] > 0.4] scores = tensor_output[2][tensor_output[2] > 0.4] @@ -41,6 +45,60 @@ def post_process_dfine(tensor_output: np.ndarray, width, height) -> np.ndarray: return detections +def post_process_rfdetr(tensor_output: list[np.ndarray, np.ndarray]) -> np.ndarray: + boxes = tensor_output[0] + raw_scores = tensor_output[1] + + # apply soft max to scores + exp = np.exp(raw_scores - np.max(raw_scores, axis=-1, keepdims=True)) + all_scores = exp / np.sum(exp, axis=-1, keepdims=True) + + # get highest scoring class from every detection + scores = np.max(all_scores[0, :, 1:], axis=-1) + labels = np.argmax(all_scores[0, :, 1:], axis=-1) + + idxs = scores > 0.4 + filtered_boxes = boxes[0, idxs] + filtered_scores = scores[idxs] + filtered_labels = labels[idxs] + + # convert boxes from [x_center, y_center, width, height] + x_center, y_center, w, h = ( + filtered_boxes[:, 0], + filtered_boxes[:, 1], + filtered_boxes[:, 2], + filtered_boxes[:, 3], + ) + x_min = x_center - w / 2 + y_min = y_center - h / 2 + x_max = x_center + w / 2 + y_max = y_center + h / 2 + filtered_boxes = np.stack([x_min, y_min, x_max, y_max], axis=-1) + + # apply nms + indices = cv2.dnn.NMSBoxes( + filtered_boxes, filtered_scores, score_threshold=0.4, nms_threshold=0.4 + ) + detections = np.zeros((20, 6), np.float32) + + for i, (bbox, confidence, class_id) in enumerate( + zip(filtered_boxes[indices], filtered_scores[indices], filtered_labels[indices]) + ): + if i == 20: + break + + detections[i] = [ + class_id, + confidence, + bbox[1], + bbox[0], + bbox[3], + bbox[2], + ] + + return detections + + def post_process_yolov9(predictions: np.ndarray, width, height) -> np.ndarray: predictions = np.squeeze(predictions).T scores = np.max(predictions[:, 4:], axis=1) From 
d32949017b88f62c5ee4bd88a010c763faa0f4bb Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Sat, 22 Mar 2025 07:38:33 -0500 Subject: [PATCH 31/97] Bugfixes and docs tweaks (#17307) * ensure config file is updated with booleans instead of strings * catch onvif error * ensure object type is available as a ptz tracker * update live view docs --- docs/docs/configuration/live.md | 6 ++++-- frigate/ptz/onvif.py | 6 +++++- frigate/track/norfair_tracker.py | 1 + web/src/views/settings/ClassificationSettingsView.tsx | 2 +- 4 files changed, 11 insertions(+), 4 deletions(-) diff --git a/docs/docs/configuration/live.md b/docs/docs/configuration/live.md index 42809739a..494413682 100644 --- a/docs/docs/configuration/live.md +++ b/docs/docs/configuration/live.md @@ -203,9 +203,9 @@ Note that disabling a camera through the config file (`enabled: False`) removes Frigate intelligently selects the live streaming technology based on a number of factors (user-selected modes like two-way talk, camera settings, browser capabilities, available bandwidth) and prioritizes showing an actual up-to-date live view of your camera's stream as quickly as possible. - When you have go2rtc configured, Live view initially attempts to load and play back your stream with a clearer, fluent stream technology (MSE). An initial timeout, a low bandwidth condition that would cause buffering of the stream, or decoding errors in the stream will cause Frigate to switch to the stream defined by the `detect` role, using the jsmpeg format. This is what the UI labels as "low bandwidth mode". On Live dashboards, the mode will automatically reset when smart streaming is configured and activity stops. You can also try using the _Reset_ button to force a reload of your stream. + When you have go2rtc configured, Live view initially attempts to load and play back your stream with a clearer, fluent stream technology (MSE). An initial timeout, a low bandwidth condition that would cause buffering of the stream, or decoding errors in the stream will cause Frigate to switch to the stream defined by the `detect` role, using the jsmpeg format. This is what the UI labels as "low bandwidth mode". On Live dashboards, the mode will automatically reset when smart streaming is configured and activity stops. Continuous streaming mode does not have an automatic reset mechanism, but you can use the _Reset_ option to force a reload of your stream. - If you are still experiencing Frigate falling back to low bandwidth mode, you may need to adjust your camera's settings per the recommendations above or ensure you have enough bandwidth available. + If you are still experiencing Frigate falling back to low bandwidth mode, you may need to adjust your camera's settings per the (recommendations above)[#camera_settings_recommendations]. 3. **It doesn't seem like my cameras are streaming on the Live dashboard. Why?** @@ -221,6 +221,8 @@ Note that disabling a camera through the config file (`enabled: False`) removes This static image is pulled from the stream defined in your config with the `detect` role. When activity is detected, images from the `detect` stream immediately begin updating at ~5 frames per second so you can see the activity until the live player is loaded and begins playing. This usually only takes a second or two. If the live player times out, buffers, or has streaming errors, the jsmpeg player is loaded and plays a video-only stream from the `detect` role. 
When activity ends, the players are destroyed and a static image is displayed until activity is detected again, and the process repeats. + Smart streaming depends on having your camera's motion `threshold` and `contour_area` config values dialed in. Use the Motion Tuner in Settings in the UI to tune these values in real-time. + This is Frigate's default and recommended setting because it results in a significant bandwidth savings, especially for high resolution cameras. 6. **I have unmuted some cameras on my dashboard, but I do not hear sound. Why?** diff --git a/frigate/ptz/onvif.py b/frigate/ptz/onvif.py index dea7f5b77..eec57dbac 100644 --- a/frigate/ptz/onvif.py +++ b/frigate/ptz/onvif.py @@ -86,7 +86,11 @@ class OnvifController: async def _init_onvif(self, camera_name: str) -> bool: onvif: ONVIFCamera = self.cams[camera_name]["onvif"] - await onvif.update_xaddrs() + try: + await onvif.update_xaddrs() + except Exception as e: + logger.error(f"Onvif connection failed for {camera_name}: {e}") + return False # create init services media: ONVIFService = await onvif.create_media_service() diff --git a/frigate/track/norfair_tracker.py b/frigate/track/norfair_tracker.py index db17f9313..3487aa8c0 100644 --- a/frigate/track/norfair_tracker.py +++ b/frigate/track/norfair_tracker.py @@ -246,6 +246,7 @@ class NorfairTracker(ObjectTracker): "ptz" if self.camera_config.onvif.autotracking.enabled_in_config and object_type in self.camera_config.onvif.autotracking.track + and object_type in self.ptz_object_type_configs.keys() else "static" ) if object_type in self.trackers: diff --git a/web/src/views/settings/ClassificationSettingsView.tsx b/web/src/views/settings/ClassificationSettingsView.tsx index 3ff466f13..dac136e0c 100644 --- a/web/src/views/settings/ClassificationSettingsView.tsx +++ b/web/src/views/settings/ClassificationSettingsView.tsx @@ -136,7 +136,7 @@ export default function ClassificationSettingsView({ axios .put( - `config/set?semantic_search.enabled=${classificationSettings.search.enabled ? "True" : "False"}&semantic_search.reindex=${classificationSettings.search.reindex ? "True" : "False"}&semantic_search.model_size=${classificationSettings.search.model_size}&face_recognition.enabled=${classificationSettings.face.enabled}&lpr.enabled=${classificationSettings.lpr.enabled}`, + `config/set?semantic_search.enabled=${classificationSettings.search.enabled ? "True" : "False"}&semantic_search.reindex=${classificationSettings.search.reindex ? "True" : "False"}&semantic_search.model_size=${classificationSettings.search.model_size}&face_recognition.enabled=${classificationSettings.face.enabled ? "True" : "False"}&lpr.enabled=${classificationSettings.lpr.enabled ? 
"True" : "False"}`, { requires_restart: 0, }, From 17e14cefd931c12733bf3aefc532f6ff38cf941f Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Sat, 22 Mar 2025 12:58:27 -0600 Subject: [PATCH 32/97] Various fixes & tweaks (#17308) * Catch case where returned face box is invalid * Update detector docs * Add note for customizing rfdetr resolution --- docs/docs/configuration/object_detectors.md | 22 ++++++++++++++++++--- frigate/data_processing/real_time/face.py | 6 +++++- 2 files changed, 24 insertions(+), 4 deletions(-) diff --git a/docs/docs/configuration/object_detectors.md b/docs/docs/configuration/object_detectors.md index 71716de6a..174343ef4 100644 --- a/docs/docs/configuration/object_detectors.md +++ b/docs/docs/configuration/object_detectors.md @@ -344,6 +344,12 @@ Note that the labelmap uses a subset of the complete COCO label set that has onl [D-FINE](https://github.com/Peterande/D-FINE) is a DETR based model. The ONNX exported models are supported, but not included by default. See [the models section](#downloading-d-fine-model) for more information on downloading the D-FINE model for use in Frigate. +:::warning + +Currently D-FINE models only run on OpenVINO in CPU mode, GPUs currently fail to compile the model + +::: + After placing the downloaded onnx model in your config/model_cache folder, you can use the following configuration: ```yaml @@ -653,7 +659,7 @@ Note that the labelmap uses a subset of the complete COCO label set that has onl After placing the downloaded onnx model in your `config/model_cache` folder, you can use the following configuration: -``` +```yaml detectors: onnx: type: onnx @@ -671,7 +677,7 @@ model: [D-FINE](https://github.com/Peterande/D-FINE) is a DETR based model. The ONNX exported models are supported, but not included by default. See [the models section](#downloading-d-fine-model) for more information on downloading the D-FINE model for use in Frigate. -After placing the downloaded onnx model in your config/model_cache folder, you can use the following configuration: +After placing the downloaded onnx model in your `config/model_cache` folder, you can use the following configuration: ```yaml detectors: @@ -898,11 +904,21 @@ Make sure you change the batch size to 1 before exporting. To export as ONNX: 1. `pip3 install rfdetr` -2. `python` +2. `python3` 3. `from rfdetr import RFDETRBase` 4. `x = RFDETRBase()` 5. `x.export()` +#### Additional Configuration + +The input tensor resolution can be customized: + +```python +from rfdetr import RFDETRBase +x = RFDETRBase(resolution=560) # resolution must be a multiple of 56 +x.export() +``` + ### Downloading YOLO-NAS Model You can build and download a compatible model with pre-trained weights using [this notebook](https://github.com/blakeblackshear/frigate/blob/dev/notebooks/YOLO_NAS_Pretrained_Export.ipynb) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/blakeblackshear/frigate/blob/dev/notebooks/YOLO_NAS_Pretrained_Export.ipynb). 
diff --git a/frigate/data_processing/real_time/face.py b/frigate/data_processing/real_time/face.py index ce2ea85c1..7b49a2f47 100644 --- a/frigate/data_processing/real_time/face.py +++ b/frigate/data_processing/real_time/face.py @@ -329,7 +329,11 @@ class FaceRealTimeProcessor(RealTimeProcessorApi): max(0, face_box[1]) : min(frame.shape[0], face_box[3]), max(0, face_box[0]) : min(frame.shape[1], face_box[2]), ] - face_frame = cv2.cvtColor(face_frame, cv2.COLOR_RGB2BGR) + + try: + face_frame = cv2.cvtColor(face_frame, cv2.COLOR_RGB2BGR) + except Exception: + return else: # don't run for object without attributes if not obj_data.get("current_attributes"): From 644faaf65b9effbd0b1c6071cfb46209b9a35022 Mon Sep 17 00:00:00 2001 From: leccelecce <24962424+leccelecce@users.noreply.github.com> Date: Sat, 22 Mar 2025 19:13:41 +0000 Subject: [PATCH 33/97] Fix webUI generating HTTP500s when camera disabled (#17305) * Check camera enabled state before querying go2rtc * lint * Add change to CameraStreamingDialog --- web/src/components/settings/CameraStreamingDialog.tsx | 7 ++++++- web/src/views/live/LiveCameraView.tsx | 11 +++++------ 2 files changed, 11 insertions(+), 7 deletions(-) diff --git a/web/src/components/settings/CameraStreamingDialog.tsx b/web/src/components/settings/CameraStreamingDialog.tsx index b1c758a38..6e5fde8ba 100644 --- a/web/src/components/settings/CameraStreamingDialog.tsx +++ b/web/src/components/settings/CameraStreamingDialog.tsx @@ -1,4 +1,5 @@ import { useState, useCallback, useEffect, useMemo } from "react"; +import { useEnabledState } from "@/api/ws"; import { IoIosWarning } from "react-icons/io"; import { Button } from "@/components/ui/button"; import { @@ -63,6 +64,10 @@ export function CameraStreamingDialog({ // metadata + // camera enabled state + const { payload: enabledState } = useEnabledState(camera); + const cameraEnabled = enabledState === "ON"; + const isRestreamed = useMemo( () => config && @@ -71,7 +76,7 @@ export function CameraStreamingDialog({ ); const { data: cameraMetadata } = useSWR( - isRestreamed ? `go2rtc/streams/${streamName}` : null, + cameraEnabled && isRestreamed ? `go2rtc/streams/${streamName}` : null, { revalidateOnFocus: false, }, diff --git a/web/src/views/live/LiveCameraView.tsx b/web/src/views/live/LiveCameraView.tsx index 34d61d684..9a4b733ed 100644 --- a/web/src/views/live/LiveCameraView.tsx +++ b/web/src/views/live/LiveCameraView.tsx @@ -142,8 +142,11 @@ export default function LiveCameraView({ const [{ width: windowWidth, height: windowHeight }] = useResizeObserver(window); - // supported features + // camera enabled state + const { payload: enabledState } = useEnabledState(camera.name); + const cameraEnabled = enabledState === "ON"; + // supported features const [streamName, setStreamName] = usePersistence( `${camera.name}-stream`, Object.values(camera.live.streams)[0], @@ -157,7 +160,7 @@ export default function LiveCameraView({ ); const { data: cameraMetadata } = useSWR( - isRestreamed ? `go2rtc/streams/${streamName}` : null, + cameraEnabled && isRestreamed ? 
`go2rtc/streams/${streamName}` : null, { revalidateOnFocus: false, }, @@ -192,10 +195,6 @@ export default function LiveCameraView({ ); }, [cameraMetadata]); - // camera enabled state - const { payload: enabledState } = useEnabledState(camera.name); - const cameraEnabled = enabledState === "ON"; - // click overlay for ptzs const [clickOverlay, setClickOverlay] = useState(false); From e36fe797828e695520d58faccf4dd7397deabaf3 Mon Sep 17 00:00:00 2001 From: leccelecce <24962424+leccelecce@users.noreply.github.com> Date: Sun, 23 Mar 2025 11:34:33 +0000 Subject: [PATCH 34/97] Update webmanifest to use /BASE_PATH/ (#17310) --- docker/main/rootfs/usr/local/nginx/conf/nginx.conf | 4 ++-- web/site.webmanifest | 10 +++++----- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/docker/main/rootfs/usr/local/nginx/conf/nginx.conf b/docker/main/rootfs/usr/local/nginx/conf/nginx.conf index 6c60019c7..c8cd7fd45 100644 --- a/docker/main/rootfs/usr/local/nginx/conf/nginx.conf +++ b/docker/main/rootfs/usr/local/nginx/conf/nginx.conf @@ -313,8 +313,8 @@ http { proxy_set_header Accept-Encoding ""; sub_filter_once off; sub_filter_types application/json; - sub_filter '"start_url": "/"' '"start_url" : "$http_x_ingress_path"'; - sub_filter '"src": "/' '"src": "$http_x_ingress_path/'; + sub_filter '"start_url": "/BASE_PATH/"' '"start_url" : "$http_x_ingress_path/"'; + sub_filter '"src": "/BASE_PATH/' '"src": "$http_x_ingress_path/'; } sub_filter 'href="/BASE_PATH/' 'href="$http_x_ingress_path/'; diff --git a/web/site.webmanifest b/web/site.webmanifest index 94e455ec8..7040ce5c9 100644 --- a/web/site.webmanifest +++ b/web/site.webmanifest @@ -1,28 +1,28 @@ { "name": "Frigate", "short_name": "Frigate", - "start_url": "/", + "start_url": "/BASE_PATH/", "icons": [ { - "src": "/images/android-chrome-512x512.png", + "src": "/BASE_PATH/images/android-chrome-512x512.png", "sizes": "512x512", "type": "image/png", "purpose": "any" }, { - "src": "/images/android-chrome-192x192.png", + "src": "/BASE_PATH/images/android-chrome-192x192.png", "sizes": "192x192", "type": "image/png", "purpose": "any" }, { - "src": "/images/maskable-icon.png", + "src": "/BASE_PATH/images/maskable-icon.png", "sizes": "180x180", "type": "image/png", "purpose": "maskable" }, { - "src": "/images/maskable-badge.png", + "src": "/BASE_PATH/images/maskable-badge.png", "sizes": "96x96", "type": "image/png", "purpose": "maskable" From b7fcd41737a6ed29b572bf6e920eea2bcccb379b Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Sun, 23 Mar 2025 13:51:06 -0500 Subject: [PATCH 35/97] UI tweaks (#17319) --- .../settings/CameraStreamingDialog.tsx | 37 ++++++++++++++----- web/src/views/live/DraggableGridLayout.tsx | 17 +++++++-- web/src/views/system/StorageMetrics.tsx | 4 +- 3 files changed, 44 insertions(+), 14 deletions(-) diff --git a/web/src/components/settings/CameraStreamingDialog.tsx b/web/src/components/settings/CameraStreamingDialog.tsx index 6e5fde8ba..393de0419 100644 --- a/web/src/components/settings/CameraStreamingDialog.tsx +++ b/web/src/components/settings/CameraStreamingDialog.tsx @@ -103,16 +103,24 @@ export function CameraStreamingDialog({ if (!config) { return; } + + // Get available streams from the config and first stream entry as fallback + const availableStreams = config?.cameras[camera]?.live?.streams || {}; + const firstStreamEntry = Object.entries(availableStreams)[0]?.[1] || ""; + if (groupStreamingSettings && groupStreamingSettings[camera]) { const cameraSettings = 
groupStreamingSettings[camera]; - setStreamName(cameraSettings.streamName || ""); + const streamNameFromSettings = cameraSettings.streamName || ""; + + const streamExists = + streamNameFromSettings && + Object.values(availableStreams).includes(streamNameFromSettings); + + setStreamName(streamExists ? streamNameFromSettings : firstStreamEntry); setStreamType(cameraSettings.streamType || "smart"); setCompatibilityMode(cameraSettings.compatibilityMode || false); } else { - setStreamName( - Object.entries(config?.cameras[camera]?.live?.streams || {})[0]?.[1] || - "", - ); + setStreamName(firstStreamEntry); setStreamType("smart"); setCompatibilityMode(false); } @@ -150,19 +158,28 @@ export function CameraStreamingDialog({ if (!config) { return; } + + // Get available streams from the config and first stream entry as fallback + const availableStreams = config?.cameras[camera]?.live?.streams || {}; + const firstStreamEntry = Object.entries(availableStreams)[0]?.[1] || ""; + if (groupStreamingSettings && groupStreamingSettings[camera]) { const cameraSettings = groupStreamingSettings[camera]; - setStreamName(cameraSettings.streamName || ""); + const streamNameFromSettings = cameraSettings.streamName || ""; + + const streamExists = + streamNameFromSettings && + Object.values(availableStreams).includes(streamNameFromSettings); + + setStreamName(streamExists ? streamNameFromSettings : firstStreamEntry); setStreamType(cameraSettings.streamType || "smart"); setCompatibilityMode(cameraSettings.compatibilityMode || false); } else { - setStreamName( - Object.entries(config?.cameras[camera]?.live?.streams || {})[0]?.[1] || - "", - ); + setStreamName(firstStreamEntry); setStreamType("smart"); setCompatibilityMode(false); } + setIsDialogOpen(false); }, [groupStreamingSettings, camera, config, setIsDialogOpen]); diff --git a/web/src/views/live/DraggableGridLayout.tsx b/web/src/views/live/DraggableGridLayout.tsx index 2fc21357d..b80f59b27 100644 --- a/web/src/views/live/DraggableGridLayout.tsx +++ b/web/src/views/live/DraggableGridLayout.tsx @@ -549,9 +549,20 @@ export default function DraggableGridLayout({ } else { grow = "aspect-video"; } - const streamName = - currentGroupStreamingSettings?.[camera.name]?.streamName || - Object.values(camera.live.streams)[0]; + const availableStreams = camera.live.streams || {}; + const firstStreamEntry = Object.values(availableStreams)[0] || ""; + + const streamNameFromSettings = + currentGroupStreamingSettings?.[camera.name]?.streamName || ""; + const streamExists = + streamNameFromSettings && + Object.values(availableStreams).includes( + streamNameFromSettings, + ); + + const streamName = streamExists + ? streamNameFromSettings + : firstStreamEntry; const autoLive = currentGroupStreamingSettings?.[camera.name]?.streamType !== "no-streaming"; diff --git a/web/src/views/system/StorageMetrics.tsx b/web/src/views/system/StorageMetrics.tsx index 0bd150bfb..4e7ff646d 100644 --- a/web/src/views/system/StorageMetrics.tsx +++ b/web/src/views/system/StorageMetrics.tsx @@ -117,7 +117,9 @@ export default function StorageMetrics({ {formatUnixTimestampToDateTime(earliestDate, { timezone: timezone, strftime_fmt: - config.ui.time_format == "24hour" ? "%d %b %Y" : "%B %d, %Y", + config.ui.time_format === "24hour" + ? "%d %b %Y, %H:%M" + : "%B %d, %Y, %I:%M %p", })}
)} From fa4643fddf947f68d2df5698f1bfb952a00bb7be Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Sun, 23 Mar 2025 14:30:48 -0500 Subject: [PATCH 36/97] LPR improvements (#17289) * config options * processing in maintainer * detect and process dedicated lpr plates * create camera type, add manual event and save snapshot * use const * ensure lpr events are always detections, typing fixes * docs * docs tweaks * add preprocessing and penalization for low confidence chars --- .../license_plate_recognition.md | 94 ++- docs/docs/configuration/reference.md | 4 + frigate/camera/state.py | 8 +- frigate/comms/detections_updater.py | 1 + frigate/comms/event_metadata_updater.py | 2 + frigate/config/camera/camera.py | 7 + frigate/config/classification.py | 5 + .../common/license_plate/mixin.py | 569 ++++++++++++------ .../real_time/license_plate.py | 9 +- frigate/embeddings/maintainer.py | 64 ++ frigate/events/maintainer.py | 7 + frigate/record/maintainer.py | 2 +- frigate/review/maintainer.py | 52 +- frigate/track/object_processing.py | 76 ++- 14 files changed, 706 insertions(+), 194 deletions(-) diff --git a/docs/docs/configuration/license_plate_recognition.md b/docs/docs/configuration/license_plate_recognition.md index ee490a7a6..8f30728ec 100644 --- a/docs/docs/configuration/license_plate_recognition.md +++ b/docs/docs/configuration/license_plate_recognition.md @@ -3,16 +3,17 @@ id: license_plate_recognition title: License Plate Recognition (LPR) --- -Frigate can recognize license plates on vehicles and automatically add the detected characters to the `recognized_license_plate` field or a known name as a `sub_label` to objects that are of type `car`. A common use case may be to read the license plates of cars pulling into a driveway or cars passing by on a street. +Frigate can recognize license plates on vehicles and automatically add the detected characters to the `recognized_license_plate` field or a known name as a `sub_label` to tracked objects of type `car`. A common use case may be to read the license plates of cars pulling into a driveway or cars passing by on a street. LPR works best when the license plate is clearly visible to the camera. For moving vehicles, Frigate continuously refines the recognition process, keeping the most confident result. However, LPR does not run on stationary vehicles. When a plate is recognized, the recognized name is: -- Added to the `car` tracked object as a `sub_label` (if known) or the `recognized_license_plate` field (if unknown) -- Viewable in the Review Item Details pane in Review and the Tracked Object Details pane in Explore. +- Added as a `sub_label` (if known) or the `recognized_license_plate` field (if unknown) to a tracked object. +- Viewable in the Review Item Details pane in Review (sub labels). +- Viewable in the Tracked Object Details pane in Explore (sub labels and recognized license plates). - Filterable through the More Filters menu in Explore. -- Published via the `frigate/events` MQTT topic as a `sub_label` (known) or `recognized_license_plate` (unknown) for the tracked object. +- Published via the `frigate/events` MQTT topic as a `sub_label` (known) or `recognized_license_plate` (unknown) for the `car` tracked object. ## Model Requirements @@ -22,7 +23,7 @@ Users without a model that detects license plates can still run LPR. Frigate use :::note -Frigate needs to first detect a `car` before it can recognize a license plate. 
If you're using a dedicated LPR camera or have a zoomed-in view, make sure the camera captures enough of the `car` for Frigate to detect it reliably. +In the default mode, Frigate's LPR needs to first detect a `car` before it can recognize a license plate. If you're using a dedicated LPR camera and have a zoomed-in view where a `car` will not be detected, you can still run LPR, but the configuration parameters will differ from the default mode. See the [Dedicated LPR Cameras](#dedicated-lpr-cameras) section below. ::: @@ -39,7 +40,17 @@ lpr: enabled: True ``` -Ensure that your camera is configured to detect objects of type `car`, and that a car is actually being detected by Frigate. Otherwise, LPR will not run. +You can also enable it for specific cameras only at the camera level: + +```yaml +cameras: + driveway: + ... + lpr: + enabled: True +``` + +For non-dedicated LPR cameras, ensure that your camera is configured to detect objects of type `car`, and that a car is actually being detected by Frigate. Otherwise, LPR will not run. Like the other real-time processors in Frigate, license plate recognition runs on the camera stream defined by the `detect` role in your config. To ensure optimal performance, select a suitable resolution for this stream in your camera's firmware that fits your specific scene and requirements. @@ -78,6 +89,8 @@ Fine-tune the LPR feature using these optional parameters: ## Configuration Examples +These configuration parameters are available at the global level of your config. The only optional parameters that should be set at the camera level are `enabled` and `min_area`. + ```yaml lpr: enabled: True @@ -110,6 +123,70 @@ lpr: - "MN D3163" ``` +## Dedicated LPR Cameras + +Dedicated LPR cameras are single-purpose cameras with powerful optical zoom to capture license plates on distant vehicles, often with fine-tuned settings to capture plates at night. + +Users with a dedicated LPR camera can run Frigate's LPR by specifying a camera type of `lpr` in the camera configuration. An example config for a dedicated LPR camera might look like this: + +```yaml +# LPR global configuration +lpr: + enabled: True + min_area: 2000 + min_plate_length: 4 + +# Dedicated LPR camera configuration +cameras: + dedicated_lpr_camera: + type: "lpr" # required to use dedicated LPR camera mode + lpr: + enabled: True + expire_time: 3 # optional, default + ffmpeg: ... + detect: + enabled: False # optional, disable Frigate's standard object detection pipeline + fps: 5 + width: 1920 + height: 1080 + motion: + threshold: 30 + contour_area: 80 # use an increased value here to tune out small motion changes + improve_contrast: false + mask: 0.704,0.007,0.709,0.052,0.989,0.055,0.993,0.001 # ensure your camera's timestamp is masked + record: + enabled: True # disable recording if you only want snapshots + detections: + enabled: True + retain: + default: 7 +``` + +The camera-level `type` setting tells Frigate to treat your camera as a dedicated LPR camera. Setting this option bypasses Frigate's standard object detection pipeline so that a `car` does not need to be detected to run LPR. This dedicated LPR pipeline does not utilize defined zones or object masks, and the license plate detector is always run on the full frame whenever motion activity occurs. If a plate is found, a snapshot at the highest scoring moment is saved as a `car` object, visible in Explore and searchable by the recognized plate via Explore's More Filters. 
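+
+As a rough sketch of how repeated reads of one plate are grouped into a single tracked object in this mode, each new read is compared against recently seen plates using Jaro-Winkler string similarity (the same measure the recognition code imports, with a 0.8 threshold), and the recency window is controlled by the `expire_time` option described next. The helper name and structure below are illustrative only, not Frigate's actual API:
+
+```python
+import time
+
+from Levenshtein import jaro_winkler
+
+
+def match_or_create_plate_event(
+    detected: dict[str, dict], plate: str, expire_time: float, threshold: float = 0.8
+) -> str:
+    """Illustrative only: group repeated reads of the same plate into one event."""
+    now = time.time()
+    for event_id, data in detected.items():
+        recently_seen = now - data["last_seen"] <= expire_time
+        if recently_seen and jaro_winkler(data["plate"], plate) >= threshold:
+            data["last_seen"] = now  # keep the existing event alive
+            return event_id
+    # no recent, sufficiently similar plate was found, so start a new event
+    event_id = f"plate-{int(now * 1000)}"
+    detected[event_id] = {"plate": plate, "last_seen": now}
+    return event_id
+```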
+ +An optional config variable for dedicated LPR cameras only, `expire_time`, can be specified under the `lpr` configuration at the camera level to change the time it takes for Frigate to consider a previously tracked plate as expired. + +:::note + +When using `type: "lpr"` for a camera, a non-standard object detection pipeline is used. Any detected license plates on dedicated LPR cameras are treated similarly to manual events in Frigate. Note that for `car` objects with license plates: + +- Review items will always be classified as a `detection`. +- Snapshots will always be saved. +- Tracked objects are retained according to your retain settings for `record` and `snapshots`. +- Zones and object masks cannot be used. +- The `frigate/events` MQTT topic will not publish tracked object updates, though `frigate/reviews` will if recordings are enabled. + +::: + +### Best practices for using Dedicated LPR camera mode + +- Tune your motion detection and increase the `contour_area` until you see only larger motion boxes being created as cars pass through the frame (likely somewhere between 50-90 for a 1920x1080 detect stream). Increasing the `contour_area` filters out small areas of motion and will prevent excessive resource use from looking for license plates in frames that don't even have a car passing through it. +- Disable the `improve_contrast` motion setting, especially if you are running LPR at night and the frame is mostly dark. This will prevent small pixel changes and smaller areas of motion from triggering license plate detection. +- Ensure your camera's timestamp is covered with a motion mask so that it's not incorrectly detected as a license plate. +- While not strictly required, it may be beneficial to disable standard object detection on your dedicated LPR camera (`detect` --> `enabled: False`). If you've set the camera type to `"lpr"`, license plate detection will still be performed on the entire frame when motion occurs. +- If multiple tracked objects are being produced for the same license plate, you can tweak the `expire_time` to prevent plates from being expired from the view as quickly. +- You may need to change your camera settings for a clearer image or decrease your global `recognition_threshold` config if your plates are not being accurately recognized at night. + ## FAQ ### Why isn't my license plate being detected and recognized? @@ -118,14 +195,13 @@ Ensure that: - Your camera has a clear, human-readable, well-lit view of the plate. If you can't read the plate, Frigate certainly won't be able to. This may require changing video size, quality, or frame rate settings on your camera, depending on your scene and how fast the vehicles are traveling. - The plate is large enough in the image (try adjusting `min_area`) or increasing the resolution of your camera's stream. -- A `car` is detected first, as LPR only runs on recognized vehicles. If you are using a Frigate+ model or a custom model that detects license plates, ensure that `license_plate` is added to your list of objects to track. If you are using the free model that ships with Frigate, you should _not_ add `license_plate` to the list of objects to track. ### Can I run LPR without detecting `car` objects? -No, Frigate requires a `car` to be detected first before recognizing a license plate. +In normal LPR mode, Frigate requires a `car` to be detected first before recognizing a license plate. If you have a dedicated LPR camera, you can change the camera `type` to `"lpr"` to use the Dedicated LPR Camera algorithm. 
This comes with important caveats, though. See the [Dedicated LPR Cameras](#dedicated-lpr-cameras) section above. ### How can I improve detection accuracy? @@ -150,4 +226,4 @@ Use `match_distance` to allow small character mismatches. Alternatively, define ### Will LPR slow down my system? -LPR runs on the CPU, so performance impact depends on your hardware. Ensure you have at least 4GB RAM and a capable CPU for optimal results. +LPR runs on the CPU, so performance impact depends on your hardware. Ensure you have at least 4GB RAM and a capable CPU for optimal results. If you are running the Dedicated LPR Camera mode, resource usage will be higher compared to users who run a model that natively detects license plates. Tune your motion detection settings for your dedicated LPR camera so that the license plate detection model runs only when necessary. diff --git a/docs/docs/configuration/reference.md b/docs/docs/configuration/reference.md index 2bdf842e0..3f099ba25 100644 --- a/docs/docs/configuration/reference.md +++ b/docs/docs/configuration/reference.md @@ -562,6 +562,7 @@ face_recognition: blur_confidence_filter: True # Optional: Configuration for license plate recognition capability +# NOTE: enabled and min_area can be overridden at the camera level lpr: # Optional: Enable license plate recognition (default: shown below) enabled: False @@ -656,6 +657,9 @@ cameras: # If disabled: config is used but no live stream and no capture etc. # Events/Recordings are still viewable. enabled: True + # Optional: camera type used for some Frigate features (default: shown below) + # Options are "generic" and "lpr" + type: "generic" # Required: ffmpeg settings for the camera ffmpeg: # Required: A list of input streams for the camera. See documentation for more information. 
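
To make the known-plate matching described in the FAQ above concrete, here is a minimal sketch of how a recognized plate string can be resolved to a label — regex first, then `match_distance` edit distance. The function name and structure are illustrative assumptions, not Frigate's actual API:

```python
import re

from Levenshtein import distance


def label_for_plate(
    plate: str, known_plates: dict[str, list[str]], match_distance: int = 0
) -> str | None:
    """Illustrative only: resolve a recognized plate to a known-plate label."""
    for label, patterns in known_plates.items():
        for pattern in patterns:
            try:
                # each known_plates entry may be a plain string or a regex
                if re.fullmatch(pattern, plate):
                    return label
            except re.error:
                pass  # not a valid regex; fall back to edit distance
            # allow small character mismatches when match_distance > 0
            if match_distance > 0 and distance(plate, pattern) <= match_distance:
                return label
    # unknown plates stay in the recognized_license_plate field instead
    return None
```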
diff --git a/frigate/camera/state.py b/frigate/camera/state.py index f2469dffd..0495be1fc 100644 --- a/frigate/camera/state.py +++ b/frigate/camera/state.py @@ -409,9 +409,13 @@ class CameraState: self.previous_frame_id = frame_name def save_manual_event_image( - self, event_id: str, label: str, draw: dict[str, list[dict]] + self, + frame: np.ndarray | None, + event_id: str, + label: str, + draw: dict[str, list[dict]], ) -> None: - img_frame = self.get_current_frame() + img_frame = frame if frame is not None else self.get_current_frame() # write clean snapshot if enabled if self.camera_config.snapshots.clean_copy: diff --git a/frigate/comms/detections_updater.py b/frigate/comms/detections_updater.py index a60bd0699..f585b570d 100644 --- a/frigate/comms/detections_updater.py +++ b/frigate/comms/detections_updater.py @@ -11,6 +11,7 @@ class DetectionTypeEnum(str, Enum): api = "api" video = "video" audio = "audio" + lpr = "lpr" class DetectionPublisher(Publisher): diff --git a/frigate/comms/event_metadata_updater.py b/frigate/comms/event_metadata_updater.py index c702208bc..42a3419ce 100644 --- a/frigate/comms/event_metadata_updater.py +++ b/frigate/comms/event_metadata_updater.py @@ -15,6 +15,8 @@ class EventMetadataTypeEnum(str, Enum): regenerate_description = "regenerate_description" sub_label = "sub_label" recognized_license_plate = "recognized_license_plate" + lpr_event_create = "lpr_event_create" + save_lpr_snapshot = "save_lpr_snapshot" class EventMetadataPublisher(Publisher): diff --git a/frigate/config/camera/camera.py b/frigate/config/camera/camera.py index 0b925d46d..3b24dabac 100644 --- a/frigate/config/camera/camera.py +++ b/frigate/config/camera/camera.py @@ -1,4 +1,5 @@ import os +from enum import Enum from typing import Optional from pydantic import Field, PrivateAttr @@ -42,6 +43,11 @@ from .zone import ZoneConfig __all__ = ["CameraConfig"] +class CameraTypeEnum(str, Enum): + generic = "generic" + lpr = "lpr" + + class CameraConfig(FrigateBaseModel): name: Optional[str] = Field(None, title="Camera name.", pattern=REGEX_CAMERA_NAME) enabled: bool = Field(default=True, title="Enable camera.") @@ -102,6 +108,7 @@ class CameraConfig(FrigateBaseModel): onvif: OnvifConfig = Field( default_factory=OnvifConfig, title="Camera Onvif Configuration." ) + type: CameraTypeEnum = Field(default=CameraTypeEnum.generic, title="Camera Type") ui: CameraUiConfig = Field( default_factory=CameraUiConfig, title="Camera UI Modifications." 
) diff --git a/frigate/config/classification.py b/frigate/config/classification.py index 74eee0a55..0070569a8 100644 --- a/frigate/config/classification.py +++ b/frigate/config/classification.py @@ -127,6 +127,11 @@ class LicensePlateRecognitionConfig(FrigateBaseModel): class CameraLicensePlateRecognitionConfig(FrigateBaseModel): enabled: bool = Field(default=False, title="Enable license plate recognition.") + expire_time: int = Field( + default=3, + title="Expire plates not seen after number of seconds (for dedicated LPR cameras only).", + gt=0, + ) min_area: int = Field( default=1000, title="Minimum area of license plate to begin running recognition.", diff --git a/frigate/data_processing/common/license_plate/mixin.py b/frigate/data_processing/common/license_plate/mixin.py index 52f59e71a..3bf94a550 100644 --- a/frigate/data_processing/common/license_plate/mixin.py +++ b/frigate/data_processing/common/license_plate/mixin.py @@ -1,18 +1,26 @@ """Handle processing images for face detection and recognition.""" +import base64 import datetime import logging import math +import random import re +import string from typing import List, Optional, Tuple import cv2 import numpy as np -from Levenshtein import distance +from Levenshtein import distance, jaro_winkler from pyclipper import ET_CLOSEDPOLYGON, JT_ROUND, PyclipperOffset from shapely.geometry import Polygon -from frigate.comms.event_metadata_updater import EventMetadataTypeEnum +from frigate.comms.event_metadata_updater import ( + EventMetadataPublisher, + EventMetadataTypeEnum, +) +from frigate.config.camera.camera import CameraTypeEnum +from frigate.embeddings.onnx.lpr_embedding import LPR_EMBEDDING_SIZE from frigate.util.image import area logger = logging.getLogger(__name__) @@ -28,6 +36,8 @@ class LicensePlateProcessingMixin: "license_plate" not in self.config.objects.all_objects ) + self.event_metadata_publisher = EventMetadataPublisher() + self.ctc_decoder = CTCDecoder() self.batch_size = 6 @@ -38,6 +48,9 @@ class LicensePlateProcessingMixin: self.box_thresh = 0.6 self.mask_thresh = 0.6 + # matching + self.similarity_threshold = 0.8 + def _detect(self, image: np.ndarray) -> List[np.ndarray]: """ Detect possible license plates in the input image by first resizing and normalizing it, @@ -197,11 +210,8 @@ class LicensePlateProcessingMixin: # set to True to write each cropped image for debugging if False: - save_image = cv2.cvtColor( - plate_images[original_idx], cv2.COLOR_RGB2BGR - ) filename = f"debug/frames/plate_{original_idx}_{plate}_{area}.jpg" - cv2.imwrite(filename, save_image) + cv2.imwrite(filename, plate_images[original_idx]) license_plates[original_idx] = plate average_confidences[original_idx] = average_confidence @@ -320,7 +330,7 @@ class LicensePlateProcessingMixin: # Use pyclipper to shrink the polygon slightly based on the computed distance. offset = PyclipperOffset() offset.AddPath(points, JT_ROUND, ET_CLOSEDPOLYGON) - points = np.array(offset.Execute(distance * 1.75)).reshape((-1, 1, 2)) + points = np.array(offset.Execute(distance * 1.5)).reshape((-1, 1, 2)) # get the minimum bounding box around the shrunken polygon. box, min_side = self._get_min_boxes(points) @@ -624,6 +634,47 @@ class LicensePlateProcessingMixin: assert image.shape[2] == input_shape[0], "Unexpected number of image channels." 
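+        # Noise heuristic used below: the variance of the Laplacian is a proxy
+        # for high-frequency content, so a high variance on a dim frame points
+        # to sensor noise (e.g. night-time gain) rather than real plate detail.
+        # The brightness check keeps bright, detailed crops from being
+        # needlessly blurred by the bilateral filter.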
+ # convert to grayscale + if image.shape[2] == 3: + gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY) + else: + gray = image + + # detect noise with Laplacian variance + laplacian = cv2.Laplacian(gray, cv2.CV_64F) + noise_variance = np.var(laplacian) + brightness = cv2.mean(gray)[0] + noise_threshold = 70 + brightness_threshold = 150 + is_noisy = ( + noise_variance > noise_threshold and brightness < brightness_threshold + ) + + # apply bilateral filter and sharpening only if noisy + if is_noisy: + logger.debug( + f"Noise detected (variance: {noise_variance:.1f}, brightness: {brightness:.1f}) - denoising" + ) + smoothed = cv2.bilateralFilter(gray, d=15, sigmaColor=100, sigmaSpace=100) + sharpening_kernel = np.array([[-1, -1, -1], [-1, 9, -1], [-1, -1, -1]]) + processed = cv2.filter2D(smoothed, -1, sharpening_kernel) + else: + logger.debug( + f"No noise detected (variance: {noise_variance:.1f}, brightness: {brightness:.1f}) - skipping denoising and sharpening" + ) + processed = gray + + # apply CLAHE for contrast enhancement + grid_size = ( + max(4, input_w // 40), + max(4, input_h // 40), + ) + clahe = cv2.createCLAHE(clipLimit=1.5, tileGridSize=grid_size) + enhanced = clahe.apply(processed) + + # Convert back to 3-channel for model compatibility + image = cv2.cvtColor(enhanced, cv2.COLOR_GRAY2RGB) + # dynamically adjust input width based on max_wh_ratio input_w = int(input_h * max_wh_ratio) @@ -649,6 +700,13 @@ class LicensePlateProcessingMixin: ) padded_image[:, :, :resized_w] = resized_image + if False: + current_time = int(datetime.datetime.now().timestamp() * 1000) + cv2.imwrite( + f"debug/frames/preprocessed_recognition_{current_time}.jpg", + image, + ) + return padded_image @staticmethod @@ -710,18 +768,38 @@ class LicensePlateProcessingMixin: top_score = -1 top_box = None + img_h, img_w = input.shape[0], input.shape[1] + + # Calculate resized dimensions and padding based on _preprocess_inputs + if img_w > img_h: + resized_h = int(((img_h / img_w) * LPR_EMBEDDING_SIZE) // 4 * 4) + resized_w = LPR_EMBEDDING_SIZE + x_offset = (LPR_EMBEDDING_SIZE - resized_w) // 2 + y_offset = (LPR_EMBEDDING_SIZE - resized_h) // 2 + scale_x = img_w / resized_w + scale_y = img_h / resized_h + else: + resized_w = int(((img_w / img_h) * LPR_EMBEDDING_SIZE) // 4 * 4) + resized_h = LPR_EMBEDDING_SIZE + x_offset = (LPR_EMBEDDING_SIZE - resized_w) // 2 + y_offset = (LPR_EMBEDDING_SIZE - resized_h) // 2 + scale_x = img_w / resized_w + scale_y = img_h / resized_h + # Loop over predictions for prediction in predictions: score = prediction[6] if score >= confidence_threshold: bbox = prediction[1:5] - # Scale boxes back to original image size - scale_x = input.shape[1] / 256 - scale_y = input.shape[0] / 256 - bbox[0] *= scale_x - bbox[1] *= scale_y - bbox[2] *= scale_x - bbox[3] *= scale_y + # Adjust for padding and scale to original image + bbox[0] = (bbox[0] - x_offset) * scale_x + bbox[1] = (bbox[1] - y_offset) * scale_y + bbox[2] = (bbox[2] - x_offset) * scale_x + bbox[3] = (bbox[3] - y_offset) * scale_y + + if score > top_score: + top_score = score + top_box = bbox if score > top_score: top_score = score @@ -729,8 +807,8 @@ class LicensePlateProcessingMixin: # Return the top scoring bounding box if found if top_box is not None: - # expand box by 30% to help with OCR - expansion = (top_box[2:] - top_box[:2]) * 0.30 + # expand box by 5% to help with OCR + expansion = (top_box[2:] - top_box[:2]) * 0.05 # Expand box expanded_box = np.array( @@ -750,6 +828,7 @@ class LicensePlateProcessingMixin: def 
_should_keep_previous_plate( self, id, top_plate, top_char_confidences, top_area, avg_confidence ): + """Determine if the previous plate should be kept over the current one.""" if id not in self.detected_license_plates: return False @@ -764,68 +843,88 @@ class LicensePlateProcessingMixin: ) # 1. Normalize metrics - # Length score - use relative comparison - # If lengths are equal, score is 0.5 for both - # If one is longer, it gets a higher score up to 1.0 - max_length_diff = 4 # Maximum expected difference in plate lengths + # Length score: Equal lengths = 0.5, penalize extra characters if low confidence length_diff = len(top_plate) - len(prev_plate) - curr_length_score = 0.5 + ( - length_diff / (2 * max_length_diff) - ) # Normalize to 0-1 - curr_length_score = max(0, min(1, curr_length_score)) # Clamp to 0-1 - prev_length_score = 1 - curr_length_score # Inverse relationship + max_length_diff = 3 + curr_length_score = 0.5 + (length_diff / (2 * max_length_diff)) + curr_length_score = max(0, min(1, curr_length_score)) + prev_length_score = 0.5 - (length_diff / (2 * max_length_diff)) + prev_length_score = max(0, min(1, prev_length_score)) - # Area score (normalize based on max of current and previous) + # Adjust length score based on confidence of extra characters + conf_threshold = 0.75 # Minimum confidence for a character to be "trusted" + if len(top_plate) > len(prev_plate): + extra_conf = min( + top_char_confidences[len(prev_plate) :] + ) # Lowest extra char confidence + if extra_conf < conf_threshold: + curr_length_score *= extra_conf / conf_threshold # Penalize if weak + elif len(prev_plate) > len(top_plate): + extra_conf = min(prev_char_confidences[len(top_plate) :]) + if extra_conf < conf_threshold: + prev_length_score *= extra_conf / conf_threshold + + # Area score: Normalize by max area max_area = max(top_area, prev_area) - curr_area_score = top_area / max_area - prev_area_score = prev_area / max_area + curr_area_score = top_area / max_area if max_area > 0 else 0 + prev_area_score = prev_area / max_area if max_area > 0 else 0 - # Average confidence score (already normalized 0-1) + # Confidence scores curr_conf_score = avg_confidence prev_conf_score = prev_avg_confidence - # Character confidence comparison score + # Character confidence comparison (average over shared length) min_length = min(len(top_plate), len(prev_plate)) if min_length > 0: curr_char_conf = sum(top_char_confidences[:min_length]) / min_length prev_char_conf = sum(prev_char_confidences[:min_length]) / min_length else: - curr_char_conf = 0 - prev_char_conf = 0 + curr_char_conf = prev_char_conf = 0 - # 2. Define weights + # Penalize any character below threshold + curr_min_conf = min(top_char_confidences) if top_char_confidences else 0 + prev_min_conf = min(prev_char_confidences) if prev_char_confidences else 0 + curr_conf_penalty = ( + 1.0 if curr_min_conf >= conf_threshold else (curr_min_conf / conf_threshold) + ) + prev_conf_penalty = ( + 1.0 if prev_min_conf >= conf_threshold else (prev_min_conf / conf_threshold) + ) + + # 2. Define weights (boost confidence importance) weights = { - "length": 0.4, - "area": 0.3, - "avg_confidence": 0.2, - "char_confidence": 0.1, + "length": 0.2, + "area": 0.2, + "avg_confidence": 0.35, + "char_confidence": 0.25, } - # 3. Calculate weighted scores + # 3. 
Calculate weighted scores with penalty curr_score = ( curr_length_score * weights["length"] + curr_area_score * weights["area"] + curr_conf_score * weights["avg_confidence"] + curr_char_conf * weights["char_confidence"] - ) + ) * curr_conf_penalty prev_score = ( prev_length_score * weights["length"] + prev_area_score * weights["area"] + prev_conf_score * weights["avg_confidence"] + prev_char_conf * weights["char_confidence"] - ) + ) * prev_conf_penalty - # 4. Log the comparison for debugging + # 4. Log the comparison logger.debug( - f"Plate comparison - Current plate: {top_plate} (score: {curr_score:.3f}) vs " - f"Previous plate: {prev_plate} (score: {prev_score:.3f})\n" + f"Plate comparison - Current: {top_plate} (score: {curr_score:.3f}, min_conf: {curr_min_conf:.2f}) vs " + f"Previous: {prev_plate} (score: {prev_score:.3f}, min_conf: {prev_min_conf:.2f})\n" f"Metrics - Length: {len(top_plate)} vs {len(prev_plate)} (scores: {curr_length_score:.2f} vs {prev_length_score:.2f}), " f"Area: {top_area} vs {prev_area}, " - f"Avg Conf: {avg_confidence:.2f} vs {prev_avg_confidence:.2f}" + f"Avg Conf: {avg_confidence:.2f} vs {prev_avg_confidence:.2f}, " + f"Char Conf: {curr_char_conf:.2f} vs {prev_char_conf:.2f}" ) - # 5. Return True if we should keep the previous plate (i.e., if it scores higher) + # 5. Return True if previous plate scores higher return prev_score > curr_score def __update_yolov9_metrics(self, duration: float) -> None: @@ -842,57 +941,55 @@ class LicensePlateProcessingMixin: """ self.metrics.alpr_pps.value = (self.metrics.alpr_pps.value * 9 + duration) / 10 - def lpr_process(self, obj_data: dict[str, any], frame: np.ndarray): + def _generate_plate_event(self, camera: str, plate: str, plate_score: float) -> str: + """Generate a unique ID for a plate event based on camera and text.""" + now = datetime.datetime.now().timestamp() + rand_id = "".join(random.choices(string.ascii_lowercase + string.digits, k=6)) + event_id = f"{now}-{rand_id}" + + self.event_metadata_publisher.publish( + EventMetadataTypeEnum.lpr_event_create, + ( + now, + camera, + "car", + event_id, + True, + plate_score, + None, + plate, + ), + ) + return event_id + + def lpr_process( + self, obj_data: dict[str, any], frame: np.ndarray, dedicated_lpr: bool = False + ): """Look for license plates in image.""" - if not self.config.cameras[obj_data["camera"]].lpr.enabled: + camera = obj_data if dedicated_lpr else obj_data["camera"] + current_time = int(datetime.datetime.now().timestamp()) + + if not self.config.cameras[camera].lpr.enabled: return - id = obj_data["id"] - - # don't run for non car objects - if obj_data.get("label") != "car": - logger.debug("Not a processing license plate for non car object.") + if not dedicated_lpr and self.config.cameras[camera].type == CameraTypeEnum.lpr: return - # don't run for stationary car objects - if obj_data.get("stationary") == True: - logger.debug("Not a processing license plate for a stationary car object.") - return - - # don't overwrite sub label for objects that have a sub label - # that is not a license plate - if obj_data.get("sub_label") and id not in self.detected_license_plates: - logger.debug( - f"Not processing license plate due to existing sub label: {obj_data.get('sub_label')}." 
- ) - return - - license_plate: Optional[dict[str, any]] = None - - if self.requires_license_plate_detection: - logger.debug("Running manual license_plate detection.") - - car_box = obj_data.get("box") - - if not car_box: - return - + if dedicated_lpr: rgb = cv2.cvtColor(frame, cv2.COLOR_YUV2BGR_I420) - left, top, right, bottom = car_box - car = rgb[top:bottom, left:right] - # double the size of the car for better box detection - car = cv2.resize(car, (int(2 * car.shape[1]), int(2 * car.shape[0]))) + # apply motion mask + rgb[self.config.cameras[obj_data].motion.mask == 0] = [0, 0, 0] if WRITE_DEBUG_IMAGES: - current_time = int(datetime.datetime.now().timestamp()) cv2.imwrite( - f"debug/frames/car_frame_{current_time}.jpg", - car, + f"debug/frames/dedicated_lpr_masked_{current_time}.jpg", + rgb, ) yolov9_start = datetime.datetime.now().timestamp() - license_plate = self._detect_license_plate(car) + license_plate = self._detect_license_plate(rgb) + logger.debug( f"YOLOv9 LPD inference time: {(datetime.datetime.now().timestamp() - yolov9_start) * 1000:.2f} ms" ) @@ -901,107 +998,185 @@ class LicensePlateProcessingMixin: ) if not license_plate: - logger.debug("Detected no license plates for car object.") + logger.debug("Detected no license plates in full frame.") return - license_plate_area = max( - 0, - (license_plate[2] - license_plate[0]) - * (license_plate[3] - license_plate[1]), + license_plate_area = (license_plate[2] - license_plate[0]) * ( + license_plate[3] - license_plate[1] ) - - # check that license plate is valid - # double the value because we've doubled the size of the car - if ( - license_plate_area - < self.config.cameras[obj_data["camera"]].lpr.min_area * 2 - ): - logger.debug("License plate is less than min_area") + if license_plate_area < self.lpr_config.min_area: + logger.debug("License plate area below minimum threshold.") return - license_plate_frame = car[ - license_plate[1] : license_plate[3], license_plate[0] : license_plate[2] - ] - else: - # don't run for object without attributes - if not obj_data.get("current_attributes"): - logger.debug("No attributes to parse.") - return - - attributes: list[dict[str, any]] = obj_data.get("current_attributes", []) - for attr in attributes: - if attr.get("label") != "license_plate": - continue - - if license_plate is None or attr.get("score", 0.0) > license_plate.get( - "score", 0.0 - ): - license_plate = attr - - # no license plates detected in this frame - if not license_plate: - return - - license_plate_box = license_plate.get("box") - - # check that license plate is valid - if ( - not license_plate_box - or area(license_plate_box) - < self.config.cameras[obj_data["camera"]].lpr.min_area - ): - logger.debug(f"Invalid license plate box {license_plate}") - return - - license_plate_frame = cv2.cvtColor(frame, cv2.COLOR_YUV2BGR_I420) - - # Expand the license_plate_box by 30% - box_array = np.array(license_plate_box) - expansion = (box_array[2:] - box_array[:2]) * 0.30 - expanded_box = np.array( - [ - license_plate_box[0] - expansion[0], - license_plate_box[1] - expansion[1], - license_plate_box[2] + expansion[0], - license_plate_box[3] + expansion[1], - ] - ).clip(0, [license_plate_frame.shape[1], license_plate_frame.shape[0]] * 2) - - # Crop using the expanded box - license_plate_frame = license_plate_frame[ - int(expanded_box[1]) : int(expanded_box[3]), - int(expanded_box[0]) : int(expanded_box[2]), + license_plate_frame = rgb[ + license_plate[1] : license_plate[3], + license_plate[0] : license_plate[2], ] - # double the size 
of the license plate frame for better OCR - license_plate_frame = cv2.resize( - license_plate_frame, - ( - int(2 * license_plate_frame.shape[1]), - int(2 * license_plate_frame.shape[0]), - ), - ) - - if WRITE_DEBUG_IMAGES: - current_time = int(datetime.datetime.now().timestamp()) - cv2.imwrite( - f"debug/frames/license_plate_frame_{current_time}.jpg", + # Double the size for better OCR + license_plate_frame = cv2.resize( license_plate_frame, + ( + int(2 * license_plate_frame.shape[1]), + int(2 * license_plate_frame.shape[0]), + ), ) - start = datetime.datetime.now().timestamp() + else: + id = obj_data["id"] + + # don't run for non car objects + if obj_data.get("label") != "car": + logger.debug("Not a processing license plate for non car object.") + return + + # don't run for stationary car objects + if obj_data.get("stationary") == True: + logger.debug( + "Not a processing license plate for a stationary car object." + ) + return + + # don't overwrite sub label for objects that have a sub label + # that is not a license plate + if obj_data.get("sub_label") and id not in self.detected_license_plates: + logger.debug( + f"Not processing license plate due to existing sub label: {obj_data.get('sub_label')}." + ) + return + + license_plate: Optional[dict[str, any]] = None + + if self.requires_license_plate_detection: + logger.debug("Running manual license_plate detection.") + + car_box = obj_data.get("box") + + if not car_box: + return + + rgb = cv2.cvtColor(frame, cv2.COLOR_YUV2BGR_I420) + left, top, right, bottom = car_box + car = rgb[top:bottom, left:right] + + # double the size of the car for better box detection + car = cv2.resize(car, (int(2 * car.shape[1]), int(2 * car.shape[0]))) + + if WRITE_DEBUG_IMAGES: + cv2.imwrite( + f"debug/frames/car_frame_{current_time}.jpg", + car, + ) + + yolov9_start = datetime.datetime.now().timestamp() + license_plate = self._detect_license_plate(car) + logger.debug( + f"YOLOv9 LPD inference time: {(datetime.datetime.now().timestamp() - yolov9_start) * 1000:.2f} ms" + ) + self.__update_yolov9_metrics( + datetime.datetime.now().timestamp() - yolov9_start + ) + + if not license_plate: + logger.debug("Detected no license plates for car object.") + return + + license_plate_area = max( + 0, + (license_plate[2] - license_plate[0]) + * (license_plate[3] - license_plate[1]), + ) + + # check that license plate is valid + # double the value because we've doubled the size of the car + if ( + license_plate_area + < self.config.cameras[obj_data["camera"]].lpr.min_area * 2 + ): + logger.debug("License plate is less than min_area") + return + + license_plate_frame = car[ + license_plate[1] : license_plate[3], + license_plate[0] : license_plate[2], + ] + else: + # don't run for object without attributes + if not obj_data.get("current_attributes"): + logger.debug("No attributes to parse.") + return + + attributes: list[dict[str, any]] = obj_data.get( + "current_attributes", [] + ) + for attr in attributes: + if attr.get("label") != "license_plate": + continue + + if license_plate is None or attr.get( + "score", 0.0 + ) > license_plate.get("score", 0.0): + license_plate = attr + + # no license plates detected in this frame + if not license_plate: + return + + license_plate_box = license_plate.get("box") + + # check that license plate is valid + if ( + not license_plate_box + or area(license_plate_box) + < self.config.cameras[obj_data["camera"]].lpr.min_area + ): + logger.debug(f"Invalid license plate box {license_plate}") + return + + license_plate_frame = cv2.cvtColor(frame, 
cv2.COLOR_YUV2BGR_I420) + + # Expand the license_plate_box by 30% + box_array = np.array(license_plate_box) + expansion = (box_array[2:] - box_array[:2]) * 0.30 + expanded_box = np.array( + [ + license_plate_box[0] - expansion[0], + license_plate_box[1] - expansion[1], + license_plate_box[2] + expansion[0], + license_plate_box[3] + expansion[1], + ] + ).clip( + 0, [license_plate_frame.shape[1], license_plate_frame.shape[0]] * 2 + ) + + # Crop using the expanded box + license_plate_frame = license_plate_frame[ + int(expanded_box[1]) : int(expanded_box[3]), + int(expanded_box[0]) : int(expanded_box[2]), + ] + + # double the size of the license plate frame for better OCR + license_plate_frame = cv2.resize( + license_plate_frame, + ( + int(2 * license_plate_frame.shape[1]), + int(2 * license_plate_frame.shape[0]), + ), + ) + + if WRITE_DEBUG_IMAGES: + cv2.imwrite( + f"debug/frames/license_plate_frame_{current_time}.jpg", + license_plate_frame, + ) # run detection, returns results sorted by confidence, best first + start = datetime.datetime.now().timestamp() license_plates, confidences, areas = self._process_license_plate( license_plate_frame ) - self.__update_lpr_metrics(datetime.datetime.now().timestamp() - start) - logger.debug(f"Text boxes: {license_plates}") - logger.debug(f"Confidences: {confidences}") - logger.debug(f"Areas: {areas}") - if license_plates: for plate, confidence, text_area in zip(license_plates, confidences, areas): avg_confidence = ( @@ -1012,7 +1187,6 @@ class LicensePlateProcessingMixin: f"Detected text: {plate} (average confidence: {avg_confidence:.2f}, area: {text_area} pixels)" ) else: - # no plates found logger.debug("No text detected") return @@ -1027,6 +1201,46 @@ class LicensePlateProcessingMixin: else 0 ) + # Check against minimum confidence threshold + if avg_confidence < self.lpr_config.recognition_threshold: + logger.debug( + f"Average confidence {avg_confidence} is less than threshold ({self.lpr_config.recognition_threshold})" + ) + return + + # For LPR cameras, match or assign plate ID using Jaro-Winkler distance + if dedicated_lpr: + plate_id = None + + for existing_id, data in self.detected_license_plates.items(): + if ( + data["camera"] == camera + and data["last_seen"] is not None + and current_time - data["last_seen"] + <= self.config.cameras[camera].lpr.expire_time + ): + similarity = jaro_winkler(data["plate"], top_plate) + if similarity >= self.similarity_threshold: + plate_id = existing_id + logger.debug( + f"Matched plate {top_plate} to {data['plate']} (similarity: {similarity:.3f})" + ) + break + if plate_id is None: + plate_id = self._generate_plate_event( + obj_data, top_plate, avg_confidence + ) + logger.debug( + f"New plate event for dedicated LPR camera {plate_id}: {top_plate}" + ) + else: + logger.debug( + f"Matched existing plate event for dedicated LPR camera {plate_id}: {top_plate}" + ) + self.detected_license_plates[plate_id]["last_seen"] = current_time + + id = plate_id + # Check if we have a previously detected plate for this ID if id in self.detected_license_plates: if self._should_keep_previous_plate( @@ -1035,13 +1249,6 @@ class LicensePlateProcessingMixin: logger.debug("Keeping previous plate") return - # Check against minimum confidence threshold - if avg_confidence < self.lpr_config.recognition_threshold: - logger.debug( - f"Average confidence {avg_confidence} is less than threshold ({self.lpr_config.recognition_threshold})" - ) - return - # Determine subLabel based on known plates, use regex matching # Default to the detected 
plate, use label name if there's a match sub_label = next( @@ -1068,11 +1275,23 @@ class LicensePlateProcessingMixin: (id, top_plate, avg_confidence), ) + if dedicated_lpr: + # save the best snapshot + logger.debug(f"Writing snapshot for {id}, {top_plate}, {current_time}") + frame_bgr = cv2.cvtColor(frame, cv2.COLOR_YUV2BGR_I420) + _, buffer = cv2.imencode(".jpg", frame_bgr) + self.sub_label_publisher.publish( + EventMetadataTypeEnum.save_lpr_snapshot, + (base64.b64encode(buffer).decode("ASCII"), id, camera), + ) + self.detected_license_plates[id] = { "plate": top_plate, "char_confidences": top_char_confidences, "area": top_area, "obj_data": obj_data, + "camera": camera, + "last_seen": current_time if dedicated_lpr else None, } def handle_request(self, topic, request_data) -> dict[str, any] | None: diff --git a/frigate/data_processing/real_time/license_plate.py b/frigate/data_processing/real_time/license_plate.py index d2cb9f2a5..95d53a343 100644 --- a/frigate/data_processing/real_time/license_plate.py +++ b/frigate/data_processing/real_time/license_plate.py @@ -35,9 +35,14 @@ class LicensePlateRealTimeProcessor(LicensePlateProcessingMixin, RealTimeProcess self.sub_label_publisher = sub_label_publisher super().__init__(config, metrics) - def process_frame(self, obj_data: dict[str, any], frame: np.ndarray): + def process_frame( + self, + obj_data: dict[str, any], + frame: np.ndarray, + dedicated_lpr: bool | None = False, + ): """Look for license plates in image.""" - self.lpr_process(obj_data, frame) + self.lpr_process(obj_data, frame, dedicated_lpr) def handle_request(self, topic, request_data) -> dict[str, any] | None: return diff --git a/frigate/embeddings/maintainer.py b/frigate/embeddings/maintainer.py index 2fa3eeb2c..0d8d22762 100644 --- a/frigate/embeddings/maintainer.py +++ b/frigate/embeddings/maintainer.py @@ -1,6 +1,7 @@ """Maintain embeddings in SQLite-vec.""" import base64 +import datetime import logging import os import threading @@ -13,6 +14,7 @@ import numpy as np from peewee import DoesNotExist from playhouse.sqliteq import SqliteQueueDatabase +from frigate.comms.detections_updater import DetectionSubscriber, DetectionTypeEnum from frigate.comms.embeddings_updater import EmbeddingsRequestEnum, EmbeddingsResponder from frigate.comms.event_metadata_updater import ( EventMetadataPublisher, @@ -26,6 +28,7 @@ from frigate.comms.recordings_updater import ( RecordingsDataTypeEnum, ) from frigate.config import FrigateConfig +from frigate.config.camera.camera import CameraTypeEnum from frigate.const import ( CLIPS_DIR, UPDATE_EVENT_DESCRIPTION, @@ -97,6 +100,7 @@ class EmbeddingMaintainer(threading.Thread): self.recordings_subscriber = RecordingsDataSubscriber( RecordingsDataTypeEnum.recordings_available_through ) + self.detection_subscriber = DetectionSubscriber(DetectionTypeEnum.video) self.embeddings_responder = EmbeddingsResponder() self.frame_manager = SharedMemoryFrameManager() @@ -162,12 +166,15 @@ class EmbeddingMaintainer(threading.Thread): self._process_requests() self._process_updates() self._process_recordings_updates() + self._process_dedicated_lpr() + self._expire_dedicated_lpr() self._process_finalized() self._process_event_metadata() self.event_subscriber.stop() self.event_end_subscriber.stop() self.recordings_subscriber.stop() + self.detection_subscriber.stop() self.event_metadata_publisher.stop() self.event_metadata_subscriber.stop() self.embeddings_responder.stop() @@ -317,6 +324,7 @@ class EmbeddingMaintainer(threading.Thread): if ( recordings_available is not 
None
             and event_id in self.detected_license_plates
+            and self.config.cameras[camera].type != "lpr"
         ):
             processor.process_data(
                 {
@@ -374,6 +382,26 @@
         if event_id in self.tracked_events:
             del self.tracked_events[event_id]
 
+    def _expire_dedicated_lpr(self) -> None:
+        """Remove plates not seen for longer than expiration timeout for dedicated lpr cameras."""
+        now = datetime.datetime.now().timestamp()
+
+        to_remove = []
+
+        for id, data in self.detected_license_plates.items():
+            last_seen = data.get("last_seen", 0)
+            if not last_seen:
+                continue
+
+            if now - last_seen > self.config.cameras[data["camera"]].lpr.expire_time:
+                to_remove.append(id)
+        for id in to_remove:
+            self.event_metadata_publisher.publish(
+                EventMetadataTypeEnum.manual_event_end,
+                (id, now),
+            )
+            self.detected_license_plates.pop(id)
+
     def _process_recordings_updates(self) -> None:
         """Process recordings updates."""
         while True:
@@ -406,6 +434,42 @@
                 event_id, RegenerateDescriptionEnum(source)
             )
 
+    def _process_dedicated_lpr(self) -> None:
+        """Run LPR on full frames from dedicated LPR cameras when motion occurs."""
+        (topic, data) = self.detection_subscriber.check_for_update(timeout=0.01)
+
+        if topic is None:
+            return
+
+        camera, frame_name, _, _, motion_boxes, _ = data
+
+        if not camera or not self.config.lpr.enabled or len(motion_boxes) == 0:
+            return
+
+        camera_config = self.config.cameras[camera]
+
+        if not camera_config.type == CameraTypeEnum.lpr:
+            return
+
+        try:
+            yuv_frame = self.frame_manager.get(
+                frame_name, camera_config.frame_shape_yuv
+            )
+        except FileNotFoundError:
+            yuv_frame = None
+
+        if yuv_frame is None:
+            logger.debug(
+                "Unable to process dedicated LPR update because frame is unavailable."
+            )
+            return
+
+        for processor in self.realtime_processors:
+            if isinstance(processor, LicensePlateRealTimeProcessor):
+                processor.process_frame(camera, yuv_frame, True)
+
+        self.frame_manager.close(frame_name)
+
     def _create_thumbnail(self, yuv_frame, box, height=500) -> Optional[bytes]:
         """Return jpg thumbnail of a region of the frame."""
         frame = cv2.cvtColor(yuv_frame, cv2.COLOR_YUV2BGR_I420)
diff --git a/frigate/events/maintainer.py b/frigate/events/maintainer.py
index 7788c83e9..844039b0d 100644
--- a/frigate/events/maintainer.py
+++ b/frigate/events/maintainer.py
@@ -278,6 +278,13 @@ class EventProcessor(threading.Thread):
                     "top_score": event_data["score"],
                 },
             }
+            if event_data.get("recognized_license_plate") is not None:
+                event[Event.data]["recognized_license_plate"] = event_data[
+                    "recognized_license_plate"
+                ]
+                event[Event.data]["recognized_license_plate_score"] = event_data[
+                    "score"
+                ]
             Event.insert(event).execute()
         elif event_type == EventStateEnum.end:
             event = {
diff --git a/frigate/record/maintainer.py b/frigate/record/maintainer.py
index 1cabbfdda..9f08f88a9 100644
--- a/frigate/record/maintainer.py
+++ b/frigate/record/maintainer.py
@@ -577,7 +577,7 @@ class RecordingMaintainer(threading.Thread):
                         audio_detections,
                     )
                 )
-            elif topic == DetectionTypeEnum.api:
+            elif topic == DetectionTypeEnum.api or topic == DetectionTypeEnum.lpr:
                 continue
 
             if frame_time < run_start - stale_frame_count_threshold:
diff --git a/frigate/review/maintainer.py b/frigate/review/maintainer.py
index d9e8bdaa9..52f0448f6 100644
--- a/frigate/review/maintainer.py
+++ b/frigate/review/maintainer.py
@@ -513,7 +513,7 @@ class ReviewSegmentMaintainer(threading.Thread):
                     _,
                     audio_detections,
                 ) = data
-            elif topic == DetectionTypeEnum.api:
+            elif topic == DetectionTypeEnum.api or topic == DetectionTypeEnum.lpr:
                 (
                     camera, 
frame_time, @@ -572,13 +572,21 @@ class ReviewSegmentMaintainer(threading.Thread): or audio in camera_config.review.detections.labels ) and camera_config.review.detections.enabled: current_segment.audio.add(audio) - elif topic == DetectionTypeEnum.api: + elif topic == DetectionTypeEnum.api or topic == DetectionTypeEnum.lpr: if manual_info["state"] == ManualEventState.complete: current_segment.detections[manual_info["event_id"]] = ( manual_info["label"] ) - if self.config.cameras[camera].review.alerts.enabled: + if ( + topic == DetectionTypeEnum.api + and self.config.cameras[camera].review.alerts.enabled + ): current_segment.severity = SeverityEnum.alert + elif ( + topic == DetectionTypeEnum.lpr + and self.config.cameras[camera].review.detections.enabled + ): + current_segment.severity = SeverityEnum.detection current_segment.last_update = manual_info["end_time"] elif manual_info["state"] == ManualEventState.start: self.indefinite_events[camera][manual_info["event_id"]] = ( @@ -587,8 +595,16 @@ class ReviewSegmentMaintainer(threading.Thread): current_segment.detections[manual_info["event_id"]] = ( manual_info["label"] ) - if self.config.cameras[camera].review.alerts.enabled: + if ( + topic == DetectionTypeEnum.api + and self.config.cameras[camera].review.alerts.enabled + ): current_segment.severity = SeverityEnum.alert + elif ( + topic == DetectionTypeEnum.lpr + and self.config.cameras[camera].review.detections.enabled + ): + current_segment.severity = SeverityEnum.detection # temporarily make it so this event can not end current_segment.last_update = sys.maxsize @@ -676,6 +692,34 @@ class ReviewSegmentMaintainer(threading.Thread): logger.warning( f"Manual event API has been called for {camera}, but alerts are disabled. This manual event will not appear as an alert." ) + elif topic == DetectionTypeEnum.lpr: + if self.config.cameras[camera].review.detections.enabled: + self.active_review_segments[camera] = PendingReviewSegment( + camera, + frame_time, + SeverityEnum.detection, + {manual_info["event_id"]: manual_info["label"]}, + {}, + [], + set(), + ) + + if manual_info["state"] == ManualEventState.start: + self.indefinite_events[camera][manual_info["event_id"]] = ( + manual_info["label"] + ) + # temporarily make it so this event can not end + self.active_review_segments[ + camera + ].last_update = sys.maxsize + elif manual_info["state"] == ManualEventState.complete: + self.active_review_segments[ + camera + ].last_update = manual_info["end_time"] + else: + logger.warning( + f"Dedicated LPR camera API has been called for {camera}, but detections are disabled. LPR events will not appear as a detection." 
+ ) self.record_config_subscriber.stop() self.review_config_subscriber.stop() diff --git a/frigate/track/object_processing.py b/frigate/track/object_processing.py index 4593da5a4..35bca4d5e 100644 --- a/frigate/track/object_processing.py +++ b/frigate/track/object_processing.py @@ -1,3 +1,4 @@ +import base64 import datetime import json import logging @@ -7,6 +8,7 @@ from collections import defaultdict from enum import Enum from multiprocessing.synchronize import Event as MpEvent +import cv2 import numpy as np from peewee import DoesNotExist @@ -394,6 +396,19 @@ class TrackedObjectProcessor(threading.Thread): return True + def save_lpr_snapshot(self, payload: tuple) -> None: + # save the snapshot image + (frame, event_id, camera) = payload + + img = cv2.imdecode( + np.frombuffer(base64.b64decode(frame), dtype=np.uint8), + cv2.IMREAD_COLOR, + ) + + self.camera_states[camera].save_manual_event_image( + img, event_id, "license_plate", {} + ) + def create_manual_event(self, payload: tuple) -> None: ( frame_time, @@ -409,7 +424,9 @@ class TrackedObjectProcessor(threading.Thread): ) = payload # save the snapshot image - self.camera_states[camera_name].save_manual_event_image(event_id, label, draw) + self.camera_states[camera_name].save_manual_event_image( + None, event_id, label, draw + ) end_time = frame_time + duration if duration is not None else None # send event to event maintainer @@ -456,6 +473,59 @@ class TrackedObjectProcessor(threading.Thread): DetectionTypeEnum.api.value, ) + def create_lpr_event(self, payload: tuple) -> None: + ( + frame_time, + camera_name, + label, + event_id, + include_recording, + score, + sub_label, + plate, + ) = payload + + # send event to event maintainer + self.event_sender.publish( + ( + EventTypeEnum.api, + EventStateEnum.start, + camera_name, + "", + { + "id": event_id, + "label": label, + "sub_label": sub_label, + "score": score, + "camera": camera_name, + "start_time": frame_time + - self.config.cameras[camera_name].record.event_pre_capture, + "end_time": None, + "has_clip": self.config.cameras[camera_name].record.enabled + and include_recording, + "has_snapshot": True, + "type": "api", + "recognized_license_plate": plate, + "recognized_license_plate_score": score, + }, + ) + ) + + self.ongoing_manual_events[event_id] = camera_name + self.detection_publisher.publish( + ( + camera_name, + frame_time, + { + "state": ManualEventState.start, + "label": f"{label}: {sub_label}" if sub_label else label, + "event_id": event_id, + "end_time": None, + }, + ), + DetectionTypeEnum.lpr.value, + ) + def end_manual_event(self, payload: tuple) -> None: (event_id, end_time) = payload @@ -560,6 +630,10 @@ class TrackedObjectProcessor(threading.Thread): self.set_recognized_license_plate( event_id, recognized_license_plate, score ) + elif topic.endswith(EventMetadataTypeEnum.lpr_event_create.value): + self.create_lpr_event(payload) + elif topic.endswith(EventMetadataTypeEnum.save_lpr_snapshot.value): + self.save_lpr_snapshot(payload) elif topic.endswith(EventMetadataTypeEnum.manual_event_create.value): self.create_manual_event(payload) elif topic.endswith(EventMetadataTypeEnum.manual_event_end.value): From 18af06237c301cfb6d08eb4b07a97fefe5a5cb0b Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Sun, 23 Mar 2025 14:02:16 -0600 Subject: [PATCH 37/97] Support RF-DETR models with OpenVINO (#17321) * Add support for openvino to run rf-detr models * Add more inference time examples * organize * Add example to docs * Add support for yolo generic --- 
docs/docs/configuration/object_detectors.md | 27 +++++++++++
 docs/docs/frigate/hardware.md               | 52 ++++++++++-----------
 frigate/detectors/plugins/onnx.py           | 5 +-
 frigate/detectors/plugins/openvino.py       | 22 +++++++--
 4 files changed, 74 insertions(+), 32 deletions(-)

diff --git a/docs/docs/configuration/object_detectors.md b/docs/docs/configuration/object_detectors.md
index 174343ef4..e027596d2 100644
--- a/docs/docs/configuration/object_detectors.md
+++ b/docs/docs/configuration/object_detectors.md
@@ -340,6 +340,33 @@ model:
 
 Note that the labelmap uses a subset of the complete COCO label set that has only 80 objects.
 
+#### RF-DETR
+
+[RF-DETR](https://github.com/roboflow/rf-detr) is a DETR based model. The ONNX exported models are supported, but not included by default. See [the models section](#downloading-rf-detr-model) for more information on downloading the RF-DETR model for use in Frigate.
+
+:::warning
+
+Due to the size and complexity of the RF-DETR model, it is only recommended to be run with discrete Arc Graphics Cards.
+
+:::
+
+After placing the downloaded ONNX model in your `config/model_cache` folder, you can use the following configuration:
+
+```yaml
+detectors:
+  ov:
+    type: openvino
+    device: GPU
+
+model:
+  model_type: rfdetr
+  width: 560
+  height: 560
+  input_tensor: nchw
+  input_dtype: float
+  path: /config/model_cache/rfdetr.onnx
+```
+
 #### D-FINE
 
 [D-FINE](https://github.com/Peterande/D-FINE) is a DETR based model. The ONNX exported models are supported, but not included by default. See [the models section](#downloading-d-fine-model) for more information on downloading the D-FINE model for use in Frigate.
diff --git a/docs/docs/frigate/hardware.md b/docs/docs/frigate/hardware.md
index c9bfe16d6..62152996d 100644
--- a/docs/docs/frigate/hardware.md
+++ b/docs/docs/frigate/hardware.md
@@ -40,7 +40,7 @@ Frigate supports multiple different detectors that work on different types of ha
 
 - [Hailo](#hailo-8): The Hailo8 and Hailo8L AI Acceleration module is available in m.2 format with a HAT for RPi devices offering a wide range of compatibility with devices.
   - [Supports many model architectures](../../configuration/object_detectors#configuration)
   - Runs best with tiny or small size models
-  
+
 - [Google Coral EdgeTPU](#google-coral-tpu): The Google Coral EdgeTPU is available in USB and m.2 format allowing for a wide range of compatibility with devices.
   - [Supports primarily ssdlite and mobilenet model architectures](../../configuration/object_detectors#edge-tpu-detector)
 
@@ -89,7 +89,7 @@ In real-world deployments, even with multiple cameras running concurrently, Frig
 
 ### Google Coral TPU
 
-Frigate supports both the USB and M.2 versions of the Google Coral. 
+Frigate supports both the USB and M.2 versions of the Google Coral.
 
 - The USB version is compatible with the widest variety of hardware and does not require a driver on the host machine. However, it does lack the automatic throttling features of the other versions.
 - The PCIe and M.2 versions require installation of a driver on the host. 
Follow the instructions for your version from https://coral.ai @@ -107,23 +107,19 @@ More information is available [in the detector docs](/configuration/object_detec Inference speeds vary greatly depending on the CPU or GPU used, some known examples of GPU inference times are below: -| Name | MobileNetV2 Inference Time | YOLO-NAS Inference Time | Notes | -| -------------------- | -------------------------- | ------------------------- | -------------------------------------- | -| Intel Celeron J4105 | ~ 25 ms | | Can only run one detector instance | -| Intel Celeron N3060 | 130 - 150 ms | | Can only run one detector instance | -| Intel Celeron N3205U | ~ 120 ms | | Can only run one detector instance | -| Intel Celeron N4020 | 50 - 200 ms | | Inference speed depends on other loads | -| Intel i3 6100T | 15 - 35 ms | | Can only run one detector instance | -| Intel i3 8100 | ~ 15 ms | | | -| Intel i5 4590 | ~ 20 ms | | | -| Intel i5 6500 | ~ 15 ms | | | -| Intel i5 7200u | 15 - 25 ms | | | -| Intel i5 7500 | ~ 15 ms | | | -| Intel i5 1135G7 | 10 - 15 ms | | | -| Intel i3 12000 | | 320: ~ 19 ms 640: ~ 54 ms | | -| Intel i5 12600K | ~ 15 ms | 320: ~ 20 ms 640: ~ 46 ms | | -| Intel Arc A380 | ~ 6 ms | 320: ~ 10 ms | | -| Intel Arc A750 | ~ 4 ms | 320: ~ 8 ms | | +| Name | MobileNetV2 Inference Time | YOLO-NAS Inference Time | RF-DETR Inference Time | Notes | +| -------------------- | -------------------------- | ------------------------- | ------------------------- | -------------------------------------- | +| Intel i3 6100T | 15 - 35 ms | | | Can only run one detector instance | +| Intel i5 6500 | ~ 15 ms | | | | +| Intel i5 7200u | 15 - 25 ms | | | | +| Intel i5 7500 | ~ 15 ms | | | | +| Intel i3 8100 | ~ 15 ms | | | | +| Intel i5 1135G7 | 10 - 15 ms | | | | +| Intel i3 12000 | | 320: ~ 19 ms 640: ~ 54 ms | | | +| Intel i5 12600K | ~ 15 ms | 320: ~ 20 ms 640: ~ 46 ms | | | +| Intel i7 12650H | ~ 15 ms | 320: ~ 20 ms 640: ~ 42 ms | 336: 50 ms | | +| Intel Arc A380 | ~ 6 ms | 320: ~ 10 ms | | | +| Intel Arc A750 | ~ 4 ms | 320: ~ 8 ms | | | ### TensorRT - Nvidia GPU @@ -132,15 +128,15 @@ The TensortRT detector is able to run on x86 hosts that have an Nvidia GPU which Inference speeds will vary greatly depending on the GPU and the model used. 
`tiny` variants are faster than the equivalent non-tiny model, some known examples are below: -| Name | YoloV7 Inference Time | YOLO-NAS Inference Time | -| --------------- | --------------------- | ------------------------- | -| GTX 1060 6GB | ~ 7 ms | | -| GTX 1070 | ~ 6 ms | | -| GTX 1660 SUPER | ~ 4 ms | | -| RTX 3050 | 5 - 7 ms | 320: ~ 10 ms 640: ~ 16 ms | -| RTX 3070 Mobile | ~ 5 ms | | -| Quadro P400 2GB | 20 - 25 ms | | -| Quadro P2000 | ~ 12 ms | | +| Name | YoloV7 Inference Time | YOLO-NAS Inference Time | RF-DETR Inference Time | +| --------------- | --------------------- | ------------------------- | ------------------------- | +| GTX 1060 6GB | ~ 7 ms | | | +| GTX 1070 | ~ 6 ms | | | +| GTX 1660 SUPER | ~ 4 ms | | | +| RTX 3050 | 5 - 7 ms | 320: ~ 10 ms 640: ~ 16 ms | 336: ~ 16 ms 560: ~ 40 ms | +| RTX 3070 Mobile | ~ 5 ms | | | +| Quadro P400 2GB | 20 - 25 ms | | | +| Quadro P2000 | ~ 12 ms | | | ### AMD GPUs diff --git a/frigate/detectors/plugins/onnx.py b/frigate/detectors/plugins/onnx.py index 2679185a9..a10447b48 100644 --- a/frigate/detectors/plugins/onnx.py +++ b/frigate/detectors/plugins/onnx.py @@ -97,7 +97,10 @@ class ONNXDetector(DetectionApi): x_max / self.w, ] return detections - elif self.onnx_model_type == ModelTypeEnum.yolov9: + elif ( + self.onnx_model_type == ModelTypeEnum.yolov9 + or self.onnx_model_type == ModelTypeEnum.yologeneric + ): predictions: np.ndarray = tensor_output[0] return post_process_yolov9(predictions, self.w, self.h) else: diff --git a/frigate/detectors/plugins/openvino.py b/frigate/detectors/plugins/openvino.py index 75d956500..d90352772 100644 --- a/frigate/detectors/plugins/openvino.py +++ b/frigate/detectors/plugins/openvino.py @@ -10,7 +10,11 @@ from typing_extensions import Literal from frigate.const import MODEL_CACHE_DIR from frigate.detectors.detection_api import DetectionApi from frigate.detectors.detector_config import BaseDetectorConfig, ModelTypeEnum -from frigate.util.model import post_process_dfine, post_process_yolov9 +from frigate.util.model import ( + post_process_dfine, + post_process_rfdetr, + post_process_yolov9, +) logger = logging.getLogger(__name__) @@ -25,11 +29,13 @@ class OvDetectorConfig(BaseDetectorConfig): class OvDetector(DetectionApi): type_key = DETECTOR_KEY supported_models = [ + ModelTypeEnum.dfine, + ModelTypeEnum.rfdetr, ModelTypeEnum.ssd, ModelTypeEnum.yolonas, ModelTypeEnum.yolov9, + ModelTypeEnum.yologeneric, ModelTypeEnum.yolox, - ModelTypeEnum.dfine, ] def __init__(self, detector_config: OvDetectorConfig): @@ -185,6 +191,13 @@ class OvDetector(DetectionApi): if self.model_invalid: return detections + elif self.ov_model_type == ModelTypeEnum.rfdetr: + return post_process_rfdetr( + [ + infer_request.get_output_tensor(0).data, + infer_request.get_output_tensor(1).data, + ] + ) elif self.ov_model_type == ModelTypeEnum.ssd: results = infer_request.get_output_tensor(0).data[0][0] @@ -219,7 +232,10 @@ class OvDetector(DetectionApi): x_max / self.w, ] return detections - elif self.ov_model_type == ModelTypeEnum.yolov9: + elif ( + self.ov_model_type == ModelTypeEnum.yolov9 + or self.ov_model_type == ModelTypeEnum.yologeneric + ): out_tensor = infer_request.get_output_tensor(0).data return post_process_yolov9(out_tensor, self.w, self.h) elif self.ov_model_type == ModelTypeEnum.yolox: From e129fa6819794077ef8e962ddd84a6aaf9aed616 Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Sun, 23 Mar 2025 16:16:29 -0500 Subject: [PATCH 38/97] Fix missing key in example 
config (#17322) --- docs/docs/configuration/license_plate_recognition.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/docs/configuration/license_plate_recognition.md b/docs/docs/configuration/license_plate_recognition.md index 8f30728ec..979e42bd1 100644 --- a/docs/docs/configuration/license_plate_recognition.md +++ b/docs/docs/configuration/license_plate_recognition.md @@ -156,6 +156,7 @@ cameras: mask: 0.704,0.007,0.709,0.052,0.989,0.055,0.993,0.001 # ensure your camera's timestamp is masked record: enabled: True # disable recording if you only want snapshots + review: detections: enabled: True retain: From 678ae87c62af3fabf7b9e45b332b7633c6c669df Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Sun, 23 Mar 2025 17:15:15 -0500 Subject: [PATCH 39/97] Dedicated LPR fixes (#17325) * docs fps recommendation * add detection_threshold to example * send unconverted yuv frame * copy and convert * ensure thumbnail is encoded as webp * keep as jpeg --- docs/docs/configuration/license_plate_recognition.md | 4 ++-- frigate/data_processing/common/license_plate/mixin.py | 3 +-- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/docs/docs/configuration/license_plate_recognition.md b/docs/docs/configuration/license_plate_recognition.md index 979e42bd1..47dcfaa09 100644 --- a/docs/docs/configuration/license_plate_recognition.md +++ b/docs/docs/configuration/license_plate_recognition.md @@ -133,8 +133,8 @@ Users with a dedicated LPR camera can run Frigate's LPR by specifying a camera t # LPR global configuration lpr: enabled: True - min_area: 2000 min_plate_length: 4 + detection_threshold: 0.7 # change if necessary # Dedicated LPR camera configuration cameras: dedicated_lpr_camera: type: "lpr" # required to use dedicated LPR camera mode ffmpeg: ...
    detect: enabled: False # optional, disable Frigate's standard object detection pipeline - fps: 5 + fps: 5 # keep this at 5, higher values are unnecessary for dedicated LPR mode and could overwhelm the detector width: 1920 height: 1080 motion: diff --git a/frigate/data_processing/common/license_plate/mixin.py b/frigate/data_processing/common/license_plate/mixin.py index 3bf94a550..9bf2119f9 100644 --- a/frigate/data_processing/common/license_plate/mixin.py +++ b/frigate/data_processing/common/license_plate/mixin.py @@ -1279,10 +1279,9 @@ class LicensePlateProcessingMixin: # save the best snapshot logger.debug(f"Writing snapshot for {id}, {top_plate}, {current_time}") frame_bgr = cv2.cvtColor(frame, cv2.COLOR_YUV2BGR_I420) - _, buffer = cv2.imencode(".jpg", frame_bgr) self.sub_label_publisher.publish( EventMetadataTypeEnum.save_lpr_snapshot, - (base64.b64encode(buffer).decode("ASCII"), id, camera), + (base64.b64encode(frame_bgr).decode("ASCII"), id, camera), ) self.detected_license_plates[id] = { From 1f9816237728186c1d33765ca28b2867645c39a0 Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Mon, 24 Mar 2025 07:34:18 -0500 Subject: [PATCH 40/97] Fixes (#17334) * more docs updates * debug view note * hide notifications submenu if camera is disabled * fix value replacement from incorrect i18n changes --- docs/docs/configuration/license_plate_recognition.md | 9 +++++---- web/src/components/input/InputWithTags.tsx | 2 +- web/src/components/menu/LiveContextMenu.tsx | 2 +- 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/docs/docs/configuration/license_plate_recognition.md b/docs/docs/configuration/license_plate_recognition.md index 47dcfaa09..e4c8c1167 100644 --- a/docs/docs/configuration/license_plate_recognition.md +++ b/docs/docs/configuration/license_plate_recognition.md @@ -63,8 +63,8 @@ Fine-tune the LPR feature using these optional parameters: - **`detection_threshold`**: License plate object detection confidence score required before recognition runs. - Default: `0.7` - Note: This field only applies to the standalone license plate detection model; `min_score` should be used to filter for models that have license plate detection built in. -- **`min_area`**: Defines the minimum size (in pixels) a license plate must be before recognition runs. - - Default: `1000` pixels. +- **`min_area`**: Defines the minimum area (in pixels) a license plate must be before recognition runs. - - Default: `1000` pixels. Note: this is intentionally set very low as it is an _area_ measurement (length x width). For reference, 1000 pixels represents a ~32x32 pixel square in your camera image. - Depending on the resolution of your camera's `detect` stream, you can increase this value to ignore small or distant plates. ### Recognition @@ -94,7 +94,7 @@ These configuration parameters are available at the global level of your config.
```yaml lpr: enabled: True - min_area: 1500 # Ignore plates smaller than 1500 pixels + min_area: 1500 # Ignore plates with an area (length x width) smaller than 1500 pixels min_plate_length: 4 # Only recognize plates with 4 or more characters known_plates: Wife's Car: @@ -111,7 +111,7 @@ lpr: ```yaml lpr: enabled: True - min_area: 4000 # Run recognition on larger plates only + min_area: 4000 # Run recognition on larger plates only (4000 pixels represents a 63x63 pixel square in your image) recognition_threshold: 0.85 format: "^[A-Z]{2} [A-Z][0-9]{4}$" # Only recognize plates that are two letters, followed by a space, followed by a single letter and 4 numbers match_distance: 1 # Allow one character variation in plate matching @@ -175,6 +175,7 @@ When using `type: "lpr"` for a camera, a non-standard object detection pipeline - Snapshots will always be saved. - Tracked objects are retained according to your retain settings for `record` and `snapshots`. - Zones and object masks cannot be used. +- Debug view may not show `license_plate` bounding boxes, even if you are using a Frigate+ model for your standard object detection pipeline. - The `frigate/events` MQTT topic will not publish tracked object updates, though `frigate/reviews` will if recordings are enabled. ::: diff --git a/web/src/components/input/InputWithTags.tsx b/web/src/components/input/InputWithTags.tsx index c82b60b04..13977b8a7 100644 --- a/web/src/components/input/InputWithTags.tsx +++ b/web/src/components/input/InputWithTags.tsx @@ -806,7 +806,7 @@ export default function InputWithTags({ className="inline-flex items-center whitespace-nowrap rounded-full bg-green-100 px-2 py-0.5 text-sm capitalize text-green-800" > {t("filter.label." + filterType)}:{" "} - {formatFilterValues(filterType, value)} + {value.replaceAll("_", " ")}
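To make the `min_area` guidance above concrete, here is a minimal sketch of the arithmetic (the ~50x30 pixel plate size is purely an illustrative assumption; measure plates in your own camera's `detect` stream before settling on a value):

```yaml
lpr:
  enabled: True
  # min_area is an area measurement (length x width), not a side length:
  # a plate covering roughly 50x30 px is 50 * 30 = 1500 px, so anything
  # smaller than that is ignored before recognition runs.
  min_area: 1500
```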
From f8b0329b37f3224af4861fa651bac75f79a4bf79 Mon Sep 17 00:00:00 2001 From: Felipe Santos Date: Mon, 24 Mar 2025 11:05:59 -0300 Subject: [PATCH 42/97] Move database and config from homeassistant /config to addon /config (#16337) * Move database and config from homeassistant /config to addon /config * Re-implement config migration for the add-on * Align some terms * Improve function name * Use local variables * Add model.path migration * Fix homeassistant config path * Ensure migration scripts run before go2rtc and frigate * Migrate all files I know * Add ffmpeg.path migration * Update docker/main/rootfs/etc/s6-overlay/s6-rc.d/prepare/run Co-authored-by: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> * Improve some variable names and organization * Update docs to reflect addon config dir * Update live.md with /addon_configs * Move addon config section to configuration doc * Align several terminologies and improve text * Fix webrtc example config title * Capitalize Add-on in more places * Improve specific add-on config dir docs * Align bash and python scripts to prefer config.yml over config.yaml * Support config.json in migration shell scripts * Change docs to reflect config.yml is preferred over config.yaml * If previous config was yaml, migrate to yaml * Fix typo in edgetpu.md * Fix formatting of Python files * Remove HailoRT Beta add-on variant from docs * Add migration for labelmap and certs * Fix variable name * Fix new_config_file var unset * Fix addon config directories table * Improve db migration to avoid migrating files like .db.bak * Fix echo location --------- Co-authored-by: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> --- .../DISCUSSION_TEMPLATE/camera-support.yml | 4 +- .../DISCUSSION_TEMPLATE/config-support.yml | 4 +- .../DISCUSSION_TEMPLATE/detector-support.yml | 2 +- .../DISCUSSION_TEMPLATE/general-support.yml | 2 +- .../hardware-acceleration-support.yml | 2 +- .github/DISCUSSION_TEMPLATE/report-a-bug.yml | 4 +- docker-compose.yml | 1 - docker/main/Dockerfile | 2 +- .../rootfs/etc/s6-overlay/s6-rc.d/frigate/run | 35 +---- .../go2rtc/dependencies.d/{base => prepare} | 0 .../rootfs/etc/s6-overlay/s6-rc.d/go2rtc/run | 2 +- .../s6-rc.d/prepare/dependencies.d/base | 0 .../rootfs/etc/s6-overlay/s6-rc.d/prepare/run | 142 ++++++++++++++++++ .../etc/s6-overlay/s6-rc.d/prepare/type | 1 + .../rootfs/etc/s6-overlay/s6-rc.d/prepare/up | 1 + .../usr/local/ffmpeg/get_ffmpeg_path.py | 9 +- .../rootfs/usr/local/go2rtc/create_config.py | 8 +- .../usr/local/nginx/get_tls_settings.py | 14 +- docker/rockchip/conv2rknn.py | 2 +- docs/docs/configuration/advanced.md | 2 +- docs/docs/configuration/authentication.md | 6 +- docs/docs/configuration/birdseye.md | 2 +- .../configuration/hardware_acceleration.md | 11 +- docs/docs/configuration/index.md | 27 +++- docs/docs/configuration/live.md | 8 +- docs/docs/configuration/motion_detection.md | 2 +- docs/docs/configuration/object_detectors.md | 4 +- docs/docs/configuration/record.md | 2 +- docs/docs/configuration/reference.md | 2 +- docs/docs/development/contributing.md | 10 +- docs/docs/frigate/installation.md | 50 +++--- docs/docs/guides/getting_started.md | 7 +- docs/docs/guides/ha_network_storage.md | 16 +- docs/docs/integrations/home-assistant.md | 21 ++- docs/docs/integrations/plus.md | 4 +- .../integrations/third_party_extensions.md | 2 +- docs/docs/plus/index.md | 2 +- docs/docs/troubleshooting/edgetpu.md | 2 +- docs/docs/troubleshooting/recordings.md | 3 +- frigate/api/auth.py | 4 +- frigate/plus.py | 2 +- 41 files 
changed, 265 insertions(+), 159 deletions(-) rename docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/dependencies.d/{base => prepare} (100%) create mode 100644 docker/main/rootfs/etc/s6-overlay/s6-rc.d/prepare/dependencies.d/base create mode 100755 docker/main/rootfs/etc/s6-overlay/s6-rc.d/prepare/run create mode 100644 docker/main/rootfs/etc/s6-overlay/s6-rc.d/prepare/type create mode 100644 docker/main/rootfs/etc/s6-overlay/s6-rc.d/prepare/up diff --git a/.github/DISCUSSION_TEMPLATE/camera-support.yml b/.github/DISCUSSION_TEMPLATE/camera-support.yml index a76fd5caf..521d65ded 100644 --- a/.github/DISCUSSION_TEMPLATE/camera-support.yml +++ b/.github/DISCUSSION_TEMPLATE/camera-support.yml @@ -73,7 +73,7 @@ body: attributes: label: Operating system options: - - HassOS + - Home Assistant OS - Debian - Other Linux - Proxmox @@ -87,7 +87,7 @@ body: attributes: label: Install method options: - - HassOS Addon + - Home Assistant Add-on - Docker Compose - Docker CLI - Proxmox via Docker diff --git a/.github/DISCUSSION_TEMPLATE/config-support.yml b/.github/DISCUSSION_TEMPLATE/config-support.yml index 4934d7936..575f7f640 100644 --- a/.github/DISCUSSION_TEMPLATE/config-support.yml +++ b/.github/DISCUSSION_TEMPLATE/config-support.yml @@ -59,7 +59,7 @@ body: attributes: label: Operating system options: - - HassOS + - Home Assistant OS - Debian - Other Linux - Proxmox @@ -73,7 +73,7 @@ body: attributes: label: Install method options: - - HassOS Addon + - Home Assistant Add-on - Docker Compose - Docker CLI - Proxmox via Docker diff --git a/.github/DISCUSSION_TEMPLATE/detector-support.yml b/.github/DISCUSSION_TEMPLATE/detector-support.yml index 442b2527a..fb994500f 100644 --- a/.github/DISCUSSION_TEMPLATE/detector-support.yml +++ b/.github/DISCUSSION_TEMPLATE/detector-support.yml @@ -53,7 +53,7 @@ body: attributes: label: Install method options: - - HassOS Addon + - Home Assistant Add-on - Docker Compose - Docker CLI - Proxmox via Docker diff --git a/.github/DISCUSSION_TEMPLATE/general-support.yml b/.github/DISCUSSION_TEMPLATE/general-support.yml index 7af52bdf5..0b9f225b6 100644 --- a/.github/DISCUSSION_TEMPLATE/general-support.yml +++ b/.github/DISCUSSION_TEMPLATE/general-support.yml @@ -73,7 +73,7 @@ body: attributes: label: Install method options: - - HassOS Addon + - Home Assistant Add-on - Docker Compose - Docker CLI - Proxmox via Docker diff --git a/.github/DISCUSSION_TEMPLATE/hardware-acceleration-support.yml b/.github/DISCUSSION_TEMPLATE/hardware-acceleration-support.yml index 43fb3503b..861156696 100644 --- a/.github/DISCUSSION_TEMPLATE/hardware-acceleration-support.yml +++ b/.github/DISCUSSION_TEMPLATE/hardware-acceleration-support.yml @@ -69,7 +69,7 @@ body: attributes: label: Install method options: - - HassOS Addon + - Home Assistant Add-on - Docker Compose - Docker CLI - Proxmox via Docker diff --git a/.github/DISCUSSION_TEMPLATE/report-a-bug.yml b/.github/DISCUSSION_TEMPLATE/report-a-bug.yml index dba6d695e..21e4f746f 100644 --- a/.github/DISCUSSION_TEMPLATE/report-a-bug.yml +++ b/.github/DISCUSSION_TEMPLATE/report-a-bug.yml @@ -97,7 +97,7 @@ body: attributes: label: Operating system options: - - HassOS + - Home Assistant OS - Debian - Other Linux - Proxmox @@ -111,7 +111,7 @@ body: attributes: label: Install method options: - - HassOS Addon + - Home Assistant Add-on - Docker Compose - Docker CLI validations: diff --git a/docker-compose.yml b/docker-compose.yml index 2d905d385..c8eb765ab 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,4 +1,3 @@ -version: "3" services: 
devcontainer: container_name: frigate-devcontainer diff --git a/docker/main/Dockerfile b/docker/main/Dockerfile index 2a7d388bc..fb23940f8 100644 --- a/docker/main/Dockerfile +++ b/docker/main/Dockerfile @@ -262,7 +262,7 @@ HEALTHCHECK --start-period=300s --start-interval=5s --interval=15s --timeout=5s # Frigate deps with Node.js and NPM for devcontainer FROM deps AS devcontainer -# Do not start the actual Frigate service on devcontainer as it will be started by VSCode +# Do not start the actual Frigate service on devcontainer as it will be started by VS Code # But start a fake service for simulating the logs COPY docker/main/fake_frigate_run /etc/s6-overlay/s6-rc.d/frigate/run diff --git a/docker/main/rootfs/etc/s6-overlay/s6-rc.d/frigate/run b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/frigate/run index f764fd6b0..a3a34e4f6 100755 --- a/docker/main/rootfs/etc/s6-overlay/s6-rc.d/frigate/run +++ b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/frigate/run @@ -9,39 +9,6 @@ set -o errexit -o nounset -o pipefail # Tell S6-Overlay not to restart this service s6-svc -O . -function migrate_db_path() { - # Find config file in yaml or yml, but prefer yaml - local config_file="${CONFIG_FILE:-"/config/config.yml"}" - local config_file_yaml="${config_file//.yml/.yaml}" - if [[ -f "${config_file_yaml}" ]]; then - config_file="${config_file_yaml}" - elif [[ ! -f "${config_file}" ]]; then - # Frigate will create the config file on startup - return 0 - fi - unset config_file_yaml - - # Use yq to check if database.path is set - local user_db_path - user_db_path=$(yq eval '.database.path' "${config_file}") - - if [[ "${user_db_path}" == "null" ]]; then - local previous_db_path="/media/frigate/frigate.db" - local new_db_dir="/config" - if [[ -f "${previous_db_path}" ]]; then - if mountpoint --quiet "${new_db_dir}"; then - # /config is a mount point, move the db - echo "[INFO] Moving db from '${previous_db_path}' to the '${new_db_dir}' dir..." - # Move all files that starts with frigate.db to the new directory - mv -vf "${previous_db_path}"* "${new_db_dir}" - else - echo "[ERROR] Trying to migrate the db path from '${previous_db_path}' to the '${new_db_dir}' dir, but '${new_db_dir}' is not a mountpoint, please mount the '${new_db_dir}' dir" - return 1 - fi - fi - fi -} - function set_libva_version() { local ffmpeg_path ffmpeg_path=$(python3 /usr/local/ffmpeg/get_ffmpeg_path.py) @@ -50,8 +17,8 @@ function set_libva_version() { } echo "[INFO] Preparing Frigate..." -migrate_db_path set_libva_version + echo "[INFO] Starting Frigate..." cd /opt/frigate || echo "[ERROR] Failed to change working directory to /opt/frigate" diff --git a/docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/dependencies.d/base b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/dependencies.d/prepare similarity index 100% rename from docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/dependencies.d/base rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/dependencies.d/prepare diff --git a/docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/run b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/run index 2c3a7ab6f..46bc3175f 100755 --- a/docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/run +++ b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/run @@ -61,7 +61,7 @@ if [[ ! -f "/dev/shm/go2rtc.yaml" ]]; then echo "[INFO] Preparing new go2rtc config..." 
if [[ -n "${SUPERVISOR_TOKEN:-}" ]]; then - # Running as a Home Assistant add-on, infer the IP address and port + # Running as a Home Assistant Add-on, infer the IP address and port get_ip_and_port_from_supervisor fi diff --git a/docker/main/rootfs/etc/s6-overlay/s6-rc.d/prepare/dependencies.d/base b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/prepare/dependencies.d/base new file mode 100644 index 000000000..e69de29bb diff --git a/docker/main/rootfs/etc/s6-overlay/s6-rc.d/prepare/run b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/prepare/run new file mode 100755 index 000000000..0460cd2b4 --- /dev/null +++ b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/prepare/run @@ -0,0 +1,142 @@ +#!/command/with-contenv bash +# shellcheck shell=bash +# Do preparation tasks before starting the main services + +set -o errexit -o nounset -o pipefail + +function migrate_addon_config_dir() { + local home_assistant_config_dir="/homeassistant" + + if ! mountpoint --quiet "${home_assistant_config_dir}"; then + # Not running as a Home Assistant Add-on + return 0 + fi + + local config_dir="/config" + local new_config_file="${config_dir}/config.yml" + local new_config_file_yaml="${new_config_file//.yml/.yaml}" + if [[ -f "${new_config_file_yaml}" || -f "${new_config_file}" ]]; then + # Already migrated + return 0 + fi + + local old_config_file="${home_assistant_config_dir}/frigate.yml" + local old_config_file_yaml="${old_config_file//.yml/.yaml}" + if [[ -f "${old_config_file}" ]]; then + : + elif [[ -f "${old_config_file_yaml}" ]]; then + old_config_file="${old_config_file_yaml}" + new_config_file="${new_config_file_yaml}" + else + # Nothing to migrate + return 0 + fi + unset old_config_file_yaml new_config_file_yaml + + echo "[INFO] Starting migration from Home Assistant config dir to Add-on config dir..." >&2 + + local db_path + db_path=$(yq -r '.database.path' "${old_config_file}") + if [[ "${db_path}" == "null" ]]; then + db_path="${config_dir}/frigate.db" + fi + if [[ "${db_path}" == "${config_dir}/"* ]]; then + # replace /config/ prefix with /homeassistant/ + local old_db_path="${home_assistant_config_dir}/${db_path:8}" + + if [[ -f "${old_db_path}" ]]; then + local new_db_dir + new_db_dir="$(dirname "${db_path}")" + echo "[INFO] Migrating database from '${old_db_path}' to '${new_db_dir}' dir..." >&2 + mkdir -vp "${new_db_dir}" + mv -vf "${old_db_path}" "${new_db_dir}" + local db_file + for db_file in "${old_db_path}"-shm "${old_db_path}"-wal; do + if [[ -f "${db_file}" ]]; then + mv -vf "${db_file}" "${new_db_dir}" + fi + done + unset db_file + fi + fi + + local config_entry + for config_entry in .model.path .model.labelmap_path .ffmpeg.path .mqtt.tls_ca_certs .mqtt.tls_client_cert .mqtt.tls_client_key; do + local config_entry_path + config_entry_path=$(yq -r "${config_entry}" "${old_config_file}") + if [[ "${config_entry_path}" == "${config_dir}/"* ]]; then + # replace /config/ prefix with /homeassistant/ + local old_config_entry_path="${home_assistant_config_dir}/${config_entry_path:8}" + + if [[ -f "${old_config_entry_path}" ]]; then + local new_config_entry_entry + new_config_entry_entry="$(dirname "${config_entry_path}")" + echo "[INFO] Migrating ${config_entry} from '${old_config_entry_path}' to '${config_entry_path}'..." 
>&2 + mkdir -vp "${new_config_entry_entry}" + mv -vf "${old_config_entry_path}" "${config_entry_path}" + fi + fi + done + + local old_model_cache_path="${home_assistant_config_dir}/model_cache" + if [[ -d "${old_model_cache_path}" ]]; then + echo "[INFO] Migrating '${old_model_cache_path}' to '${config_dir}'..." >&2 + mv -f "${old_model_cache_path}" "${config_dir}" + fi + + echo "[INFO] Migrating other files from '${home_assistant_config_dir}' to '${config_dir}'..." >&2 + local file + for file in .exports .jwt_secret .timeline .vacuum go2rtc; do + file="${home_assistant_config_dir}/${file}" + if [[ -f "${file}" ]]; then + mv -vf "${file}" "${config_dir}" + fi + done + + echo "[INFO] Migrating config file from '${old_config_file}' to '${new_config_file}'..." >&2 + mv -vf "${old_config_file}" "${new_config_file}" + + echo "[INFO] Migration from Home Assistant config dir to Add-on config dir completed." >&2 +} + +function migrate_db_from_media_to_config() { + # Find config file in yml or yaml, but prefer yml + local config_file="${CONFIG_FILE:-"/config/config.yml"}" + local config_file_yaml="${config_file//.yml/.yaml}" + if [[ -f "${config_file}" ]]; then + : + elif [[ -f "${config_file_yaml}" ]]; then + config_file="${config_file_yaml}" + else + # Frigate will create the config file on startup + return 0 + fi + unset config_file_yaml + + local user_db_path + user_db_path=$(yq -r '.database.path' "${config_file}") + if [[ "${user_db_path}" == "null" ]]; then + local old_db_path="/media/frigate/frigate.db" + local new_db_dir="/config" + if [[ -f "${old_db_path}" ]]; then + echo "[INFO] Migrating database from '${old_db_path}' to '${new_db_dir}' dir..." >&2 + if mountpoint --quiet "${new_db_dir}"; then + # /config is a mount point, move the db + mv -vf "${old_db_path}" "${new_db_dir}" + local db_file + for db_file in "${old_db_path}"-shm "${old_db_path}"-wal; do + if [[ -f "${db_file}" ]]; then + mv -vf "${db_file}" "${new_db_dir}" + fi + done + unset db_file + else + echo "[ERROR] Trying to migrate the database path from '${old_db_path}' to '${new_db_dir}' dir, but '${new_db_dir}' is not a mountpoint, please mount the '${new_db_dir}' dir" >&2 + return 1 + fi + fi + fi +} + +migrate_addon_config_dir +migrate_db_from_media_to_config diff --git a/docker/main/rootfs/etc/s6-overlay/s6-rc.d/prepare/type b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/prepare/type new file mode 100644 index 000000000..bdd22a185 --- /dev/null +++ b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/prepare/type @@ -0,0 +1 @@ +oneshot diff --git a/docker/main/rootfs/etc/s6-overlay/s6-rc.d/prepare/up b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/prepare/up new file mode 100644 index 000000000..ea17af548 --- /dev/null +++ b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/prepare/up @@ -0,0 +1 @@ +/etc/s6-overlay/s6-rc.d/prepare/run diff --git a/docker/main/rootfs/usr/local/ffmpeg/get_ffmpeg_path.py b/docker/main/rootfs/usr/local/ffmpeg/get_ffmpeg_path.py index ed7f6a891..3de7d9f4a 100644 --- a/docker/main/rootfs/usr/local/ffmpeg/get_ffmpeg_path.py +++ b/docker/main/rootfs/usr/local/ffmpeg/get_ffmpeg_path.py @@ -1,5 +1,4 @@ import json -import os import sys from ruamel.yaml import YAML @@ -9,17 +8,13 @@ from frigate.const import ( DEFAULT_FFMPEG_VERSION, INCLUDED_FFMPEG_VERSIONS, ) +from frigate.util.config import find_config_file sys.path.remove("/opt/frigate") yaml = YAML() -config_file = os.environ.get("CONFIG_FILE", "/config/config.yml") - -# Check if we can use .yaml instead of .yml -config_file_yaml = config_file.replace(".yml", 
".yaml") -if os.path.isfile(config_file_yaml): - config_file = config_file_yaml +config_file = find_config_file() try: with open(config_file) as f: diff --git a/docker/main/rootfs/usr/local/go2rtc/create_config.py b/docker/main/rootfs/usr/local/go2rtc/create_config.py index d7c21c7f7..ac44f1fe4 100644 --- a/docker/main/rootfs/usr/local/go2rtc/create_config.py +++ b/docker/main/rootfs/usr/local/go2rtc/create_config.py @@ -15,6 +15,7 @@ from frigate.const import ( LIBAVFORMAT_VERSION_MAJOR, ) from frigate.ffmpeg_presets import parse_preset_hardware_acceleration_encode +from frigate.util.config import find_config_file sys.path.remove("/opt/frigate") @@ -29,12 +30,7 @@ if os.path.isdir("/run/secrets"): Path(os.path.join("/run/secrets", secret_file)).read_text().strip() ) -config_file = os.environ.get("CONFIG_FILE", "/config/config.yml") - -# Check if we can use .yaml instead of .yml -config_file_yaml = config_file.replace(".yml", ".yaml") -if os.path.isfile(config_file_yaml): - config_file = config_file_yaml +config_file = find_config_file() try: with open(config_file) as f: diff --git a/docker/main/rootfs/usr/local/nginx/get_tls_settings.py b/docker/main/rootfs/usr/local/nginx/get_tls_settings.py index f1a4c85de..2ababa282 100644 --- a/docker/main/rootfs/usr/local/nginx/get_tls_settings.py +++ b/docker/main/rootfs/usr/local/nginx/get_tls_settings.py @@ -1,18 +1,18 @@ """Prints the tls config as json to stdout.""" import json -import os +import sys from ruamel.yaml import YAML +sys.path.insert(0, "/opt/frigate") +from frigate.util.config import find_config_file + +sys.path.remove("/opt/frigate") + yaml = YAML() -config_file = os.environ.get("CONFIG_FILE", "/config/config.yml") - -# Check if we can use .yaml instead of .yml -config_file_yaml = config_file.replace(".yml", ".yaml") -if os.path.isfile(config_file_yaml): - config_file = config_file_yaml +config_file = find_config_file() try: with open(config_file) as f: diff --git a/docker/rockchip/conv2rknn.py b/docker/rockchip/conv2rknn.py index 4f4a315e1..4880d9868 100644 --- a/docker/rockchip/conv2rknn.py +++ b/docker/rockchip/conv2rknn.py @@ -14,7 +14,7 @@ try: with open("/config/conv2rknn.yaml", "r") as config_file: configuration = yaml.safe_load(config_file) except FileNotFoundError: - raise Exception("Please place a config.yaml file in /config/conv2rknn.yaml") + raise Exception("Please place a config file at /config/conv2rknn.yaml") if configuration["config"] != None: rknn_config = configuration["config"] diff --git a/docs/docs/configuration/advanced.md b/docs/docs/configuration/advanced.md index 1e128e0e3..818440fae 100644 --- a/docs/docs/configuration/advanced.md +++ b/docs/docs/configuration/advanced.md @@ -44,7 +44,7 @@ go2rtc: ### `environment_vars` -This section can be used to set environment variables for those unable to modify the environment of the container (ie. within HassOS) +This section can be used to set environment variables for those unable to modify the environment of the container, like within Home Assistant OS. Example: diff --git a/docs/docs/configuration/authentication.md b/docs/docs/configuration/authentication.md index 36994381d..129547d1b 100644 --- a/docs/docs/configuration/authentication.md +++ b/docs/docs/configuration/authentication.md @@ -43,13 +43,13 @@ Restarting Frigate will reset the rate limits. If you are running Frigate behind a proxy, you will want to set `trusted_proxies` or these rate limits will apply to the upstream proxy IP address. 
This means that a brute force attack will rate limit login attempts from other devices and could temporarily lock you out of your instance. In order to ensure rate limits only apply to the actual IP address where the requests are coming from, you will need to list the upstream networks that you want to trust. These trusted proxies are checked against the `X-Forwarded-For` header when looking for the IP address where the request originated. -If you are running a reverse proxy in the same docker compose file as Frigate, here is an example of how your auth config might look: +If you are running a reverse proxy in the same Docker Compose file as Frigate, here is an example of how your auth config might look: ```yaml auth: failed_login_rate_limit: "1/second;5/minute;20/hour" trusted_proxies: - - 172.18.0.0/16 # <---- this is the subnet for the internal docker compose network + - 172.18.0.0/16 # <---- this is the subnet for the internal Docker Compose network ``` ## JWT Token Secret @@ -66,7 +66,7 @@ Frigate looks for a JWT token secret in the following order: 1. An environment variable named `FRIGATE_JWT_SECRET` 2. A docker secret named `FRIGATE_JWT_SECRET` in `/run/secrets/` -3. A `jwt_secret` option from the Home Assistant Addon options +3. A `jwt_secret` option from the Home Assistant Add-on options 4. A `.jwt_secret` file in the config directory If no secret is found on startup, Frigate generates one and stores it in a `.jwt_secret` file in the config directory. diff --git a/docs/docs/configuration/birdseye.md b/docs/docs/configuration/birdseye.md index 2c9fbbdf4..d4bd1a15e 100644 --- a/docs/docs/configuration/birdseye.md +++ b/docs/docs/configuration/birdseye.md @@ -4,7 +4,7 @@ In addition to Frigate's Live camera dashboard, Birdseye allows a portable heads Birdseye can be viewed by adding the "Birdseye" camera to a Camera Group in the Web UI. Add a Camera Group by pressing the "+" icon on the Live page, and choose "Birdseye" as one of the cameras. -Birdseye can also be used in HomeAssistant dashboards, cast to media devices, etc. +Birdseye can also be used in Home Assistant dashboards, cast to media devices, etc. ## Birdseye Behavior diff --git a/docs/docs/configuration/hardware_acceleration.md b/docs/docs/configuration/hardware_acceleration.md index e3bff0a0e..e05a76e62 100644 --- a/docs/docs/configuration/hardware_acceleration.md +++ b/docs/docs/configuration/hardware_acceleration.md @@ -14,7 +14,7 @@ Depending on your system, these parameters may not be compatible. More informati ## Raspberry Pi 3/4 Ensure you increase the allocated RAM for your GPU to at least 128 (`raspi-config` > Performance Options > GPU Memory). -If you are using the HA addon, you may need to use the full access variant and turn off `Protection mode` for hardware acceleration. +If you are using the HA Add-on, you may need to use the full access variant and turn off _Protection mode_ for hardware acceleration. ```yaml # if you want to decode a h264 stream @@ -28,8 +28,8 @@ ffmpeg: :::note -If running Frigate in Docker, you either need to run in privileged mode or -map the `/dev/video*` devices to Frigate. With Docker compose add: +If running Frigate through Docker, you either need to run in privileged mode or +map the `/dev/video*` devices to Frigate. With Docker Compose add: ```yaml services: @@ -80,7 +80,7 @@ Or map in all the `/dev/video*` devices. :::note -The default driver is `iHD`. 
You may need to change the driver to `i965` by adding the following environment variable `LIBVA_DRIVER_NAME=i965` to your docker-compose file or [in the `frigate.yaml` for HA OS users](advanced.md#environment_vars). +The default driver is `iHD`. You may need to change the driver to `i965` by adding the following environment variable `LIBVA_DRIVER_NAME=i965` to your docker-compose file or [in the `config.yml` for HA Add-on users](advanced.md#environment_vars). See [The Intel Docs](https://www.intel.com/content/www/us/en/support/articles/000005505/processors.html) to figure out what generation your CPU is. @@ -191,7 +191,7 @@ VAAPI supports automatic profile selection so it will work automatically with bo :::note -You need to change the driver to `radeonsi` by adding the following environment variable `LIBVA_DRIVER_NAME=radeonsi` to your docker-compose file or [in the `frigate.yaml` for HA OS users](advanced.md#environment_vars). +You need to change the driver to `radeonsi` by adding the following environment variable `LIBVA_DRIVER_NAME=radeonsi` to your docker-compose file or [in the `config.yml` for HA Add-on users](advanced.md#environment_vars). ::: @@ -312,7 +312,6 @@ docker run -d \ ### Docker Compose - Jetson ```yaml -version: '2.4' services: frigate: ... diff --git a/docs/docs/configuration/index.md b/docs/docs/configuration/index.md index a60da3499..b1fa876f9 100644 --- a/docs/docs/configuration/index.md +++ b/docs/docs/configuration/index.md @@ -3,10 +3,12 @@ id: index title: Frigate Configuration --- -For Home Assistant Addon installations, the config file needs to be in the root of your Home Assistant config directory (same location as `configuration.yaml`). It can be named `frigate.yaml` or `frigate.yml`, but if both files exist `frigate.yaml` will be preferred and `frigate.yml` will be ignored. +For Home Assistant Add-on installations, the config file should be at `/addon_configs/<addon_directory>/config.yml`, where `<addon_directory>` is specific to the variant of the Frigate Add-on you are running. See the list of directories [here](#accessing-add-on-config-dir). For all other installation types, the config file should be mapped to `/config/config.yml` inside the container. +It can be named `config.yml` or `config.yaml`, but if both files exist `config.yml` will be preferred and `config.yaml` will be ignored. + It is recommended to start with a minimal configuration and add to it as described in [this guide](../guides/getting_started.md) and use the built in configuration editor in Frigate's UI which supports validation. ```yaml mqtt: host: mqtt.server.com cameras: back: ffmpeg: ... - detect ``` -## VSCode Configuration Schema +## Accessing the Home Assistant Add-on configuration directory {#accessing-add-on-config-dir} -VSCode supports JSON schemas for automatically validating configuration files. You can enable this feature by adding `# yaml-language-server: $schema=http://frigate_host:5000/api/config/schema.json` to the beginning of the configuration file. Replace `frigate_host` with the IP address or hostname of your Frigate server. If you're using both VSCode and Frigate as an add-on, you should use `ccab4aaf-frigate` instead. Make sure to expose the internal unauthenticated port `5000` when accessing the config from VSCode on another machine. +When running Frigate through the HA Add-on, the Frigate `/config` directory is mapped to `/addon_configs/<addon_directory>` in the host, where `<addon_directory>` is specific to the variant of the Frigate Add-on you are running.
+ +| Add-on Variant | Configuration directory | +| -------------------------- | -------------------------------------------- | +| Frigate | `/addon_configs/ccab4aaf_frigate` | +| Frigate (Full Access) | `/addon_configs/ccab4aaf_frigate-fa` | +| Frigate Beta | `/addon_configs/ccab4aaf_frigate-beta` | +| Frigate Beta (Full Access) | `/addon_configs/ccab4aaf_frigate-fa-beta` | + +**Whenever you see `/config` in the documentation, it refers to this directory.** + +If for example you are running the standard Add-on variant and use the [VS Code Add-on](https://github.com/hassio-addons/addon-vscode) to browse your files, you can click _File_ > _Open folder..._ and navigate to `/addon_configs/ccab4aaf_frigate` to access the Frigate `/config` directory and edit the `config.yaml` file. You can also use the built-in file editor in the Frigate UI to edit the configuration file. + +## VS Code Configuration Schema + +VS Code supports JSON schemas for automatically validating configuration files. You can enable this feature by adding `# yaml-language-server: $schema=http://frigate_host:5000/api/config/schema.json` to the beginning of the configuration file. Replace `frigate_host` with the IP address or hostname of your Frigate server. If you're using both VS Code and Frigate as an Add-on, you should use `ccab4aaf-frigate` instead. Make sure to expose the internal unauthenticated port `5000` when accessing the config from VS Code on another machine. ## Environment Variable Substitution @@ -65,10 +82,10 @@ genai: Here are some common starter configuration examples. Refer to the [reference config](./reference.md) for detailed information about all the config values. -### Raspberry Pi Home Assistant Addon with USB Coral +### Raspberry Pi Home Assistant Add-on with USB Coral - Single camera with 720p, 5fps stream for detect -- MQTT connected to home assistant mosquitto addon +- MQTT connected to the Home Assistant Mosquitto Add-on - Hardware acceleration for decoding video - USB Coral detector - Save all video with any detectable motion for 7 days regardless of whether any objects were detected or not diff --git a/docs/docs/configuration/live.md b/docs/docs/configuration/live.md index 494413682..cee8b3dca 100644 --- a/docs/docs/configuration/live.md +++ b/docs/docs/configuration/live.md @@ -104,9 +104,9 @@ cameras: WebRTC works by creating a TCP or UDP connection on port `8555`. However, it requires additional configuration: - For external access, over the internet, setup your router to forward port `8555` to port `8555` on the Frigate device, for both TCP and UDP. -- For internal/local access, unless you are running through the add-on, you will also need to set the WebRTC candidates list in the go2rtc config. For example, if `192.168.1.10` is the local IP of the device running Frigate: +- For internal/local access, unless you are running through the HA Add-on, you will also need to set the WebRTC candidates list in the go2rtc config. For example, if `192.168.1.10` is the local IP of the device running Frigate: - ```yaml title="/config/frigate.yaml" + ```yaml title="config.yml" go2rtc: streams: test_cam: ... @@ -121,9 +121,9 @@ WebRTC works by creating a TCP or UDP connection on port `8555`. However, it req :::tip -This extra configuration may not be required if Frigate has been installed as a Home Assistant add-on, as Frigate uses the Supervisor's API to generate a WebRTC candidate. 
+This extra configuration may not be required if Frigate has been installed as a Home Assistant Add-on, as Frigate uses the Supervisor's API to generate a WebRTC candidate. -However, it is recommended if issues occur to define the candidates manually. You should do this if the Frigate add-on fails to generate a valid candidate. If an error occurs you will see some warnings like the below in the add-on logs page during the initialization: +However, if issues occur, it is recommended to define the candidates manually. You should do this if the Frigate Add-on fails to generate a valid candidate. If an error occurs you will see some warnings like the below in the Add-on logs page during the initialization: ```log [WARN] Failed to get IP address from supervisor diff --git a/docs/docs/configuration/motion_detection.md b/docs/docs/configuration/motion_detection.md index 7621489ff..ec6d7ca25 100644 --- a/docs/docs/configuration/motion_detection.md +++ b/docs/docs/configuration/motion_detection.md @@ -77,7 +77,7 @@ At this point if motion is working as desired there is no reason to continue wit Once daytime motion detection is tuned, there is a chance that the settings will work well for motion detection during the night as well. If this is the case then the preferred settings can be written to the config file and left alone. -However, if the preferred day settings do not work well at night it is recommended to use HomeAssistant or some other solution to automate changing the settings. That way completely separate sets of motion settings can be used for optimal day and night motion detection. +However, if the preferred day settings do not work well at night it is recommended to use Home Assistant or some other solution to automate changing the settings. That way completely separate sets of motion settings can be used for optimal day and night motion detection. ## Tuning For Large Changes In Motion diff --git a/docs/docs/configuration/object_detectors.md b/docs/docs/configuration/object_detectors.md index e027596d2..aadd0d053 100644 --- a/docs/docs/configuration/object_detectors.md +++ b/docs/docs/configuration/object_detectors.md @@ -519,7 +519,7 @@ $ docker run --device=/dev/kfd --device=/dev/dri \ ... ``` -When using docker compose: +When using Docker Compose: ```yaml services: @@ -551,7 +551,7 @@ $ docker run -e HSA_OVERRIDE_GFX_VERSION=9.0.0 \ ... ``` -When using docker compose: +When using Docker Compose: ```yaml services: diff --git a/docs/docs/configuration/record.md b/docs/docs/configuration/record.md index f84d84cee..28065f0b0 100644 --- a/docs/docs/configuration/record.md +++ b/docs/docs/configuration/record.md @@ -146,7 +146,7 @@ The above configuration example can be added globally or on a per camera basis. ## Can I have "continuous" recordings, but only at certain times? -Using Frigate UI, HomeAssistant, or MQTT, cameras can be automated to only record in certain situations or at certain times. +Using Frigate UI, Home Assistant, or MQTT, cameras can be automated to only record in certain situations or at certain times. ## How do I export recordings? diff --git a/docs/docs/configuration/reference.md b/docs/docs/configuration/reference.md index 3f099ba25..8e24db0b7 100644 --- a/docs/docs/configuration/reference.md +++ b/docs/docs/configuration/reference.md @@ -890,7 +890,7 @@ telemetry: # NOTE: The container must either be privileged or have cap_net_admin, cap_net_raw capabilities enabled.
network_bandwidth: False # Optional: Enable the latest version outbound check (default: shown below) - # NOTE: If you use the HomeAssistant integration, disabling this will prevent it from reporting new versions + # NOTE: If you use the Home Assistant integration, disabling this will prevent it from reporting new versions version_check: True # Optional: Camera groups (default: no groups are setup) diff --git a/docs/docs/development/contributing.md b/docs/docs/development/contributing.md index 6ef5a0fc2..a28320339 100644 --- a/docs/docs/development/contributing.md +++ b/docs/docs/development/contributing.md @@ -17,15 +17,15 @@ From here, follow the guides for: - [Web Interface](#web-interface) - [Documentation](#documentation) -### Frigate Home Assistant Addon +### Frigate Home Assistant Add-on -This repository holds the Home Assistant Addon, for use with Home Assistant OS and compatible installations. It is the piece that allows you to run Frigate from your Home Assistant Supervisor tab. +This repository holds the Home Assistant Add-on, for use with Home Assistant OS and compatible installations. It is the piece that allows you to run Frigate from your Home Assistant Supervisor tab. Fork [blakeblackshear/frigate-hass-addons](https://github.com/blakeblackshear/frigate-hass-addons) to your own GitHub profile, then clone the forked repo to your local machine. ### Frigate Home Assistant Integration -This repository holds the custom integration that allows your Home Assistant installation to automatically create entities for your Frigate instance, whether you run that with the [addon](#frigate-home-assistant-addon) or in a separate Docker instance. +This repository holds the custom integration that allows your Home Assistant installation to automatically create entities for your Frigate instance, whether you are running Frigate as a standalone Docker container or as a [Home Assistant Add-on](#frigate-home-assistant-add-on). Fork [blakeblackshear/frigate-hass-integration](https://github.com/blakeblackshear/frigate-hass-integration) to your own GitHub profile, then clone the forked repo to your local machine. @@ -77,14 +77,14 @@ Create and place these files in a `debug` folder in the root of the repo. This i #### 4. Run Frigate from the command line -VSCode will start the docker compose file for you and open a terminal window connected to `frigate-dev`. +VS Code will start the Docker Compose file for you and open a terminal window connected to `frigate-dev`. - Run `python3 -m frigate` to start the backend. - In a separate terminal window inside VS Code, change into the `web` directory and run `npm install && npm run dev` to start the frontend. #### 5. Teardown -After closing VSCode, you may still have containers running. To close everything down, just run `docker-compose down -v` to clean up all containers. +After closing VS Code, you may still have containers running. To close everything down, just run `docker-compose down -v` to clean up all containers. ### Testing diff --git a/docs/docs/frigate/installation.md b/docs/docs/frigate/installation.md index b270df5ff..fe4262313 100644 --- a/docs/docs/frigate/installation.md +++ b/docs/docs/frigate/installation.md @@ -3,11 +3,11 @@ id: installation title: Installation --- -Frigate is a Docker container that can be run on any Docker host including as a [HassOS Addon](https://www.home-assistant.io/addons/). Note that a Home Assistant Addon is **not** the same thing as the integration.
The [integration](/integrations/home-assistant) is required to integrate Frigate into Home Assistant. +Frigate is a Docker container that can be run on any Docker host including as a [Home Assistant Add-on](https://www.home-assistant.io/addons/). Note that the Home Assistant Add-on is **not** the same thing as the integration. The [integration](/integrations/home-assistant) is required to integrate Frigate into Home Assistant, whether you are running Frigate as a standalone Docker container or as a Home Assistant Add-on. :::tip -If you already have Frigate installed as a Home Assistant addon, check out the [getting started guide](../guides/getting_started#configuring-frigate) to configure Frigate. +If you already have Frigate installed as a Home Assistant Add-on, check out the [getting started guide](../guides/getting_started#configuring-frigate) to configure Frigate. ::: @@ -45,7 +45,7 @@ The following ports are used by Frigate and can be mapped via docker as required | `8554` | RTSP restreaming. By default, these streams are unauthenticated. Authentication can be configured in go2rtc section of config. | | `8555` | WebRTC connections for low latency live views. | -#### Common docker compose storage configurations +#### Common Docker Compose storage configurations Writing to a local disk or external USB drive: @@ -73,7 +73,7 @@ Users of the Snapcraft build of Docker cannot use storage locations outside your Frigate utilizes shared memory to store frames during processing. The default `shm-size` provided by Docker is **64MB**. -The default shm size of **128MB** is fine for setups with **2 cameras** detecting at **720p**. If Frigate is exiting with "Bus error" messages, it is likely because you have too many high resolution cameras and you need to specify a higher shm size, using [`--shm-size`](https://docs.docker.com/engine/reference/run/#runtime-constraints-on-resources) (or [`service.shm_size`](https://docs.docker.com/compose/compose-file/compose-file-v2/#shm_size) in docker-compose). +The default shm size of **128MB** is fine for setups with **2 cameras** detecting at **720p**. If Frigate is exiting with "Bus error" messages, it is likely because you have too many high resolution cameras and you need to specify a higher shm size, using [`--shm-size`](https://docs.docker.com/engine/reference/run/#runtime-constraints-on-resources) (or [`service.shm_size`](https://docs.docker.com/compose/compose-file/compose-file-v2/#shm_size) in Docker Compose). The Frigate container also stores logs in shm, which can take up to **40MB**, so make sure to take this into account in your math as well. @@ -184,10 +184,9 @@ Next, you should configure [hardware object detection](/configuration/object_det ## Docker -Running in Docker with compose is the recommended install method. +Running through Docker with Docker Compose is the recommended install method. 
```yaml -version: "3.9" services: frigate: container_name: frigate @@ -219,7 +218,7 @@ services: FRIGATE_RTSP_PASSWORD: "password" ``` -If you can't use docker compose, you can run the container with something similar to this: +If you can't use Docker Compose, you can run the container with something similar to this: ```bash docker run -d \ @@ -255,13 +254,13 @@ The community supported docker image tags for the current stable version are: - `stable-rocm` - Frigate build for [AMD GPUs](../configuration/object_detectors.md#amdrocm-gpu-detector) - `stable-h8l` - Frigate build for the Hailo-8L M.2 PCIe Raspberry Pi 5 hat -## Home Assistant Addon +## Home Assistant Add-on :::warning -As of HomeAssistant OS 10.2 and Core 2023.6 defining separate network storage for media is supported. +As of Home Assistant Operating System 10.2 and Home Assistant 2023.6, defining separate network storage for media is supported. -There are important limitations in Home Assistant Operating System to be aware of: +There are important limitations in HA OS to be aware of: - Separate local storage for media is not yet supported by Home Assistant - AMD GPUs are not supported because HA OS does not include the mesa driver. @@ -275,24 +274,27 @@ See [the network storage guide](/guides/ha_network_storage.md) for instructions ::: -HassOS users can install via the addon repository. +Home Assistant OS users can install via the Add-on repository. -1. Navigate to Supervisor > Add-on Store > Repositories -2. Add https://github.com/blakeblackshear/frigate-hass-addons -3. Install your desired Frigate NVR Addon and navigate to it's page -4. Setup your network configuration in the `Configuration` tab -5. (not for proxy addon) Create the file `frigate.yaml` in your `config` directory with your detailed Frigate configuration -6. Start the addon container -7. (not for proxy addon) If you are using hardware acceleration for ffmpeg, you may need to disable "Protection mode" +1. In Home Assistant, navigate to _Settings_ > _Add-ons_ > _Add-on Store_ > _Repositories_ +2. Add `https://github.com/blakeblackshear/frigate-hass-addons` +3. Install the desired variant of the Frigate Add-on (see below) +4. Set up your network configuration in the `Configuration` tab +5. Start the Add-on +6. Use the _Open Web UI_ button to access the Frigate UI, then click the _cog icon_ > _Configuration editor_ and configure Frigate to your liking -There are several versions of the addon available: +There are several variants of the Add-on available: -| Addon Version | Description | -| ------------------------------ | ---------------------------------------------------------- | -| Frigate NVR | Current release with protection mode on | -| Frigate NVR (Full Access) | Current release with the option to disable protection mode | -| Frigate NVR Beta | Beta release with protection mode on | -| Frigate NVR Beta (Full Access) | Beta release with the option to disable protection mode | +| Add-on Variant | Description | +| -------------------------- | ---------------------------------------------------------- | +| Frigate | Current release with protection mode on | +| Frigate (Full Access) | Current release with the option to disable protection mode | +| Frigate Beta | Beta release with protection mode on | +| Frigate Beta (Full Access) | Beta release with the option to disable protection mode | +If you are using hardware acceleration for ffmpeg, you **may** need to use the _Full Access_ variant of the Add-on. This is because the Frigate Add-on runs in a container with limited access to the host system.
The _Full Access_ variant allows you to disable _Protection mode_ and give Frigate full access to the host system. + +You can also edit the Frigate configuration file through the [VS Code Add-on](https://github.com/hassio-addons/addon-vscode) or similar. In that case, the configuration file will be at `/addon_configs/<addon_directory>/config.yml`, where `<addon_directory>` is specific to the variant of the Frigate Add-on you are running. See the list of directories [here](../configuration/index.md#accessing-add-on-config-dir). ## Kubernetes diff --git a/docs/docs/guides/getting_started.md b/docs/docs/guides/getting_started.md index 6fe3a8e22..5420fbb6d 100644 --- a/docs/docs/guides/getting_started.md +++ b/docs/docs/guides/getting_started.md @@ -9,7 +9,7 @@ title: Getting started If you already have an environment with Linux and Docker installed, you can continue to [Installing Frigate](#installing-frigate) below. -If you already have Frigate installed in Docker or as a Home Assistant addon, you can continue to [Configuring Frigate](#configuring-frigate) below. +If you already have Frigate installed through Docker or through a Home Assistant Add-on, you can continue to [Configuring Frigate](#configuring-frigate) below. ::: @@ -81,7 +81,7 @@ Now you have a minimal Debian server that requires very little maintenance. ## Installing Frigate -This section shows how to create a minimal directory structure for a Docker installation on Debian. If you have installed Frigate as a Home Assistant addon or another way, you can continue to [Configuring Frigate](#configuring-frigate). +This section shows how to create a minimal directory structure for a Docker installation on Debian. If you have installed Frigate as a Home Assistant Add-on or another way, you can continue to [Configuring Frigate](#configuring-frigate). ### Setup directories @@ -110,7 +110,6 @@ This `docker-compose.yml` file is just a starter for amd64 devices. You will nee `docker-compose.yml` ```yaml -version: "3.9" services: frigate: container_name: frigate @@ -170,7 +169,6 @@ Here is an example configuration with hardware acceleration configured to work w `docker-compose.yml` (after modifying, you will need to run `docker compose up -d` to apply changes) ```yaml -version: "3.9" services: frigate: ... @@ -199,7 +197,6 @@ By default, Frigate will use a single CPU detector. If you have a USB Coral, you `docker-compose.yml` (after modifying, you will need to run `docker compose up -d` to apply changes) ```yaml -version: "3.9" services: frigate: ... diff --git a/docs/docs/guides/ha_network_storage.md b/docs/docs/guides/ha_network_storage.md index fe00311ab..78cddddeb 100644 --- a/docs/docs/guides/ha_network_storage.md +++ b/docs/docs/guides/ha_network_storage.md @@ -3,24 +3,18 @@ id: ha_network_storage title: Home Assistant network storage --- -As of Home Assistant Core 2023.6, Network Mounted Storage is supported for addons. +As of Home Assistant 2023.6, Network Mounted Storage is supported for Add-ons. ## Setting Up Remote Storage For Frigate ### Prerequisites -- HA Core 2023.6 or newer is installed -- Running HA OS 10.2 or newer OR Running Supervised with latest os-agent installed (this is required for supervised install) +- Home Assistant 2023.6 or newer is installed +- Running Home Assistant Operating System 10.2 or newer OR Running Supervised with latest os-agent installed (this is required for supervised install) ### Initial Setup -1. Stop the Frigate addon -2. 
Update your [config](configuration/index.md) so the DB is stored in the /config directory by adding: - -```yaml -database: - path: /config/frigate.db -``` +1. Stop the Frigate Add-on ### Move current data @@ -43,4 +37,4 @@ Keeping the current data is optional, but the data will need to be moved regardl 4. Fill out the additional required info for your particular NAS 5. Connect 6. Move files from `/media/frigate_tmp` to `/media/frigate` if they were kept in previous step -7. Start the Frigate addon +7. Start the Frigate Add-on diff --git a/docs/docs/integrations/home-assistant.md b/docs/docs/integrations/home-assistant.md index 19330b6b8..34a76d4fe 100644 --- a/docs/docs/integrations/home-assistant.md +++ b/docs/docs/integrations/home-assistant.md @@ -51,7 +51,7 @@ When configuring the integration, you will be asked for the `URL` of your Frigat ### Docker Compose Examples -If you are running Home Assistant Core and Frigate with Docker Compose on the same device, here are some examples. +If you are running Home Assistant and Frigate with Docker Compose on the same device, here are some examples. #### Home Assistant running with host networking @@ -60,7 +60,6 @@ It is not recommended to run Frigate in host networking mode. In this example, y ```yaml services: homeassistant: - container_name: hass image: ghcr.io/home-assistant/home-assistant:stable network_mode: host ... @@ -80,7 +79,6 @@ In this example, it is recommended to connect to the authenticated port, for exa ```yaml services: homeassistant: - container_name: hass image: ghcr.io/home-assistant/home-assistant:stable # network_mode: host ... @@ -93,17 +91,16 @@ services: ... ``` -### HassOS Addon +### Home Assistant Add-on -If you are using HassOS with the addon, the URL should be one of the following depending on which addon version you are using. Note that if you are using the Proxy Addon, you do NOT point the integration at the proxy URL. Just enter the URL used to access Frigate directly from your network. +If you are using Home Assistant Add-on, the URL should be one of the following depending on which Add-on variant you are using. Note that if you are using the Proxy Add-on, you should NOT point the integration at the proxy URL. Just enter the same URL used to access Frigate directly from your network. -| Addon Version | URL | -| ------------------------------ | ----------------------------------------- | -| Frigate NVR | `http://ccab4aaf-frigate:5000` | -| Frigate NVR (Full Access) | `http://ccab4aaf-frigate-fa:5000` | -| Frigate NVR Beta | `http://ccab4aaf-frigate-beta:5000` | -| Frigate NVR Beta (Full Access) | `http://ccab4aaf-frigate-fa-beta:5000` | -| Frigate NVR HailoRT Beta | `http://ccab4aaf-frigate-hailo-beta:5000` | +| Add-on Variant | URL | +| -------------------------- | ----------------------------------------- | +| Frigate | `http://ccab4aaf-frigate:5000` | +| Frigate (Full Access) | `http://ccab4aaf-frigate-fa:5000` | +| Frigate Beta | `http://ccab4aaf-frigate-beta:5000` | +| Frigate Beta (Full Access) | `http://ccab4aaf-frigate-fa-beta:5000` | ### Frigate running on a separate machine diff --git a/docs/docs/integrations/plus.md b/docs/docs/integrations/plus.md index 0a2b7f2d0..f270f1d73 100644 --- a/docs/docs/integrations/plus.md +++ b/docs/docs/integrations/plus.md @@ -19,11 +19,11 @@ Once logged in, you can generate an API key for Frigate in Settings. ### Set your API key -In Frigate, you can use an environment variable or a docker secret named `PLUS_API_KEY` to enable the `Frigate+` buttons on the Explore page. 
Home Assistant Addon users can set it under Settings > Addons > Frigate NVR > Configuration > Options (be sure to toggle the "Show unused optional configuration options" switch). +In Frigate, you can use an environment variable or a docker secret named `PLUS_API_KEY` to enable the `Frigate+` buttons on the Explore page. Home Assistant Addon users can set it under Settings > Add-ons > Frigate > Configuration > Options (be sure to toggle the "Show unused optional configuration options" switch). :::warning -You cannot use the `environment_vars` section of your Frigate configuration file to set this environment variable. It must be defined as an environment variable in the docker config or HA addon config. +You cannot use the `environment_vars` section of your Frigate configuration file to set this environment variable. It must be defined as an environment variable in the docker config or Home Assistant Add-on config. ::: diff --git a/docs/docs/integrations/third_party_extensions.md b/docs/docs/integrations/third_party_extensions.md index e1f9a1053..c90e98c96 100644 --- a/docs/docs/integrations/third_party_extensions.md +++ b/docs/docs/integrations/third_party_extensions.md @@ -21,7 +21,7 @@ This is a fork (with fixed errors and new features) of [original Double Take](ht ## [Frigate Notify](https://github.com/0x2142/frigate-notify) -[Frigate Notify](https://github.com/0x2142/frigate-notify) is a simple app designed to send notifications from Frigate NVR to your favorite platforms. Intended to be used with standalone Frigate installations - Home Assistant not required, MQTT is optional but recommended. +[Frigate Notify](https://github.com/0x2142/frigate-notify) is a simple app designed to send notifications from Frigate to your favorite platforms. Intended to be used with standalone Frigate installations - Home Assistant not required, MQTT is optional but recommended. ## [Frigate telegram](https://github.com/OldTyT/frigate-telegram) diff --git a/docs/docs/plus/index.md b/docs/docs/plus/index.md index 589adca72..a727f571f 100644 --- a/docs/docs/plus/index.md +++ b/docs/docs/plus/index.md @@ -3,7 +3,7 @@ id: index title: Models --- -Frigate+ offers models trained on images submitted by Frigate+ users from their security cameras and is specifically designed for the way Frigate NVR analyzes video footage. These models offer higher accuracy with less resources. The images you upload are used to fine tune a baseline model trained from images uploaded by all Frigate+ users. This fine tuning process results in a model that is optimized for accuracy in your specific conditions. +Frigate+ offers models trained on images submitted by Frigate+ users from their security cameras and is specifically designed for the way Frigate analyzes video footage. These models offer higher accuracy with less resources. The images you upload are used to fine tune a baseline model trained from images uploaded by all Frigate+ users. This fine tuning process results in a model that is optimized for accuracy in your specific conditions. :::info diff --git a/docs/docs/troubleshooting/edgetpu.md b/docs/docs/troubleshooting/edgetpu.md index 90006c41e..5c2656405 100644 --- a/docs/docs/troubleshooting/edgetpu.md +++ b/docs/docs/troubleshooting/edgetpu.md @@ -32,7 +32,7 @@ The USB coral can draw up to 900mA and this can be too much for some on-device U The USB coral has different IDs when it is uninitialized and initialized. - When running Frigate in a VM, Proxmox lxc, etc. you must ensure both device IDs are mapped. 
-- When running HA OS you may need to run the Full Access version of the Frigate addon with the `Protected Mode` switch disabled so that the coral can be accessed. +- When running through the Home Assistant OS you may need to run the Full Access variant of the Frigate Add-on with the _Protection mode_ switch disabled so that the coral can be accessed. ### Synology 716+II running DSM 7.2.1-69057 Update 5 diff --git a/docs/docs/troubleshooting/recordings.md b/docs/docs/troubleshooting/recordings.md index 667ea1e8f..d26a3614e 100644 --- a/docs/docs/troubleshooting/recordings.md +++ b/docs/docs/troubleshooting/recordings.md @@ -47,10 +47,9 @@ On linux, some helpful tools/commands in diagnosing would be: On modern linux kernels, the system will utilize some swap if enabled. Setting vm.swappiness=1 no longer means that the kernel will only swap in order to avoid OOM. To prevent any swapping inside a container, set allocations memory and memory+swap to be the same and disable swapping by setting the following docker/podman run parameters: -**Compose example** +**Docker Compose example** ```yaml -version: "3.9" services: frigate: ... diff --git a/frigate/api/auth.py b/frigate/api/auth.py index fc0bda6ed..7d7c2ba8d 100644 --- a/frigate/api/auth.py +++ b/frigate/api/auth.py @@ -109,11 +109,11 @@ def get_jwt_secret() -> str: jwt_secret = ( Path(os.path.join("/run/secrets", JWT_SECRET_ENV_VAR)).read_text().strip() ) - # check for the addon options file + # check for the add-on options file elif os.path.isfile("/data/options.json"): with open("/data/options.json") as f: raw_options = f.read() - logger.debug("Using jwt secret from Home Assistant addon options file.") + logger.debug("Using jwt secret from Home Assistant Add-on options file.") options = json.loads(raw_options) jwt_secret = options.get("jwt_secret") diff --git a/frigate/plus.py b/frigate/plus.py index 758089b85..8ec578c64 100644 --- a/frigate/plus.py +++ b/frigate/plus.py @@ -45,7 +45,7 @@ class PlusApi: self.key = ( Path(os.path.join("/run/secrets", PLUS_ENV_VAR)).read_text().strip() ) - # check for the addon options file + # check for the add-on options file elif os.path.isfile("/data/options.json"): with open("/data/options.json") as f: raw_options = f.read() From a8fa77c6b992928e3e4bf0ed47578b965817e201 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Mon, 24 Mar 2025 08:12:42 -0600 Subject: [PATCH 43/97] Classification fixes (#17337) * Cleanup classification settings behavior * Cleanup response * restore --- frigate/embeddings/maintainer.py | 3 +++ web/src/views/settings/ClassificationSettingsView.tsx | 8 +++++++- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/frigate/embeddings/maintainer.py b/frigate/embeddings/maintainer.py index 0d8d22762..9b90f6f2c 100644 --- a/frigate/embeddings/maintainer.py +++ b/frigate/embeddings/maintainer.py @@ -206,12 +206,15 @@ class EmbeddingMaintainer(threading.Thread): self.embeddings.embed_description("", data, upsert=False), pack=False, ) + processors = [self.realtime_processors, self.post_processors] for processor_list in processors: for processor in processor_list: resp = processor.handle_request(topic, data) if resp is not None: return resp + + return None except Exception as e: logger.error(f"Unable to handle embeddings request {e}", exc_info=True) diff --git a/web/src/views/settings/ClassificationSettingsView.tsx b/web/src/views/settings/ClassificationSettingsView.tsx index dac136e0c..07926182b 100644 --- a/web/src/views/settings/ClassificationSettingsView.tsx +++ 
b/web/src/views/settings/ClassificationSettingsView.tsx @@ -170,9 +170,15 @@ export default function ClassificationSettingsView({ ); }) .finally(() => { + addMessage( + "search_settings", + `Restart Required (Classification settings changed)`, + undefined, + "search_settings", + ); setIsLoading(false); }); - }, [updateConfig, classificationSettings, t]); + }, [classificationSettings, t, addMessage, updateConfig]); const onCancel = useCallback(() => { setClassificationSettings(origSearchSettings); From c239721021ae28d8bb5f688c2cb1132d9aeaa62b Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Mon, 24 Mar 2025 08:39:51 -0600 Subject: [PATCH 44/97] Ensure thumb camera directory exists before saving (#17339) --- frigate/camera/state.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/frigate/camera/state.py b/frigate/camera/state.py index 0495be1fc..98f808bf6 100644 --- a/frigate/camera/state.py +++ b/frigate/camera/state.py @@ -461,9 +461,9 @@ class CameraState: # create thumbnail with max height of 175 and save width = int(175 * img_frame.shape[1] / img_frame.shape[0]) thumb = cv2.resize(img_frame, dsize=(width, 175), interpolation=cv2.INTER_AREA) - cv2.imwrite( - os.path.join(THUMB_DIR, self.camera_config.name, f"{event_id}.webp"), thumb - ) + thumb_path = os.path.join(THUMB_DIR, self.camera_config.name) + os.makedirs(thumb_path, exist_ok=True) + cv2.imwrite(os.path.join(thumb_path, f"{event_id}.webp"), thumb) def shutdown(self) -> None: for obj in self.tracked_objects.values(): From 05d39f79b025f2c2a4a76af171996b861f47d243 Mon Sep 17 00:00:00 2001 From: leccelecce <24962424+leccelecce@users.noreply.github.com> Date: Mon, 24 Mar 2025 15:19:58 +0000 Subject: [PATCH 45/97] Add ability to update Frigate+ model to latest from UI (#17324) * Add ability to update Frigate+ model to latest * UI tweaks * further UI tweaks * UI tweaks: add width and height, fix select * Add placeholder while API call in progress * Fix Frigate+ enabled check * Fix config change lost when reloading page * Add persistent message requiring restart * Drop down supported detectors and dimensions * Add width and height to display * Update FrigatePlusSettingsView.tsx * Temp fix for Codespaces not loading * Add i18n, format * remove unneeded brackets * missing colon * Revert "Temp fix for Codespaces not loading" This reverts commit 75b19674ce3c33e69308358c29e80bf2774f377d. 
--- frigate/api/app.py | 42 +++ frigate/plus.py | 8 + web/public/locales/en/views/settings.json | 11 +- web/src/pages/Settings.tsx | 4 +- web/src/types/frigateConfig.ts | 2 + .../settings/FrigatePlusSettingsView.tsx | 302 +++++++++++++++++- 6 files changed, 356 insertions(+), 13 deletions(-) diff --git a/frigate/api/app.py b/frigate/api/app.py index 0d391035e..8a1310b93 100644 --- a/frigate/api/app.py +++ b/frigate/api/app.py @@ -640,6 +640,48 @@ def get_sub_labels(split_joined: Optional[int] = None): return JSONResponse(content=sub_labels) +@router.get("/plus/models") +def plusModels(request: Request, filterByCurrentModelDetector: bool = False): + if not request.app.frigate_config.plus_api.is_active(): + return JSONResponse( + content=({"success": False, "message": "Frigate+ is not enabled"}), + status_code=400, + ) + + models: dict[any, any] = request.app.frigate_config.plus_api.get_models() + + if not models["list"]: + return JSONResponse( + content=({"success": False, "message": "No models found"}), + status_code=400, + ) + + modelList = models["list"] + + # current model type + modelType = request.app.frigate_config.model.model_type + + # current detectorType for comparing to supportedDetectors + detectorType = list(request.app.frigate_config.detectors.values())[0].type + + validModels = [] + + for model in sorted( + filter( + lambda m: ( + not filterByCurrentModelDetector + or (detectorType in m["supportedDetectors"] and modelType in m["type"]) + ), + modelList, + ), + key=(lambda m: m["trainDate"]), + reverse=True, + ): + validModels.append(model) + + return JSONResponse(content=validModels) + + @router.get("/recognized_license_plates") def get_recognized_license_plates(split_joined: Optional[int] = None): try: diff --git a/frigate/plus.py b/frigate/plus.py index 8ec578c64..197b6e48d 100644 --- a/frigate/plus.py +++ b/frigate/plus.py @@ -234,3 +234,11 @@ class PlusApi: raise Exception(r.text) return r.json() + + def get_models(self) -> Any: + r = self._get("model/list") + + if not r.ok: + raise Exception(r.text) + + return r.json() diff --git a/web/public/locales/en/views/settings.json b/web/public/locales/en/views/settings.json index ed9d291a1..d642c12e6 100644 --- a/web/public/locales/en/views/settings.json +++ b/web/public/locales/en/views/settings.json @@ -540,14 +540,21 @@ }, "modelInfo": { "title": "Model Information", - "modelId": "Model ID", "modelType": "Model Type", "trainDate": "Train Date", "baseModel": "Base Model", "supportedDetectors": "Supported Detectors", + "dimensions": "Dimensions", "cameras": "Cameras", "loading": "Loading model information...", - "error": "Failed to load model information" + "error": "Failed to load model information", + "availableModels": "Available Models", + "loadingAvailableModels": "Loading available models...", + "modelSelect": "Your available models on Frigate+ can be selected here. Note that only models compatible with your current detector configuration can be selected." + }, + "toast": { + "success": "Frigate+ settings have been saved. Restart Frigate to apply changes.", + "error": "Failed to save config changes: {{errorMessage}}" } } } diff --git a/web/src/pages/Settings.tsx b/web/src/pages/Settings.tsx index b00d3255c..3588f6491 100644 --- a/web/src/pages/Settings.tsx +++ b/web/src/pages/Settings.tsx @@ -278,7 +278,9 @@ export default function Settings() { {page == "notifications" && ( )} - {page == "frigateplus" && } + {page == "frigateplus" && ( + + )}
{confirmationDialogOpen && ( ("config"); +type FrigatePlusModel = { + id: string; + type: string; + supportedDetectors: string[]; + trainDate: string; + baseModel: string; + width: number; + height: number; +}; + +type FrigatePlusSettings = { + model: { + id?: string; + }; +}; + +type FrigateSettingsViewProps = { + setUnsavedChanges: React.Dispatch>; +}; + +export default function FrigatePlusSettingsView({ + setUnsavedChanges, +}: FrigateSettingsViewProps) { const { t } = useTranslation("views/settings"); + const { data: config, mutate: updateConfig } = + useSWR("config"); + const [changedValue, setChangedValue] = useState(false); + const [isLoading, setIsLoading] = useState(false); + + const { addMessage, removeMessage } = useContext(StatusBarMessagesContext)!; + + const [frigatePlusSettings, setFrigatePlusSettings] = + useState({ + model: { + id: undefined, + }, + }); + + const [origPlusSettings, setOrigPlusSettings] = useState( + { + model: { + id: undefined, + }, + }, + ); + + const { data: availableModels = {} } = useSWR< + Record + >("/plus/models", { + fallbackData: {}, + fetcher: async (url) => { + const res = await axios.get(url, { withCredentials: true }); + return res.data.reduce( + (obj: Record, model: FrigatePlusModel) => { + obj[model.id] = model; + return obj; + }, + {}, + ); + }, + }); + + useEffect(() => { + if (config) { + if (frigatePlusSettings?.model.id == undefined) { + setFrigatePlusSettings({ + model: { + id: config.model.plus?.id, + }, + }); + } + + setOrigPlusSettings({ + model: { + id: config.model.plus?.id, + }, + }); + } + // we know that these deps are correct + // eslint-disable-next-line react-hooks/exhaustive-deps + }, [config]); + + const handleFrigatePlusConfigChange = ( + newConfig: Partial, + ) => { + setFrigatePlusSettings((prevConfig) => ({ + model: { + ...prevConfig.model, + ...newConfig.model, + }, + })); + setUnsavedChanges(true); + setChangedValue(true); + }; + + const saveToConfig = useCallback(async () => { + setIsLoading(true); + + axios + .put(`config/set?model.path=plus://${frigatePlusSettings.model.id}`, { + requires_restart: 0, + }) + .then((res) => { + if (res.status === 200) { + toast.success(t("frigatePlus.toast.success"), { + position: "top-center", + }); + setChangedValue(false); + addMessage( + "plus_restart", + "Restart required (Frigate+ model changed)", + undefined, + "plus_restart", + ); + updateConfig(); + } else { + toast.error( + t("frigatePlus.toast.error", { errorMessage: res.statusText }), + { + position: "top-center", + }, + ); + } + }) + .catch((error) => { + const errorMessage = + error.response?.data?.message || + error.response?.data?.detail || + "Unknown error"; + toast.error( + t("toast.save.error.title", { errorMessage, ns: "common" }), + { + position: "top-center", + }, + ); + }) + .finally(() => { + setIsLoading(false); + }); + }, [updateConfig, addMessage, frigatePlusSettings, t]); + + const onCancel = useCallback(() => { + setFrigatePlusSettings(origPlusSettings); + setChangedValue(false); + removeMessage("plus_settings", "plus_settings"); + }, [origPlusSettings, removeMessage]); + + useEffect(() => { + if (changedValue) { + addMessage( + "plus_settings", + `Unsaved Frigate+ settings changes`, + undefined, + "plus_settings", + ); + } else { + removeMessage("plus_settings", "plus_settings"); + } + // we know that these deps are correct + // eslint-disable-next-line react-hooks/exhaustive-deps + }, [changedValue]); useEffect(() => { document.title = t("documentTitle.frigatePlus"); @@ -28,6 +198,10 @@ export default 
function FrigatePlusSettingsView() {
     );
   };
 
+  if (!config) {
+    return ...;
+  }
+
   return (
     <>
@@ -101,7 +275,13 @@ export default function FrigatePlusSettingsView() {
-            ...
-            {config.model.plus.name}
-            ...
+            ...
+            {config.model.plus.name} (
+            {config.model.plus.width +
+              "x" +
+              config.model.plus.height}
+            )
+            ...
-            ...
-            {config.model.plus.id}
-            ...
+            ...
+            {t("frigatePlus.modelInfo.availableModels")}
+            ...
+            frigatePlus.modelInfo.modelSelect
+            ...
         )}
@@ -227,6 +481,34 @@ export default function FrigatePlusSettingsView() {
         )}
+        ...
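
The `/plus/models` endpoint added in this patch narrows the Frigate+ model list to entries that support the configured detector and match the current model type, sorted newest-first by training date. A standalone sketch of that selection logic, with a made-up `models` payload standing in for the real API response:

```python
# Standalone sketch of the /plus/models filtering logic; the payload
# below is made up for illustration.
models = [
    {"id": "v1", "type": "yolonas", "supportedDetectors": ["onnx"], "trainDate": "2025-01-10"},
    {"id": "v2", "type": "yolonas", "supportedDetectors": ["onnx"], "trainDate": "2025-03-01"},
    {"id": "v3", "type": "ssd", "supportedDetectors": ["edgetpu"], "trainDate": "2025-02-14"},
]

detector_type = "onnx"  # type of the first configured detector
model_type = "yolonas"  # currently configured model type

valid_models = sorted(
    (
        m
        for m in models
        if detector_type in m["supportedDetectors"] and model_type in m["type"]
    ),
    key=lambda m: m["trainDate"],
    reverse=True,  # newest training run first
)

print([m["id"] for m in valid_models])  # ['v2', 'v1']
```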
From eabc316c7b5bfe2532282a2c8a1635329b89a868 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Mon, 24 Mar 2025 11:25:36 -0600 Subject: [PATCH 46/97] Various fixes (#17342) * Remove imutils * Ensure that state is maintained when setting search params * Change script for version of setuptools * Fix * Fix --- .cspell/frigate-dictionary.txt | 1 - docker/main/Dockerfile | 5 ++++- docker/main/requirements-wheels.txt | 1 - docker/rocm/Dockerfile | 1 + docker/tensorrt/Dockerfile.arm64 | 7 ++++--- frigate/motion/frigate_motion.py | 4 ++-- frigate/motion/improved_motion.py | 4 ++-- frigate/util/image.py | 13 +++++++++++++ web/src/hooks/use-overlay-state.tsx | 5 +++-- 9 files changed, 29 insertions(+), 12 deletions(-) diff --git a/.cspell/frigate-dictionary.txt b/.cspell/frigate-dictionary.txt index dbab9600e..77e4ede62 100644 --- a/.cspell/frigate-dictionary.txt +++ b/.cspell/frigate-dictionary.txt @@ -108,7 +108,6 @@ imagestream imdecode imencode imread -imutils imwrite interp iostat diff --git a/docker/main/Dockerfile b/docker/main/Dockerfile index fb23940f8..a71250813 100644 --- a/docker/main/Dockerfile +++ b/docker/main/Dockerfile @@ -78,8 +78,9 @@ COPY docker/main/requirements-ov.txt /requirements-ov.txt RUN apt-get -qq update \ && apt-get -qq install -y wget python3 python3-dev python3-distutils gcc pkg-config libhdf5-dev \ && wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \ + && sed -i 's/args.append("setuptools")/args.append("setuptools==77.0.3")/' get-pip.py \ && python3 get-pip.py "pip" \ - && pip install -r /requirements-ov.txt + && pip3 install -r /requirements-ov.txt # Get OpenVino Model RUN --mount=type=bind,source=docker/main/build_ov_model.py,target=/build_ov_model.py \ @@ -172,6 +173,7 @@ RUN apt-get -qq update \ RUN update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.11 1 RUN wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \ + && sed -i 's/args.append("setuptools")/args.append("setuptools==77.0.3")/' get-pip.py \ && python3 get-pip.py "pip" COPY docker/main/requirements.txt /requirements.txt @@ -235,6 +237,7 @@ ENV DEFAULT_FFMPEG_VERSION="7.0" ENV INCLUDED_FFMPEG_VERSIONS="${DEFAULT_FFMPEG_VERSION}:5.0" RUN wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \ + && sed -i 's/args.append("setuptools")/args.append("setuptools==77.0.3")/' get-pip.py \ && python3 get-pip.py "pip" RUN --mount=type=bind,from=wheels,source=/wheels,target=/deps/wheels \ diff --git a/docker/main/requirements-wheels.txt b/docker/main/requirements-wheels.txt index 9368cabcd..4ab7e03e6 100644 --- a/docker/main/requirements-wheels.txt +++ b/docker/main/requirements-wheels.txt @@ -7,7 +7,6 @@ starlette-context == 0.3.6 fastapi == 0.115.* uvicorn == 0.30.* slowapi == 0.1.* -imutils == 0.5.* joserfc == 1.0.* pathvalidate == 3.2.* markupsafe == 3.0.* diff --git a/docker/rocm/Dockerfile b/docker/rocm/Dockerfile index 78f91b96f..d04e93df3 100644 --- a/docker/rocm/Dockerfile +++ b/docker/rocm/Dockerfile @@ -39,6 +39,7 @@ WORKDIR /opt/frigate COPY --from=rootfs / / RUN wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \ + && sed -i 's/args.append("setuptools")/args.append("setuptools==77.0.3")/' get-pip.py \ && python3 get-pip.py "pip" --break-system-packages RUN python3 -m pip config set global.break-system-packages true diff --git a/docker/tensorrt/Dockerfile.arm64 b/docker/tensorrt/Dockerfile.arm64 index 5d5d5d939..6e2e0280f 100644 --- a/docker/tensorrt/Dockerfile.arm64 +++ b/docker/tensorrt/Dockerfile.arm64 @@ -9,9 +9,9 @@ ARG DEBIAN_FRONTEND # Add 
deadsnakes PPA for python3.11 RUN apt-get -qq update && \ - apt-get -qq install -y --no-install-recommends \ - software-properties-common \ - && add-apt-repository ppa:deadsnakes/ppa + apt-get -qq install -y --no-install-recommends \ + software-properties-common \ + && add-apt-repository ppa:deadsnakes/ppa # Use a separate container to build wheels to prevent build dependencies in final image RUN apt-get -qq update \ @@ -24,6 +24,7 @@ RUN apt-get -qq update \ RUN update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.11 1 RUN wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \ + && sed -i 's/args.append("setuptools")/args.append("setuptools==77.0.3")/' get-pip.py \ && python3 get-pip.py "pip" FROM build-wheels AS trt-wheels diff --git a/frigate/motion/frigate_motion.py b/frigate/motion/frigate_motion.py index 72097667b..fd362de34 100644 --- a/frigate/motion/frigate_motion.py +++ b/frigate/motion/frigate_motion.py @@ -1,9 +1,9 @@ import cv2 -import imutils import numpy as np from frigate.config import MotionConfig from frigate.motion import MotionDetector +from frigate.util.image import grab_cv2_contours class FrigateMotionDetector(MotionDetector): @@ -103,7 +103,7 @@ class FrigateMotionDetector(MotionDetector): contours = cv2.findContours( thresh_dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE ) - contours = imutils.grab_contours(contours) + contours = grab_cv2_contours(contours) # loop over the contours for c in contours: diff --git a/frigate/motion/improved_motion.py b/frigate/motion/improved_motion.py index aae5167a4..69de6d015 100644 --- a/frigate/motion/improved_motion.py +++ b/frigate/motion/improved_motion.py @@ -1,7 +1,6 @@ import logging import cv2 -import imutils import numpy as np from scipy.ndimage import gaussian_filter @@ -9,6 +8,7 @@ from frigate.camera import PTZMetrics from frigate.comms.config_updater import ConfigSubscriber from frigate.config import MotionConfig from frigate.motion import MotionDetector +from frigate.util.image import grab_cv2_contours logger = logging.getLogger(__name__) @@ -147,7 +147,7 @@ class ImprovedMotionDetector(MotionDetector): contours = cv2.findContours( thresh_dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE ) - contours = imutils.grab_contours(contours) + contours = grab_cv2_contours(contours) # loop over the contours total_contour_area = 0 diff --git a/frigate/util/image.py b/frigate/util/image.py index 0b80efe88..93827747c 100644 --- a/frigate/util/image.py +++ b/frigate/util/image.py @@ -265,6 +265,19 @@ def draw_box_with_label( ) +def grab_cv2_contours(cnts): + # if the length the contours tuple returned by cv2.findContours + # is '2' then we are using either OpenCV v2.4, v4-beta, or + # v4-official + if len(cnts) == 2: + return cnts[0] + + # if the length of the contours tuple is '3' then we are using + # either OpenCV v3, v4-pre, or v4-alpha + elif len(cnts) == 3: + return cnts[1] + + def is_label_printable(label) -> bool: """Check if label is printable.""" return not bool(set(label) - set(printable)) diff --git a/web/src/hooks/use-overlay-state.tsx b/web/src/hooks/use-overlay-state.tsx index 7a43383d4..5b41ca302 100644 --- a/web/src/hooks/use-overlay-state.tsx +++ b/web/src/hooks/use-overlay-state.tsx @@ -109,6 +109,7 @@ export function useSearchEffect( key: string, callback: (value: string) => boolean, ) { + const location = useLocation(); const [searchParams, setSearchParams] = useSearchParams(); const param = useMemo(() => { @@ -129,7 +130,7 @@ export function useSearchEffect( const remove = 
callback(param[1]); if (remove) { - setSearchParams(undefined, { replace: true }); + setSearchParams(undefined, { state: location.state, replace: true }); } - }, [param, callback, setSearchParams]); + }, [param, location.state, callback, setSearchParams]); } From 5c20cf456396e9b2122806211648e3c058053251 Mon Sep 17 00:00:00 2001 From: leccelecce <24962424+leccelecce@users.noreply.github.com> Date: Mon, 24 Mar 2025 17:56:27 +0000 Subject: [PATCH 47/97] Don't require /dev/bus/usb to be present (#17341) --- docker-compose.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docker-compose.yml b/docker-compose.yml index c8eb765ab..4322dd23c 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -23,8 +23,8 @@ services: # capabilities: [gpu] environment: YOLO_MODELS: "" - devices: - - /dev/bus/usb:/dev/bus/usb + # devices: + # - /dev/bus/usb:/dev/bus/usb # Uncomment for Google Coral USB # - /dev/dri:/dev/dri # for intel hwaccel, needs to be updated for your hardware volumes: - .:/workspace/frigate:cached @@ -32,7 +32,7 @@ services: - /etc/localtime:/etc/localtime:ro - ./config:/config - ./debug:/media/frigate - - /dev/bus/usb:/dev/bus/usb + # - /dev/bus/usb:/dev/bus/usb # Uncomment for Google Coral USB mqtt: container_name: mqtt image: eclipse-mosquitto:1.6 From 6ded9a1bc616aac97991bbc9a2c3f2051034c8df Mon Sep 17 00:00:00 2001 From: leccelecce <24962424+leccelecce@users.noreply.github.com> Date: Mon, 24 Mar 2025 18:50:57 +0000 Subject: [PATCH 48/97] Update contributing.md (#17343) --- docs/docs/development/contributing.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/docs/development/contributing.md b/docs/docs/development/contributing.md index a28320339..72b1af7ea 100644 --- a/docs/docs/development/contributing.md +++ b/docs/docs/development/contributing.md @@ -79,6 +79,7 @@ Create and place these files in a `debug` folder in the root of the repo. This i VS Code will start the Docker Compose file for you and open a terminal window connected to `frigate-dev`. +- Depending on what hardware you're developing on, you may need to amend `docker-compose.yml` in the project root to pass through a USB Coral or GPU for hardware acceleration. - Run `python3 -m frigate` to start the backend. - In a separate terminal window inside VS Code, change into the `web` directory and run `npm install && npm run dev` to start the frontend. From 7e8b3c389d471fdf6b244339fe8226bdec341d91 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Mon, 24 Mar 2025 17:31:32 -0600 Subject: [PATCH 49/97] Update face_recognition.md (#17349) --- docs/docs/configuration/face_recognition.md | 27 +++++++++++++++++---- 1 file changed, 22 insertions(+), 5 deletions(-) diff --git a/docs/docs/configuration/face_recognition.md b/docs/docs/configuration/face_recognition.md index aac1be9b5..472cacada 100644 --- a/docs/docs/configuration/face_recognition.md +++ b/docs/docs/configuration/face_recognition.md @@ -74,19 +74,36 @@ The accuracy of face recognition is heavily dependent on the quality of data giv When choosing images to include in the face training set it is recommended to always follow these recommendations: - If it is difficult to make out details in a persons face it will not be helpful in training. -- Avoid images with under/over-exposure. +- Avoid images with extreme under/over-exposure. - Avoid blurry / pixelated images. -- Be careful when uploading images of people when they are wearing clothing that covers a lot of their face as this may confuse the training. 
-- Do not upload too many images at the same time, it is recommended to train 4-6 images for each person each day so it is easier to know if the previously added images helped or hurt performance. +- Be careful when uploading images of people when they are wearing clothing that covers a lot of their face as this may confuse the model. +- Do not upload too many similar images at the same time, it is recommended to train no more than 4-6 similar images for each person to avoid overfitting. ::: ### Step 1 - Building a Strong Foundation -When first enabling face recognition it is important to build a foundation of strong images. It is recommended to start by uploading 1-2 photos taken by a smartphone for each person. It is important that the person's face in the photo is straight-on and not turned which will ensure a good starting point. +When first enabling face recognition it is important to build a foundation of strong images. It is recommended to start by uploading 1-5 "portrait" photos for each person. It is important that the person's face in the photo is straight-on and not turned which will ensure a good starting point. -Then it is recommended to use the `Face Library` tab in Frigate to select and train images for each person as they are detected. When building a strong foundation it is strongly recommended to only train on images that are straight-on. Ignore images from cameras that recognize faces from an angle. Once a person starts to be consistently recognized correctly on images that are straight-on, it is time to move on to the next step. +Then it is recommended to use the `Face Library` tab in Frigate to select and train images for each person as they are detected. When building a strong foundation it is strongly recommended to only train on images that are straight-on. Ignore images from cameras that recognize faces from an angle. + +Aim to strike a balance between the quality of images while also having a range of conditions (day / night, different weather conditions, different times of day, etc.) in order to have diversity in the images used for each person and not have overfitting. + +Once a person starts to be consistently recognized correctly on images that are straight-on, it is time to move on to the next step. ### Step 2 - Expanding The Dataset Once straight-on images are performing well, start choosing slightly off-angle images to include for training. It is important to still choose images where enough face detail is visible to recognize someone. + +## FAQ + +### Why is every face tagged as a known face and not unknown? + +Any recognized face with a score >= `min_score` will show in the `Train` tab along with the recognition score. A low scoring face is effectively the same as `unknown`, but includes more information. This does not mean the recognition is not working well, and is part of the importance of choosing the correct `recognition_threshold`. + +### Why do unknown people score similarly to known people? + +This can happen for a few different reasons, but this is usually an indicator that the training set needs to be improved. This is often related to overfitting: +- If you train with only a few images per person, especially if those images are very similar, the recognition model becomes overly specialized to those specific images. +- When you provide images with different poses, lighting, and expressions, the algorithm extracts features that are consistent across those variations. 
+- By training on a diverse set of images, the algorithm becomes less sensitive to minor variations and noise in the input image. From 983dd87ffb1f4f73e8fa9e3b098b759698a8c396 Mon Sep 17 00:00:00 2001 From: leccelecce <24962424+leccelecce@users.noreply.github.com> Date: Tue, 25 Mar 2025 11:48:06 +0000 Subject: [PATCH 50/97] Revert go2rtc call checks for enabled camera state (#17355) * Revert "Fix camera enabled check (#17331)" This reverts commit 1e45f63a7ce8768f712a166a5f6a6a95c8e16bfa. * Revert "Fix webUI generating HTTP500s when camera disabled (#17305)" This reverts commit 644faaf65b9effbd0b1c6071cfb46209b9a35022. --- .../settings/CameraStreamingDialog.tsx | 9 +------- web/src/views/live/LiveCameraView.tsx | 21 +++++++++---------- 2 files changed, 11 insertions(+), 19 deletions(-) diff --git a/web/src/components/settings/CameraStreamingDialog.tsx b/web/src/components/settings/CameraStreamingDialog.tsx index cb16acaf2..b9bd8a4b6 100644 --- a/web/src/components/settings/CameraStreamingDialog.tsx +++ b/web/src/components/settings/CameraStreamingDialog.tsx @@ -1,5 +1,4 @@ import { useState, useCallback, useEffect, useMemo } from "react"; -import { useCameraActivity } from "@/hooks/use-camera-activity"; import { IoIosWarning } from "react-icons/io"; import { Button } from "@/components/ui/button"; import { @@ -24,7 +23,6 @@ import { Checkbox } from "@/components/ui/checkbox"; import { Label } from "@/components/ui/label"; import { FrigateConfig, - CameraConfig, GroupStreamingSettings, StreamType, } from "@/types/frigateConfig"; @@ -65,11 +63,6 @@ export function CameraStreamingDialog({ // metadata - // camera enabled state - const { enabled: isCameraEnabled } = useCameraActivity( - config?.cameras[camera] ?? ({} as CameraConfig), - ); - const isRestreamed = useMemo( () => config && @@ -78,7 +71,7 @@ export function CameraStreamingDialog({ ); const { data: cameraMetadata } = useSWR( - isCameraEnabled && isRestreamed ? `go2rtc/streams/${streamName}` : null, + isRestreamed ? `go2rtc/streams/${streamName}` : null, { revalidateOnFocus: false, }, diff --git a/web/src/views/live/LiveCameraView.tsx b/web/src/views/live/LiveCameraView.tsx index e067db157..34d61d684 100644 --- a/web/src/views/live/LiveCameraView.tsx +++ b/web/src/views/live/LiveCameraView.tsx @@ -112,7 +112,6 @@ import { SelectTrigger, } from "@/components/ui/select"; import { usePersistence } from "@/hooks/use-persistence"; -import { useCameraActivity } from "@/hooks/use-camera-activity"; import { Label } from "@/components/ui/label"; import { Switch } from "@/components/ui/switch"; import axios from "axios"; @@ -143,12 +142,8 @@ export default function LiveCameraView({ const [{ width: windowWidth, height: windowHeight }] = useResizeObserver(window); - // camera enabled state - const { enabled: isCameraEnabled } = useCameraActivity( - config?.cameras[camera.name] ?? ({} as CameraConfig), - ); - // supported features + const [streamName, setStreamName] = usePersistence( `${camera.name}-stream`, Object.values(camera.live.streams)[0], @@ -162,7 +157,7 @@ export default function LiveCameraView({ ); const { data: cameraMetadata } = useSWR( - isCameraEnabled && isRestreamed ? `go2rtc/streams/${streamName}` : null, + isRestreamed ? 
`go2rtc/streams/${streamName}` : null, { revalidateOnFocus: false, }, @@ -197,6 +192,10 @@ export default function LiveCameraView({ ); }, [cameraMetadata]); + // camera enabled state + const { payload: enabledState } = useEnabledState(camera.name); + const cameraEnabled = enabledState === "ON"; + // click overlay for ptzs const [clickOverlay, setClickOverlay] = useState(false); @@ -522,7 +521,7 @@ export default function LiveCameraView({ setPip(false); } }} - disabled={!isCameraEnabled} + disabled={!cameraEnabled} /> )} {supports2WayTalk && ( @@ -544,7 +543,7 @@ export default function LiveCameraView({ setAudio(true); } }} - disabled={!isCameraEnabled} + disabled={!cameraEnabled} /> )} {supportsAudioOutput && preferredLiveMode != "jsmpeg" && ( @@ -561,7 +560,7 @@ export default function LiveCameraView({ t("button.cameraAudio", { ns: "common" }) } onClick={() => setAudio(!audio)} - disabled={!isCameraEnabled} + disabled={!cameraEnabled} /> )}
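
The `isRestreamed ? ... : null` key restored above leans on SWR's conditional fetching: when the key is `null`, SWR skips the request entirely, so no go2rtc metadata call is made for cameras that are not restreamed. A minimal sketch of the idiom, with a hypothetical `StreamMetadata` shape:

```tsx
import useSWR from "swr";

// Hypothetical response shape, for illustration only.
type StreamMetadata = { producers: { url: string }[] };

function useStreamMetadata(streamName: string, isRestreamed: boolean) {
  // A null key tells SWR to skip fetching altogether.
  return useSWR<StreamMetadata>(
    isRestreamed ? `go2rtc/streams/${streamName}` : null,
    { revalidateOnFocus: false },
  );
}
```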
From 9080305070ff789b6f39feb478354deba948bdcb Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Tue, 25 Mar 2025 08:16:56 -0500 Subject: [PATCH 51/97] Increase the initial stall timeout of the MSE player (#17359) --- web/src/components/player/MsePlayer.tsx | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/web/src/components/player/MsePlayer.tsx b/web/src/components/player/MsePlayer.tsx index 554eb5af1..f3ef17a24 100644 --- a/web/src/components/player/MsePlayer.tsx +++ b/web/src/components/player/MsePlayer.tsx @@ -461,6 +461,7 @@ function MSEPlayer({ setBufferTimeout(undefined); } + const timeoutDuration = bufferTime == 0 ? 5000 : 3000; setBufferTimeout( setTimeout(() => { if ( @@ -471,7 +472,7 @@ function MSEPlayer({ onDisconnect(); onError("stalled"); } - }, 3000), + }, timeoutDuration), ); } }, [ From e80caabee64d6398d58c86bd2759d9d5d8003b35 Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Tue, 25 Mar 2025 11:46:24 -0500 Subject: [PATCH 52/97] Live and go2rtc docs updates (#17363) --- docs/docs/configuration/live.md | 4 +++- docs/docs/guides/configuring_go2rtc.md | 4 ++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/docs/docs/configuration/live.md b/docs/docs/configuration/live.md index cee8b3dca..b5fa0cdd7 100644 --- a/docs/docs/configuration/live.md +++ b/docs/docs/configuration/live.md @@ -205,7 +205,9 @@ Note that disabling a camera through the config file (`enabled: False`) removes When you have go2rtc configured, Live view initially attempts to load and play back your stream with a clearer, fluent stream technology (MSE). An initial timeout, a low bandwidth condition that would cause buffering of the stream, or decoding errors in the stream will cause Frigate to switch to the stream defined by the `detect` role, using the jsmpeg format. This is what the UI labels as "low bandwidth mode". On Live dashboards, the mode will automatically reset when smart streaming is configured and activity stops. Continuous streaming mode does not have an automatic reset mechanism, but you can use the _Reset_ option to force a reload of your stream. - If you are still experiencing Frigate falling back to low bandwidth mode, you may need to adjust your camera's settings per the (recommendations above)[#camera_settings_recommendations]. + If you are using continuous streaming or you are loading more than a few high resolution streams at once on the dashboard, your browser may struggle to begin playback of your streams before the timeout. Frigate always prioritizes showing a live stream as quickly as possible, even if it is a lower quality jsmpeg stream. You can use the "Reset" link/button to try loading your high resolution stream again. + + If you are still experiencing Frigate falling back to low bandwidth mode, you may need to adjust your camera's settings per the [recommendations above](#camera_settings_recommendations). 3. **It doesn't seem like my cameras are streaming on the Live dashboard. Why?** diff --git a/docs/docs/guides/configuring_go2rtc.md b/docs/docs/guides/configuring_go2rtc.md index 3b8a8af1f..652aa3b26 100644 --- a/docs/docs/guides/configuring_go2rtc.md +++ b/docs/docs/guides/configuring_go2rtc.md @@ -115,3 +115,7 @@ section. 1. 
If the stream you added to go2rtc is also used by Frigate for the `record` or `detect` role, you can migrate your config to pull from the RTSP restream to reduce the number of connections to your camera as shown [here](/configuration/restream#reduce-connections-to-camera). 2. You may also prefer to [setup WebRTC](/configuration/live#webrtc-extra-configuration) for slightly lower latency than MSE. Note that WebRTC only supports h264 and specific audio formats and may require opening ports on your router. + +## Important considerations + +If you are configuring go2rtc to publish HomeKit camera streams, on pairing the configuration is written to the `/dev/shm/go2rtc.yaml` file inside the container. These changes must be manually copied across to the `go2rtc` section of your Frigate configuration in order to persist through restarts. From 2c3ea5b74e3e512bce78b138b7f1eb93a3ad0426 Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Tue, 25 Mar 2025 15:08:40 -0500 Subject: [PATCH 53/97] Make all object path points clickable (#17367) --- .../overlay/detail/ObjectLifecycle.tsx | 17 ++++++++++++++--- .../components/overlay/detail/ObjectPath.tsx | 6 ++---- 2 files changed, 16 insertions(+), 7 deletions(-) diff --git a/web/src/components/overlay/detail/ObjectLifecycle.tsx b/web/src/components/overlay/detail/ObjectLifecycle.tsx index c359ee91d..0f77ecfbf 100644 --- a/web/src/components/overlay/detail/ObjectLifecycle.tsx +++ b/web/src/components/overlay/detail/ObjectLifecycle.tsx @@ -285,9 +285,16 @@ export default function ObjectLifecycle({ useEffect(() => { if (eventSequence && eventSequence.length > 0) { - setTimeIndex(eventSequence?.[current].timestamp); - handleSetBox(eventSequence?.[current].data.box ?? []); - setLifecycleZones(eventSequence?.[current].data.zones); + if (current == -1) { + // normal path point + setBoxStyle(null); + setLifecycleZones([]); + } else { + // lifecycle point + setTimeIndex(eventSequence?.[current].timestamp); + handleSetBox(eventSequence?.[current].data.box ?? []); + setLifecycleZones(eventSequence?.[current].data.zones); + } setSelectedZone(""); } }, [current, imgLoaded, handleSetBox, eventSequence]); @@ -322,6 +329,10 @@ export default function ObjectLifecycle({ mainApi.scrollTo(sequenceIndex); thumbnailApi.scrollTo(sequenceIndex); setCurrent(sequenceIndex); + } else { + // click on a normal path point, not a lifecycle point + setCurrent(-1); + setTimeIndex(pathPoints[index].timestamp); } }, [mainApi, thumbnailApi, eventSequence, pathPoints], diff --git a/web/src/components/overlay/detail/ObjectPath.tsx b/web/src/components/overlay/detail/ObjectPath.tsx index 80f454470..f8ee02080 100644 --- a/web/src/components/overlay/detail/ObjectPath.tsx +++ b/web/src/components/overlay/detail/ObjectPath.tsx @@ -95,10 +95,8 @@ export function ObjectPath({ fill={getPointColor(color, pos.lifecycle_item?.class_type)} stroke="white" strokeWidth={width / 2} - onClick={() => - pos.lifecycle_item && onPointClick && onPointClick(index) - } - style={{ cursor: pos.lifecycle_item ? 
"pointer" : "default" }} + onClick={() => onPointClick && onPointClick(index)} + style={{ cursor: "pointer" }} /> From b18d1fb9709a3d2585c3c3a7c47496bf37d29ca2 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Tue, 25 Mar 2025 18:59:03 -0600 Subject: [PATCH 54/97] Refactor face recognition (#17368) * Refactor face recognition to allow for running lbph or embedding * Cleanup * Use weighted average for faces * Set correct url * Cleanup * Update docs * Update docs * Use scipy trimmed mean * Normalize * Handle color and gray landmark detection * Upgrade to new arcface model * Implement sigmoid function * Rename * Rename to arcface * Fix * Add face recognition model size to ui config * Update toast --- docs/docs/configuration/face_recognition.md | 19 +- frigate/config/classification.py | 3 + frigate/data_processing/common/face/model.py | 308 ++++++++++++++++++ frigate/data_processing/real_time/face.py | 252 ++++---------- frigate/embeddings/onnx/base_embedding.py | 2 + frigate/embeddings/onnx/facenet.py | 98 ++++++ web/public/locales/en/views/settings.json | 16 +- web/src/types/frigateConfig.ts | 1 + .../settings/ClassificationSettingsView.tsx | 62 +++- 9 files changed, 573 insertions(+), 188 deletions(-) create mode 100644 frigate/data_processing/common/face/model.py create mode 100644 frigate/embeddings/onnx/facenet.py diff --git a/docs/docs/configuration/face_recognition.md b/docs/docs/configuration/face_recognition.md index 472cacada..a3cb4e308 100644 --- a/docs/docs/configuration/face_recognition.md +++ b/docs/docs/configuration/face_recognition.md @@ -7,7 +7,7 @@ Face recognition identifies known individuals by matching detected faces with pr ## Model Requirements -Frigate has support for CV2 Local Binary Pattern Face Recognizer to recognize faces, which runs locally. A lightweight face landmark detection model is also used to align faces before running them through the face recognizer. +### Face Detection Users running a Frigate+ model (or any custom model that natively detects faces) should ensure that `face` is added to the [list of objects to track](../plus/#available-label-types) either globally or for a specific camera. This will allow face detection to run at the same time as object detection and be more efficient. @@ -19,9 +19,19 @@ Frigate needs to first detect a `face` before it can recognize a face. ::: +### Face Recognition + +Frigate has support for two face recognition model types: +- **small**: Frigate will use CV2 Local Binary Pattern Face Recognizer to recognize faces, which runs locally on the CPU. +- **large**: Frigate will run a face embedding model, this is only recommended to be run when an integrated or dedicated GPU is available. + +In both cases a lightweight face landmark detection model is also used to align faces before running them through the face recognizer. + ## Minimum System Requirements -Face recognition is lightweight and runs on the CPU, there are no significantly different system requirements than running Frigate itself. +Face recognition is lightweight and runs on the CPU, there are no significantly different system requirements than running Frigate itself when using the `small` model. + +When using the `large` model an integrated or discrete GPU is recommended. 
## Configuration @@ -47,6 +57,7 @@ Fine-tune face recognition with these optional parameters: ### Recognition +- `model_size`: Which model size to use, options are `small` or `large` - `recognition_threshold`: Recognition confidence score required to add the face to the object as a sub label. - Default: `0.9`. - `blur_confidence_filter`: Enables a filter that calculates how blurry the face is and adjusts the confidence based on this. @@ -107,3 +118,7 @@ This can happen for a few different reasons, but this is usually an indicator th - If you train with only a few images per person, especially if those images are very similar, the recognition model becomes overly specialized to those specific images. - When you provide images with different poses, lighting, and expressions, the algorithm extracts features that are consistent across those variations. - By training on a diverse set of images, the algorithm becomes less sensitive to minor variations and noise in the input image. + +### I see scores above the threshold in the train tab, but a sub label wasn't assigned? + +The Frigate face recognizer collects face recognition scores from all of the frames across the person objects lifecycle. The scores are continually weighted based on the area of the face, and a sub label will only be assigned to person if there is a prominent person recognized. This avoids cases where a single high confidence recognition result would throw off the results. diff --git a/frigate/config/classification.py b/frigate/config/classification.py index 0070569a8..25c379546 100644 --- a/frigate/config/classification.py +++ b/frigate/config/classification.py @@ -51,6 +51,9 @@ class SemanticSearchConfig(FrigateBaseModel): class FaceRecognitionConfig(FrigateBaseModel): enabled: bool = Field(default=False, title="Enable face recognition.") + model_size: str = Field( + default="small", title="The size of the embeddings model used." 
+ ) min_score: float = Field( title="Minimum face distance score required to save the attempt.", default=0.8, diff --git a/frigate/data_processing/common/face/model.py b/frigate/data_processing/common/face/model.py new file mode 100644 index 000000000..a006734ce --- /dev/null +++ b/frigate/data_processing/common/face/model.py @@ -0,0 +1,308 @@ +import logging +import os +from abc import ABC, abstractmethod + +import cv2 +import numpy as np +from scipy import stats + +from frigate.config import FrigateConfig +from frigate.const import MODEL_CACHE_DIR +from frigate.embeddings.onnx.facenet import ArcfaceEmbedding + +logger = logging.getLogger(__name__) + + +class FaceRecognizer(ABC): + """Face recognition runner.""" + + def __init__(self, config: FrigateConfig) -> None: + self.config = config + self.landmark_detector = cv2.face.createFacemarkLBF() + self.landmark_detector.loadModel( + os.path.join(MODEL_CACHE_DIR, "facedet/landmarkdet.yaml") + ) + + @abstractmethod + def build(self) -> None: + """Build face recognition model.""" + pass + + @abstractmethod + def clear(self) -> None: + """Clear current built model.""" + pass + + @abstractmethod + def classify(self, face_image: np.ndarray) -> tuple[str, float] | None: + pass + + def align_face( + self, + image: np.ndarray, + output_width: int, + output_height: int, + ) -> np.ndarray: + # landmark is run on grayscale images + + if image.ndim == 3: + land_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) + else: + land_image = image + + _, lands = self.landmark_detector.fit( + land_image, np.array([(0, 0, land_image.shape[1], land_image.shape[0])]) + ) + landmarks: np.ndarray = lands[0][0] + + # get landmarks for eyes + leftEyePts = landmarks[42:48] + rightEyePts = landmarks[36:42] + + # compute the center of mass for each eye + leftEyeCenter = leftEyePts.mean(axis=0).astype("int") + rightEyeCenter = rightEyePts.mean(axis=0).astype("int") + + # compute the angle between the eye centroids + dY = rightEyeCenter[1] - leftEyeCenter[1] + dX = rightEyeCenter[0] - leftEyeCenter[0] + angle = np.degrees(np.arctan2(dY, dX)) - 180 + + # compute the desired right eye x-coordinate based on the + # desired x-coordinate of the left eye + desiredRightEyeX = 1.0 - 0.35 + + # determine the scale of the new resulting image by taking + # the ratio of the distance between eyes in the *current* + # image to the ratio of distance between eyes in the + # *desired* image + dist = np.sqrt((dX**2) + (dY**2)) + desiredDist = desiredRightEyeX - 0.35 + desiredDist *= output_width + scale = desiredDist / dist + + # compute center (x, y)-coordinates (i.e., the median point) + # between the two eyes in the input image + # grab the rotation matrix for rotating and scaling the face + eyesCenter = ( + int((leftEyeCenter[0] + rightEyeCenter[0]) // 2), + int((leftEyeCenter[1] + rightEyeCenter[1]) // 2), + ) + M = cv2.getRotationMatrix2D(eyesCenter, angle, scale) + + # update the translation component of the matrix + tX = output_width * 0.5 + tY = output_height * 0.35 + M[0, 2] += tX - eyesCenter[0] + M[1, 2] += tY - eyesCenter[1] + + # apply the affine transformation + return cv2.warpAffine( + image, M, (output_width, output_height), flags=cv2.INTER_CUBIC + ) + + def get_blur_factor(self, input: np.ndarray) -> float: + """Calculates the factor for the confidence based on the blur of the image.""" + if not self.config.face_recognition.blur_confidence_filter: + return 1.0 + + variance = cv2.Laplacian(input, cv2.CV_64F).var() + + if variance < 60: # image is very blurry + return 0.96 + 
elif variance < 70: # image moderately blurry + return 0.98 + elif variance < 80: # image is slightly blurry + return 0.99 + else: + return 1.0 + + +class LBPHRecognizer(FaceRecognizer): + def __init__(self, config: FrigateConfig): + super().__init__(config) + self.label_map: dict[int, str] = {} + self.recognizer: cv2.face.LBPHFaceRecognizer | None = None + + def clear(self) -> None: + self.face_recognizer = None + self.label_map = {} + + def build(self): + if not self.landmark_detector: + return None + + labels = [] + faces = [] + idx = 0 + + dir = "/media/frigate/clips/faces" + for name in os.listdir(dir): + if name == "train": + continue + + face_folder = os.path.join(dir, name) + + if not os.path.isdir(face_folder): + continue + + self.label_map[idx] = name + for image in os.listdir(face_folder): + img = cv2.imread(os.path.join(face_folder, image)) + + if img is None: + continue + + img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) + img = self.align_face(img, img.shape[1], img.shape[0]) + faces.append(img) + labels.append(idx) + + idx += 1 + + if not faces: + return + + self.recognizer: cv2.face.LBPHFaceRecognizer = ( + cv2.face.LBPHFaceRecognizer_create( + radius=2, threshold=(1 - self.config.face_recognition.min_score) * 1000 + ) + ) + self.recognizer.train(faces, np.array(labels)) + + def classify(self, face_image: np.ndarray) -> tuple[str, float] | None: + if not self.landmark_detector: + return None + + if not self.label_map or not self.recognizer: + self.build() + + if not self.recognizer: + return None + + # face recognition is best run on grayscale images + img = cv2.cvtColor(face_image, cv2.COLOR_BGR2GRAY) + + # get blur factor before aligning face + blur_factor = self.get_blur_factor(img) + logger.debug(f"face detected with bluriness {blur_factor}") + + # align face and run recognition + img = self.align_face(img, img.shape[1], img.shape[0]) + index, distance = self.recognizer.predict(img) + + if index == -1: + return None + + score = (1.0 - (distance / 1000)) * blur_factor + return self.label_map[index], round(score, 2) + + +class ArcFaceRecognizer(FaceRecognizer): + def __init__(self, config: FrigateConfig): + super().__init__(config) + self.mean_embs: dict[int, np.ndarray] = {} + self.face_embedder: ArcfaceEmbedding = ArcfaceEmbedding() + + def clear(self) -> None: + self.mean_embs = {} + + def build(self): + if not self.landmark_detector: + return None + + face_embeddings_map: dict[str, list[np.ndarray]] = {} + idx = 0 + + dir = "/media/frigate/clips/faces" + for name in os.listdir(dir): + if name == "train": + continue + + face_folder = os.path.join(dir, name) + + if not os.path.isdir(face_folder): + continue + + face_embeddings_map[name] = [] + for image in os.listdir(face_folder): + img = cv2.imread(os.path.join(face_folder, image)) + + if img is None: + continue + + img = self.align_face(img, img.shape[1], img.shape[0]) + emb = self.face_embedder([img])[0].squeeze() + face_embeddings_map[name].append(emb) + + idx += 1 + + if not face_embeddings_map: + return + + for name, embs in face_embeddings_map.items(): + self.mean_embs[name] = stats.trim_mean(embs, 0.15) + + def similarity_to_confidence( + self, cosine_similarity: float, median=0.3, range_width=0.6, slope_factor=12 + ): + """ + Default sigmoid function to map cosine similarity to confidence. + + Args: + cosine_similarity (float): The input cosine similarity. + median (float): Assumed median of cosine similarity distribution. 
+ range_width (float): Assumed range of cosine similarity distribution (90th percentile - 10th percentile). + slope_factor (float): Adjusts the steepness of the curve. + + Returns: + float: The confidence score. + """ + + # Calculate slope and bias + slope = slope_factor / range_width + bias = median + + # Calculate confidence + confidence = 1 / (1 + np.exp(-slope * (cosine_similarity - bias))) + return confidence + + def classify(self, face_image): + if not self.landmark_detector: + return None + + if not self.mean_embs: + self.build() + + if not self.mean_embs: + return None + + # face recognition is best run on grayscale images + + # get blur factor before aligning face + blur_factor = self.get_blur_factor(face_image) + logger.debug(f"face detected with bluriness {blur_factor}") + + # align face and run recognition + img = self.align_face(face_image, face_image.shape[1], face_image.shape[0]) + embedding = self.face_embedder([img])[0].squeeze() + + score = 0 + label = "" + + for name, mean_emb in self.mean_embs.items(): + dot_product = np.dot(embedding, mean_emb) + magnitude_A = np.linalg.norm(embedding) + magnitude_B = np.linalg.norm(mean_emb) + + cosine_similarity = dot_product / (magnitude_A * magnitude_B) + confidence = self.similarity_to_confidence(cosine_similarity) + + if cosine_similarity > score: + score = confidence + label = name + + if score < self.config.face_recognition.min_score: + return None + + return label, round(score * blur_factor, 2) diff --git a/frigate/data_processing/real_time/face.py b/frigate/data_processing/real_time/face.py index 7b49a2f47..e20dad633 100644 --- a/frigate/data_processing/real_time/face.py +++ b/frigate/data_processing/real_time/face.py @@ -19,6 +19,11 @@ from frigate.comms.event_metadata_updater import ( ) from frigate.config import FrigateConfig from frigate.const import FACE_DIR, MODEL_CACHE_DIR +from frigate.data_processing.common.face.model import ( + ArcFaceRecognizer, + FaceRecognizer, + LBPHRecognizer, +) from frigate.util.image import area from ..types import DataProcessorMetrics @@ -31,6 +36,36 @@ MAX_DETECTION_HEIGHT = 1080 MIN_MATCHING_FACES = 2 +def weighted_average_by_area(results_list: list[tuple[str, float, int]]): + if len(results_list) < 3: + return "unknown", 0.0 + + score_count = {} + weighted_scores = {} + total_face_areas = {} + + for name, score, face_area in results_list: + if name not in weighted_scores: + score_count[name] = 1 + weighted_scores[name] = 0.0 + total_face_areas[name] = 0.0 + else: + score_count[name] += 1 + + weighted_scores[name] += score * face_area + total_face_areas[name] += face_area + + prominent_name = max(score_count) + + # if a single name is not prominent in the history then we are not confident + if score_count[prominent_name] / len(results_list) < 0.65: + return "unknown", 0.0 + + return prominent_name, weighted_scores[prominent_name] / total_face_areas[ + prominent_name + ] + + class FaceRealTimeProcessor(RealTimeProcessorApi): def __init__( self, @@ -42,10 +77,9 @@ class FaceRealTimeProcessor(RealTimeProcessorApi): self.face_config = config.face_recognition self.sub_label_publisher = sub_label_publisher self.face_detector: cv2.FaceDetectorYN = None - self.landmark_detector: cv2.face.FacemarkLBF = None - self.recognizer: cv2.face.LBPHFaceRecognizer = None self.requires_face_detection = "face" not in self.config.objects.all_objects - self.detected_faces: dict[str, float] = {} + self.person_face_history: dict[str, list[tuple[str, float, int]]] = {} + self.recognizer: FaceRecognizer | None = 
None download_path = os.path.join(MODEL_CACHE_DIR, "facedet") self.model_files = { @@ -72,7 +106,13 @@ class FaceRealTimeProcessor(RealTimeProcessorApi): self.__build_detector() self.label_map: dict[int, str] = {} - self.__build_classifier() + + if self.face_config.model_size == "small": + self.recognizer = LBPHRecognizer(self.config) + else: + self.recognizer = ArcFaceRecognizer(self.config) + + self.recognizer.build() def __download_models(self, path: str) -> None: try: @@ -92,126 +132,6 @@ class FaceRealTimeProcessor(RealTimeProcessorApi): score_threshold=0.5, nms_threshold=0.3, ) - self.landmark_detector = cv2.face.createFacemarkLBF() - self.landmark_detector.loadModel( - os.path.join(MODEL_CACHE_DIR, "facedet/landmarkdet.yaml") - ) - - def __build_classifier(self) -> None: - if not self.landmark_detector: - return None - - labels = [] - faces = [] - - dir = "/media/frigate/clips/faces" - for idx, name in enumerate(os.listdir(dir)): - if name == "train": - continue - - face_folder = os.path.join(dir, name) - - if not os.path.isdir(face_folder): - continue - - self.label_map[idx] = name - for image in os.listdir(face_folder): - img = cv2.imread(os.path.join(face_folder, image)) - - if img is None: - continue - - img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) - img = self.__align_face(img, img.shape[1], img.shape[0]) - faces.append(img) - labels.append(idx) - - if not faces: - return - - self.recognizer: cv2.face.LBPHFaceRecognizer = ( - cv2.face.LBPHFaceRecognizer_create( - radius=2, threshold=(1 - self.face_config.min_score) * 1000 - ) - ) - self.recognizer.train(faces, np.array(labels)) - - def __align_face( - self, - image: np.ndarray, - output_width: int, - output_height: int, - ) -> np.ndarray: - _, lands = self.landmark_detector.fit( - image, np.array([(0, 0, image.shape[1], image.shape[0])]) - ) - landmarks: np.ndarray = lands[0][0] - - # get landmarks for eyes - leftEyePts = landmarks[42:48] - rightEyePts = landmarks[36:42] - - # compute the center of mass for each eye - leftEyeCenter = leftEyePts.mean(axis=0).astype("int") - rightEyeCenter = rightEyePts.mean(axis=0).astype("int") - - # compute the angle between the eye centroids - dY = rightEyeCenter[1] - leftEyeCenter[1] - dX = rightEyeCenter[0] - leftEyeCenter[0] - angle = np.degrees(np.arctan2(dY, dX)) - 180 - - # compute the desired right eye x-coordinate based on the - # desired x-coordinate of the left eye - desiredRightEyeX = 1.0 - 0.35 - - # determine the scale of the new resulting image by taking - # the ratio of the distance between eyes in the *current* - # image to the ratio of distance between eyes in the - # *desired* image - dist = np.sqrt((dX**2) + (dY**2)) - desiredDist = desiredRightEyeX - 0.35 - desiredDist *= output_width - scale = desiredDist / dist - - # compute center (x, y)-coordinates (i.e., the median point) - # between the two eyes in the input image - # grab the rotation matrix for rotating and scaling the face - eyesCenter = ( - int((leftEyeCenter[0] + rightEyeCenter[0]) // 2), - int((leftEyeCenter[1] + rightEyeCenter[1]) // 2), - ) - M = cv2.getRotationMatrix2D(eyesCenter, angle, scale) - - # update the translation component of the matrix - tX = output_width * 0.5 - tY = output_height * 0.35 - M[0, 2] += tX - eyesCenter[0] - M[1, 2] += tY - eyesCenter[1] - - # apply the affine transformation - return cv2.warpAffine( - image, M, (output_width, output_height), flags=cv2.INTER_CUBIC - ) - - def __get_blur_factor(self, input: np.ndarray) -> float: - """Calculates the factor for the confidence based on the 
blur of the image.""" - if not self.face_config.blur_confidence_filter: - return 1.0 - - variance = cv2.Laplacian(input, cv2.CV_64F).var() - - if variance < 60: # image is very blurry - return 0.96 - elif variance < 70: # image moderately blurry - return 0.98 - elif variance < 80: # image is slightly blurry - return 0.99 - else: - return 1.0 - - def __clear_classifier(self) -> None: - self.face_recognizer = None - self.label_map = {} def __detect_face( self, input: np.ndarray, threshold: float @@ -254,33 +174,6 @@ class FaceRealTimeProcessor(RealTimeProcessorApi): return face - def __classify_face(self, face_image: np.ndarray) -> tuple[str, float] | None: - if not self.landmark_detector: - return None - - if not self.label_map or not self.recognizer: - self.__build_classifier() - - if not self.recognizer: - return None - - # face recognition is best run on grayscale images - img = cv2.cvtColor(face_image, cv2.COLOR_BGR2GRAY) - - # get blur factor before aligning face - blur_factor = self.__get_blur_factor(img) - logger.debug(f"face detected with bluriness {blur_factor}") - - # align face and run recognition - img = self.__align_face(img, img.shape[1], img.shape[0]) - index, distance = self.recognizer.predict(img) - - if index == -1: - return None - - score = (1.0 - (distance / 1000)) * blur_factor - return self.label_map[index], round(score, 2) - def __update_metrics(self, duration: float) -> None: self.metrics.face_rec_fps.value = ( self.metrics.face_rec_fps.value * 9 + duration @@ -301,7 +194,7 @@ class FaceRealTimeProcessor(RealTimeProcessorApi): # don't overwrite sub label for objects that have a sub label # that is not a face - if obj_data.get("sub_label") and id not in self.detected_faces: + if obj_data.get("sub_label") and id not in self.person_face_history: logger.debug( f"Not processing face due to existing sub label: {obj_data.get('sub_label')}." 
) @@ -370,53 +263,46 @@ class FaceRealTimeProcessor(RealTimeProcessorApi): max(0, face_box[0]) : min(frame.shape[1], face_box[2]), ] - res = self.__classify_face(face_frame) + res = self.recognizer.classify(face_frame) if not res: + self.__update_metrics(datetime.datetime.now().timestamp() - start) return sub_label, score = res - # calculate the overall face score as the probability * area of face - # this will help to reduce false positives from small side-angle faces - # if a large front-on face image may have scored slightly lower but - # is more likely to be accurate due to the larger face area - face_score = round(score * face_frame.shape[0] * face_frame.shape[1], 2) - logger.debug( - f"Detected best face for person as: {sub_label} with probability {score} and overall face score {face_score}" + f"Detected best face for person as: {sub_label} with probability {score}" ) if self.config.face_recognition.save_attempts: # write face to library folder = os.path.join(FACE_DIR, "train") - file = os.path.join(folder, f"{id}-{sub_label}-{score}-{face_score}.webp") + file = os.path.join(folder, f"{id}-{sub_label}-{score}-0.webp") os.makedirs(folder, exist_ok=True) cv2.imwrite(file, face_frame) - if score < self.config.face_recognition.recognition_threshold: - logger.debug( - f"Recognized face distance {score} is less than threshold {self.config.face_recognition.recognition_threshold}" - ) - self.__update_metrics(datetime.datetime.now().timestamp() - start) - return + if id not in self.person_face_history: + self.person_face_history[id] = [] - if id in self.detected_faces and face_score <= self.detected_faces[id]: - logger.debug( - f"Recognized face distance {score} and overall score {face_score} is less than previous overall face score ({self.detected_faces.get(id)})." 
- ) - self.__update_metrics(datetime.datetime.now().timestamp() - start) - return - - self.sub_label_publisher.publish( - EventMetadataTypeEnum.sub_label, (id, sub_label, score) + self.person_face_history[id].append( + (sub_label, score, face_frame.shape[0] * face_frame.shape[1]) ) - self.detected_faces[id] = face_score + (weighted_sub_label, weighted_score) = weighted_average_by_area( + self.person_face_history[id] + ) + + if weighted_score >= self.face_config.recognition_threshold: + self.sub_label_publisher.publish( + EventMetadataTypeEnum.sub_label, + (id, weighted_sub_label, weighted_score), + ) + self.__update_metrics(datetime.datetime.now().timestamp() - start) def handle_request(self, topic, request_data) -> dict[str, any] | None: if topic == EmbeddingsRequestEnum.clear_face_classifier.value: - self.__clear_classifier() + self.recognizer.clear() elif topic == EmbeddingsRequestEnum.recognize_face.value: img = cv2.imdecode( np.frombuffer(base64.b64decode(request_data["image"]), dtype=np.uint8), @@ -431,7 +317,7 @@ class FaceRealTimeProcessor(RealTimeProcessorApi): return {"message": "No face was detected.", "success": False} face = img[face_box[1] : face_box[3], face_box[0] : face_box[2]] - res = self.__classify_face(face) + res = self.recognizer.classify(face) if not res: return {"success": False, "message": "No face was recognized."} @@ -480,7 +366,7 @@ class FaceRealTimeProcessor(RealTimeProcessorApi): with open(file, "wb") as output: output.write(thumbnail.tobytes()) - self.__clear_classifier() + self.recognizer.clear() return { "message": "Successfully registered face.", "success": True, @@ -500,7 +386,7 @@ class FaceRealTimeProcessor(RealTimeProcessorApi): "success": False, } - res = self.__classify_face(img) + res = self.recognizer.classify(img) if not res: return @@ -527,5 +413,5 @@ class FaceRealTimeProcessor(RealTimeProcessorApi): os.unlink(os.path.join(folder, files[-1])) def expire_object(self, object_id: str): - if object_id in self.detected_faces: - self.detected_faces.pop(object_id) + if object_id in self.person_face_history: + self.person_face_history.pop(object_id) diff --git a/frigate/embeddings/onnx/base_embedding.py b/frigate/embeddings/onnx/base_embedding.py index a2ea92674..7403f0ac1 100644 --- a/frigate/embeddings/onnx/base_embedding.py +++ b/frigate/embeddings/onnx/base_embedding.py @@ -69,6 +69,8 @@ class BaseEmbedding(ABC): image = Image.open(BytesIO(response.content)).convert(output) elif isinstance(image, bytes): image = Image.open(BytesIO(image)).convert(output) + elif isinstance(image, np.ndarray): + image = Image.fromarray(image) return image diff --git a/frigate/embeddings/onnx/facenet.py b/frigate/embeddings/onnx/facenet.py new file mode 100644 index 000000000..3439620a0 --- /dev/null +++ b/frigate/embeddings/onnx/facenet.py @@ -0,0 +1,98 @@ +"""Facenet Embeddings.""" + +import logging +import os + +import numpy as np + +from frigate.const import MODEL_CACHE_DIR +from frigate.util.downloader import ModelDownloader + +from .base_embedding import BaseEmbedding +from .runner import ONNXModelRunner + +logger = logging.getLogger(__name__) + +FACE_EMBEDDING_SIZE = 112 + + +class ArcfaceEmbedding(BaseEmbedding): + def __init__( + self, + device: str = "AUTO", + ): + super().__init__( + model_name="facedet", + model_file="arcface.onnx", + download_urls={ + "arcface.onnx": "https://github.com/NickM-27/facenet-onnx/releases/download/v1.0/arcface.onnx", + }, + ) + self.device = device + self.download_path = os.path.join(MODEL_CACHE_DIR, self.model_name) + 
self.tokenizer = None + self.feature_extractor = None + self.runner = None + files_names = list(self.download_urls.keys()) + + if not all( + os.path.exists(os.path.join(self.download_path, n)) for n in files_names + ): + logger.debug(f"starting model download for {self.model_name}") + self.downloader = ModelDownloader( + model_name=self.model_name, + download_path=self.download_path, + file_names=files_names, + download_func=self._download_model, + ) + self.downloader.ensure_model_files() + else: + self.downloader = None + self._load_model_and_utils() + logger.debug(f"models are already downloaded for {self.model_name}") + + def _load_model_and_utils(self): + if self.runner is None: + if self.downloader: + self.downloader.wait_for_download() + + self.runner = ONNXModelRunner( + os.path.join(self.download_path, self.model_file), + self.device, + ) + + def _preprocess_inputs(self, raw_inputs): + pil = self._process_image(raw_inputs[0]) + + # handle images larger than input size + width, height = pil.size + if width != FACE_EMBEDDING_SIZE or height != FACE_EMBEDDING_SIZE: + if width > height: + new_height = int(((height / width) * FACE_EMBEDDING_SIZE) // 4 * 4) + pil = pil.resize((FACE_EMBEDDING_SIZE, new_height)) + else: + new_width = int(((width / height) * FACE_EMBEDDING_SIZE) // 4 * 4) + pil = pil.resize((new_width, FACE_EMBEDDING_SIZE)) + + og = np.array(pil).astype(np.float32) + + # Image must be FACE_EMBEDDING_SIZExFACE_EMBEDDING_SIZE + og_h, og_w, channels = og.shape + frame = np.zeros( + (FACE_EMBEDDING_SIZE, FACE_EMBEDDING_SIZE, channels), dtype=np.float32 + ) + + # compute center offset + x_center = (FACE_EMBEDDING_SIZE - og_w) // 2 + y_center = (FACE_EMBEDDING_SIZE - og_h) // 2 + + # copy img image into center of result image + frame[y_center : y_center + og_h, x_center : x_center + og_w] = og + + # run arcface normalization + normalized_image = frame.astype(np.float32) / 255.0 + frame = (normalized_image - 0.5) / 0.5 + + frame = np.transpose(frame, (2, 0, 1)) + frame = np.expand_dims(frame, axis=0) + return [{"data": frame}] diff --git a/web/public/locales/en/views/settings.json b/web/public/locales/en/views/settings.json index d642c12e6..ffb2434ff 100644 --- a/web/public/locales/en/views/settings.json +++ b/web/public/locales/en/views/settings.json @@ -107,7 +107,19 @@ "faceRecognition": { "title": "Face Recognition", "desc": "Face recognition allows people to be assigned names and when their face is recognized Frigate will assign the person's name as a sub label. This information is included in the UI, filters, as well as in notifications.", - "readTheDocumentation": "Read the Documentation" + "readTheDocumentation": "Read the Documentation", + "modelSize": { + "label": "Model Size", + "desc": "The size of the model used for face recognition.", + "small": { + "title": "small", + "desc": "Using small employs a Local Binary Pattern Histogram model via OpenCV that runs efficiently on most CPUs." + }, + "large": { + "title": "large", + "desc": "Using large employs an ArcFace Face embedding model and will automatically run on the GPU if applicable." + } + } }, "licensePlateRecognition": { "title": "License Plate Recognition", @@ -115,7 +127,7 @@ "readTheDocumentation": "Read the Documentation" }, "toast": { - "success": "Classification settings have been saved.", + "success": "Classification settings have been saved. 
Restart Frigate to apply your changes.", "error": "Failed to save config changes: {{errorMessage}}" } }, diff --git a/web/src/types/frigateConfig.ts b/web/src/types/frigateConfig.ts index 947c5cde9..5312bed8c 100644 --- a/web/src/types/frigateConfig.ts +++ b/web/src/types/frigateConfig.ts @@ -333,6 +333,7 @@ export interface FrigateConfig { face_recognition: { enabled: boolean; + model_size: SearchModelSize; detection_threshold: number; recognition_threshold: number; }; diff --git a/web/src/views/settings/ClassificationSettingsView.tsx b/web/src/views/settings/ClassificationSettingsView.tsx index 07926182b..2ce958f9e 100644 --- a/web/src/views/settings/ClassificationSettingsView.tsx +++ b/web/src/views/settings/ClassificationSettingsView.tsx @@ -30,6 +30,7 @@ type ClassificationSettings = { }; face: { enabled?: boolean; + model_size?: SearchModelSize; }; lpr: { enabled?: boolean; @@ -59,6 +60,7 @@ export default function ClassificationSettingsView({ }, face: { enabled: undefined, + model_size: undefined, }, lpr: { enabled: undefined, @@ -74,6 +76,7 @@ export default function ClassificationSettingsView({ }, face: { enabled: undefined, + model_size: undefined, }, lpr: { enabled: undefined, @@ -91,6 +94,7 @@ export default function ClassificationSettingsView({ }, face: { enabled: config.face_recognition.enabled, + model_size: config.face_recognition.model_size, }, lpr: { enabled: config.lpr.enabled, @@ -106,6 +110,7 @@ export default function ClassificationSettingsView({ }, face: { enabled: config.face_recognition.enabled, + model_size: config.face_recognition.model_size, }, lpr: { enabled: config.lpr.enabled, @@ -136,7 +141,7 @@ export default function ClassificationSettingsView({ axios .put( - `config/set?semantic_search.enabled=${classificationSettings.search.enabled ? "True" : "False"}&semantic_search.reindex=${classificationSettings.search.reindex ? "True" : "False"}&semantic_search.model_size=${classificationSettings.search.model_size}&face_recognition.enabled=${classificationSettings.face.enabled ? "True" : "False"}&lpr.enabled=${classificationSettings.lpr.enabled ? "True" : "False"}`, + `config/set?semantic_search.enabled=${classificationSettings.search.enabled ? "True" : "False"}&semantic_search.reindex=${classificationSettings.search.reindex ? "True" : "False"}&semantic_search.model_size=${classificationSettings.search.model_size}&face_recognition.enabled=${classificationSettings.face.enabled ? "True" : "False"}&face_recognition.model_size=${classificationSettings.face.model_size}&lpr.enabled=${classificationSettings.lpr.enabled ? "True" : "False"}`, { requires_restart: 0, }, @@ -384,6 +389,61 @@ export default function ClassificationSettingsView({
+ {t("classification.faceRecognition.modelSize.label")}
+ classification.faceRecognition.modelSize.desc
+ • classification.faceRecognition.modelSize.small.desc
+ • classification.faceRecognition.modelSize.large.desc
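The sigmoid that `ArcfaceEmbedding`-based recognition uses in `similarity_to_confidence` above can be sanity-checked in isolation. Below is a minimal standalone sketch using the same defaults from the patch (median 0.3, range width 0.6, slope factor 12); the sample similarities are illustrative only:

```python
import numpy as np


def similarity_to_confidence(
    cosine_similarity: float,
    median: float = 0.3,
    range_width: float = 0.6,
    slope_factor: float = 12,
) -> float:
    """Map a cosine similarity onto a 0-1 confidence with a sigmoid."""
    slope = slope_factor / range_width
    return float(1 / (1 + np.exp(-slope * (cosine_similarity - median))))


for sim in (0.0, 0.3, 0.5, 0.7):
    print(f"similarity {sim:.1f} -> confidence {similarity_to_confidence(sim):.3f}")
```

Similarities near the assumed median land at roughly 0.5 confidence, while values toward either tail saturate quickly, which is what makes the later threshold comparison meaningful.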
From bfee030d7b40efc559fba0b442152db6d19ceec2 Mon Sep 17 00:00:00 2001 From: GuoQing Liu <842607283@qq.com> Date: Wed, 26 Mar 2025 18:57:56 +0800 Subject: [PATCH 55/97] add model chinese i18n keys (#17379) --- web/public/locales/zh-CN/views/settings.json | 44 +++++++++++++++----- 1 file changed, 33 insertions(+), 11 deletions(-) diff --git a/web/public/locales/zh-CN/views/settings.json b/web/public/locales/zh-CN/views/settings.json index ccef8f151..4b39daf7c 100644 --- a/web/public/locales/zh-CN/views/settings.json +++ b/web/public/locales/zh-CN/views/settings.json @@ -7,13 +7,8 @@ "masksAndZones": "遮罩和区域编辑器 - Frigate", "motionTuner": "运动调整器 - Frigate", "object": "对象设置 - Frigate", - "general": "常规设置 - Frigate" - }, - "dialog": { - "unsavedChanges": { - "title": "你有未保存的更改。", - "desc": "是否要在继续之前保存更改?" - } + "general": "常规设置 - Frigate", + "frigatePlus": "Frigate+ 设置 - Frigate" }, "menu": { "uiSettings": "界面设置", @@ -23,7 +18,14 @@ "motionTuner": "运动调整器", "debug": "调试", "users": "用户", - "notifications": "通知" + "notifications": "通知", + "frigateplus": "Frigate+" + }, + "dialog": { + "unsavedChanges": { + "title": "你有未保存的更改。", + "desc": "是否要在继续之前保存更改?" + } }, "cameraSetting": { "camera": "摄像头", @@ -105,7 +107,19 @@ "faceRecognition": { "title": "人脸识别", "desc": "人脸识别功能允许为人物分配名称,当识别到他们的面孔时,Frigate 会将人物的名字作为子标签进行分配。这些信息会显示在界面、过滤器以及通知中。", - "readTheDocumentation": "阅读文档(英文)" + "readTheDocumentation": "阅读文档(英文)", + "modelSize": { + "label": "模型大小", + "desc": "用于人脸识别的模型尺寸。", + "small": { + "title": "小模型", + "desc": "使用小模型将采用OpenCV的局部二值模式直方图(LBPH)算法,可在大多数CPU上高效运行。" + }, + "large": { + "title": "大模型", + "desc": "使用大模型将采用ArcFace人脸嵌入模型,若适用将自动在GPU上运行。" + } + } }, "licensePlateRecognition": { "title": "车牌识别", @@ -113,7 +127,7 @@ "readTheDocumentation": "阅读文档(英文)" }, "toast": { - "success": "分类设置已保存。", + "success": "分类设置已保存,请重启 Frigate 以应用更改。", "error": "保存配置更改失败:{{errorMessage}}" } }, @@ -542,9 +556,17 @@ "trainDate": "训练日期", "baseModel": "基础模型", "supportedDetectors": "支持的检测器", + "dimensions": "大小", "cameras": "摄像头", "loading": "正在加载模型信息...", - "error": "加载模型信息失败" + "error": "加载模型信息失败", + "availableModels": "可用模型", + "loadingAvailableModels": "正在加载可用模型...", + "modelSelect": "您可以在Frigate+上选择可用的模型。请注意,只能选择与当前探测器配置兼容的模型。" + }, + "toast": { + "success": "Frigate+ 设置已保存。请重启 Frigate 以应用更改。", + "error": "配置更改保存失败:{{errorMessage}}" } } } From 6f707e8722e239827104bf97af0e51008eb2f99e Mon Sep 17 00:00:00 2001 From: Logan Garrett Date: Wed, 26 Mar 2025 07:24:30 -0400 Subject: [PATCH 56/97] Add explanation of group_add for igpu (#17375) Add explanation of group_add for igpu --- docker-compose.yml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/docker-compose.yml b/docker-compose.yml index 4322dd23c..168e7fd10 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,7 +1,8 @@ services: devcontainer: container_name: frigate-devcontainer - # add groups from host for render, plugdev, video + # Check host system's actual render/video/plugdev group IDs with 'getent group render', 'getent group video', and 'getent group plugdev' + # Must add these exact IDs in container's group_add section or OpenVINO GPU acceleration will fail group_add: - "109" # render - "110" # render @@ -37,4 +38,4 @@ services: container_name: mqtt image: eclipse-mosquitto:1.6 ports: - - "1883:1883" \ No newline at end of file + - "1883:1883" From b30de965255f5ae87d3949c11c7d453cdabef265 Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Wed, 26 Mar 2025 06:25:39 -0500 Subject: 
[PATCH 57/97] Reduce expansion of license_plate box for frigate+ models (#17373) --- frigate/data_processing/common/license_plate/mixin.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/frigate/data_processing/common/license_plate/mixin.py b/frigate/data_processing/common/license_plate/mixin.py index 9bf2119f9..1d80a4c02 100644 --- a/frigate/data_processing/common/license_plate/mixin.py +++ b/frigate/data_processing/common/license_plate/mixin.py @@ -1135,9 +1135,9 @@ class LicensePlateProcessingMixin: license_plate_frame = cv2.cvtColor(frame, cv2.COLOR_YUV2BGR_I420) - # Expand the license_plate_box by 30% + # Expand the license_plate_box by 10% box_array = np.array(license_plate_box) - expansion = (box_array[2:] - box_array[:2]) * 0.30 + expansion = (box_array[2:] - box_array[:2]) * 0.10 expanded_box = np.array( [ license_plate_box[0] - expansion[0], From 4ccf61a6d787298a32576e3e26f47b4e42f7aaf1 Mon Sep 17 00:00:00 2001 From: leccelecce <24962424+leccelecce@users.noreply.github.com> Date: Wed, 26 Mar 2025 12:16:12 +0000 Subject: [PATCH 58/97] Fix wrong value displayed in facerec settings (#17383) --- web/src/views/settings/ClassificationSettingsView.tsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web/src/views/settings/ClassificationSettingsView.tsx b/web/src/views/settings/ClassificationSettingsView.tsx index 2ce958f9e..b40fde19f 100644 --- a/web/src/views/settings/ClassificationSettingsView.tsx +++ b/web/src/views/settings/ClassificationSettingsView.tsx @@ -424,7 +424,7 @@ export default function ClassificationSettingsView({ } > - {classificationSettings.search.model_size} + {classificationSettings.face.model_size} From 4edf0d8cd36a84406a60de494ca5889f250e3935 Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Wed, 26 Mar 2025 07:41:00 -0500 Subject: [PATCH 59/97] LPR bugfix (#17384) * ensure image is numpy array * clean up debugging * clean up postprocessor * process raw input as img --- .../common/license_plate/mixin.py | 52 +++++++++---------- frigate/data_processing/post/license_plate.py | 4 +- frigate/embeddings/onnx/lpr_embedding.py | 4 +- 3 files changed, 30 insertions(+), 30 deletions(-) diff --git a/frigate/data_processing/common/license_plate/mixin.py b/frigate/data_processing/common/license_plate/mixin.py index 1d80a4c02..9cc988267 100644 --- a/frigate/data_processing/common/license_plate/mixin.py +++ b/frigate/data_processing/common/license_plate/mixin.py @@ -32,10 +32,6 @@ class LicensePlateProcessingMixin: def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) - self.requires_license_plate_detection = ( - "license_plate" not in self.config.objects.all_objects - ) - self.event_metadata_publisher = EventMetadataPublisher() self.ctc_decoder = CTCDecoder() @@ -312,7 +308,6 @@ class LicensePlateProcessingMixin: # get minimum bounding box (rotated rectangle) around the contour and the smallest side length. 
points, min_side = self._get_min_boxes(contour) - logger.debug(f"min side {index}, {min_side}") if min_side < self.min_size: continue @@ -320,7 +315,6 @@ class LicensePlateProcessingMixin: points = np.array(points) score = self._box_score(output, contour) - logger.debug(f"box score {index}, {score}") if self.box_thresh > score: continue @@ -991,21 +985,21 @@ class LicensePlateProcessingMixin: license_plate = self._detect_license_plate(rgb) logger.debug( - f"YOLOv9 LPD inference time: {(datetime.datetime.now().timestamp() - yolov9_start) * 1000:.2f} ms" + f"{camera}: YOLOv9 LPD inference time: {(datetime.datetime.now().timestamp() - yolov9_start) * 1000:.2f} ms" ) self.__update_yolov9_metrics( datetime.datetime.now().timestamp() - yolov9_start ) if not license_plate: - logger.debug("Detected no license plates in full frame.") + logger.debug(f"{camera}: Detected no license plates in full frame.") return license_plate_area = (license_plate[2] - license_plate[0]) * ( license_plate[3] - license_plate[1] ) if license_plate_area < self.lpr_config.min_area: - logger.debug("License plate area below minimum threshold.") + logger.debug(f"{camera}: License plate area below minimum threshold.") return license_plate_frame = rgb[ @@ -1027,13 +1021,15 @@ class LicensePlateProcessingMixin: # don't run for non car objects if obj_data.get("label") != "car": - logger.debug("Not a processing license plate for non car object.") + logger.debug( + f"{camera}: Not a processing license plate for non car object." + ) return # don't run for stationary car objects if obj_data.get("stationary") == True: logger.debug( - "Not a processing license plate for a stationary car object." + f"{camera}: Not a processing license plate for a stationary car object." ) return @@ -1041,14 +1037,14 @@ class LicensePlateProcessingMixin: # that is not a license plate if obj_data.get("sub_label") and id not in self.detected_license_plates: logger.debug( - f"Not processing license plate due to existing sub label: {obj_data.get('sub_label')}." + f"{camera}: Not processing license plate due to existing sub label: {obj_data.get('sub_label')}." ) return license_plate: Optional[dict[str, any]] = None - if self.requires_license_plate_detection: - logger.debug("Running manual license_plate detection.") + if "license_plate" not in self.config.cameras[camera].objects.track: + logger.debug(f"{camera}: Running manual license_plate detection.") car_box = obj_data.get("box") @@ -1071,14 +1067,16 @@ class LicensePlateProcessingMixin: yolov9_start = datetime.datetime.now().timestamp() license_plate = self._detect_license_plate(car) logger.debug( - f"YOLOv9 LPD inference time: {(datetime.datetime.now().timestamp() - yolov9_start) * 1000:.2f} ms" + f"{camera}: YOLOv9 LPD inference time: {(datetime.datetime.now().timestamp() - yolov9_start) * 1000:.2f} ms" ) self.__update_yolov9_metrics( datetime.datetime.now().timestamp() - yolov9_start ) if not license_plate: - logger.debug("Detected no license plates for car object.") + logger.debug( + f"{camera}: Detected no license plates for car object." 
+ ) return license_plate_area = max( @@ -1093,7 +1091,7 @@ class LicensePlateProcessingMixin: license_plate_area < self.config.cameras[obj_data["camera"]].lpr.min_area * 2 ): - logger.debug("License plate is less than min_area") + logger.debug(f"{camera}: License plate is less than min_area") return license_plate_frame = car[ @@ -1103,7 +1101,7 @@ class LicensePlateProcessingMixin: else: # don't run for object without attributes if not obj_data.get("current_attributes"): - logger.debug("No attributes to parse.") + logger.debug(f"{camera}: No attributes to parse.") return attributes: list[dict[str, any]] = obj_data.get( @@ -1130,7 +1128,7 @@ class LicensePlateProcessingMixin: or area(license_plate_box) < self.config.cameras[obj_data["camera"]].lpr.min_area ): - logger.debug(f"Invalid license plate box {license_plate}") + logger.debug(f"{camera}: Invalid license plate box {license_plate}") return license_plate_frame = cv2.cvtColor(frame, cv2.COLOR_YUV2BGR_I420) @@ -1184,7 +1182,7 @@ class LicensePlateProcessingMixin: ) logger.debug( - f"Detected text: {plate} (average confidence: {avg_confidence:.2f}, area: {text_area} pixels)" + f"{camera}: Detected text: {plate} (average confidence: {avg_confidence:.2f}, area: {text_area} pixels)" ) else: logger.debug("No text detected") @@ -1204,7 +1202,7 @@ class LicensePlateProcessingMixin: # Check against minimum confidence threshold if avg_confidence < self.lpr_config.recognition_threshold: logger.debug( - f"Average confidence {avg_confidence} is less than threshold ({self.lpr_config.recognition_threshold})" + f"{camera}: Average confidence {avg_confidence} is less than threshold ({self.lpr_config.recognition_threshold})" ) return @@ -1223,7 +1221,7 @@ class LicensePlateProcessingMixin: if similarity >= self.similarity_threshold: plate_id = existing_id logger.debug( - f"Matched plate {top_plate} to {data['plate']} (similarity: {similarity:.3f})" + f"{camera}: Matched plate {top_plate} to {data['plate']} (similarity: {similarity:.3f})" ) break if plate_id is None: @@ -1231,11 +1229,11 @@ class LicensePlateProcessingMixin: obj_data, top_plate, avg_confidence ) logger.debug( - f"New plate event for dedicated LPR camera {plate_id}: {top_plate}" + f"{camera}: New plate event for dedicated LPR camera {plate_id}: {top_plate}" ) else: logger.debug( - f"Matched existing plate event for dedicated LPR camera {plate_id}: {top_plate}" + f"{camera}: Matched existing plate event for dedicated LPR camera {plate_id}: {top_plate}" ) self.detected_license_plates[plate_id]["last_seen"] = current_time @@ -1246,7 +1244,7 @@ class LicensePlateProcessingMixin: if self._should_keep_previous_plate( id, top_plate, top_char_confidences, top_area, avg_confidence ): - logger.debug("Keeping previous plate") + logger.debug(f"{camera}: Keeping previous plate") return # Determine subLabel based on known plates, use regex matching @@ -1277,7 +1275,9 @@ class LicensePlateProcessingMixin: if dedicated_lpr: # save the best snapshot - logger.debug(f"Writing snapshot for {id}, {top_plate}, {current_time}") + logger.debug( + f"{camera}: Writing snapshot for {id}, {top_plate}, {current_time}" + ) frame_bgr = cv2.cvtColor(frame, cv2.COLOR_YUV2BGR_I420) self.sub_label_publisher.publish( EventMetadataTypeEnum.save_lpr_snapshot, diff --git a/frigate/data_processing/post/license_plate.py b/frigate/data_processing/post/license_plate.py index e5c8a29a8..c78e56b9d 100644 --- a/frigate/data_processing/post/license_plate.py +++ b/frigate/data_processing/post/license_plate.py @@ -139,7 +139,7 @@ class 
LicensePlatePostProcessor(LicensePlateProcessingMixin, PostProcessorApi): scale_y = image.shape[0] / detect_height # Determine which box to enlarge based on detection mode - if self.requires_license_plate_detection: + if "license_plate" not in self.config.cameras[camera_name].objects.track: # Scale and enlarge the car box box = obj_data.get("box") if not box: @@ -189,7 +189,7 @@ class LicensePlatePostProcessor(LicensePlateProcessingMixin, PostProcessorApi): ) keyframe_obj_data = obj_data.copy() - if self.requires_license_plate_detection: + if "license_plate" not in self.config.cameras[camera_name].objects.track: # car box keyframe_obj_data["box"] = [new_left, new_top, new_right, new_bottom] else: diff --git a/frigate/embeddings/onnx/lpr_embedding.py b/frigate/embeddings/onnx/lpr_embedding.py index c3b9a8771..cfe8fb16f 100644 --- a/frigate/embeddings/onnx/lpr_embedding.py +++ b/frigate/embeddings/onnx/lpr_embedding.py @@ -261,8 +261,8 @@ class LicensePlateDetector(BaseEmbedding): def _preprocess_inputs(self, raw_inputs): if isinstance(raw_inputs, list): raise ValueError("License plate embedding does not support batch inputs.") - # Get image as numpy array - img = self._process_image(raw_inputs) + + img = raw_inputs height, width, channels = img.shape # Resize maintaining aspect ratio From 395fc33ccce9f30c5b9130bd2c6e152e901d8f87 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Wed, 26 Mar 2025 07:14:36 -0600 Subject: [PATCH 60/97] Include all .so and .so.12 (#17388) --- docker/tensorrt/Dockerfile.base | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docker/tensorrt/Dockerfile.base b/docker/tensorrt/Dockerfile.base index 4305f1d74..bd8738792 100644 --- a/docker/tensorrt/Dockerfile.base +++ b/docker/tensorrt/Dockerfile.base @@ -20,8 +20,8 @@ RUN --mount=type=bind,source=docker/tensorrt/detector/tensorrt_libyolo.sh,target # COPY required individual CUDA deps RUN mkdir -p /usr/local/cuda-deps RUN if [ "$TARGETARCH" = "amd64" ]; then \ - cp /usr/local/cuda-12.3/targets/x86_64-linux/lib/libcurand.so.* /usr/local/cuda-deps/ && \ - cp /usr/local/cuda-12.3/targets/x86_64-linux/lib/libnvrtc.so.* /usr/local/cuda-deps/ ; \ + cp /usr/local/cuda-12.3/targets/x86_64-linux/lib/libcurand.s* /usr/local/cuda-deps/ && \ + cp /usr/local/cuda-12.3/targets/x86_64-linux/lib/libnvrtc.s* /usr/local/cuda-deps/ ; \ fi # Frigate w/ TensorRT Support as separate image From e3d4b84803fce7e9f345592c797a8cdc62022dbb Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Wed, 26 Mar 2025 07:23:01 -0600 Subject: [PATCH 61/97] Face recognition improvements (#17387) * Increase frequency of updates when internal face detection is used * Adjust number of required faces based on detection type * Adjust min_score config to unknown_score * Only for person * Improve typing * Update face rec docs * Cleanup ui colors * Cleanup --- docs/docs/configuration/face_recognition.md | 19 +++--- docs/docs/configuration/reference.md | 4 +- frigate/camera/state.py | 20 ++++-- frigate/config/classification.py | 4 +- frigate/data_processing/common/face/model.py | 9 +-- frigate/data_processing/real_time/face.py | 66 +++++++++++--------- web/src/pages/FaceLibrary.tsx | 25 ++++++-- web/src/types/frigateConfig.ts | 15 +++-- 8 files changed, 97 insertions(+), 65 deletions(-) diff --git a/docs/docs/configuration/face_recognition.md b/docs/docs/configuration/face_recognition.md index a3cb4e308..af6fd1eff 100644 --- a/docs/docs/configuration/face_recognition.md +++ b/docs/docs/configuration/face_recognition.md @@ -22,16 +22,16 @@ Frigate 
needs to first detect a `face` before it can recognize a face. ### Face Recognition Frigate has support for two face recognition model types: -- **small**: Frigate will use CV2 Local Binary Pattern Face Recognizer to recognize faces, which runs locally on the CPU. -- **large**: Frigate will run a face embedding model, this is only recommended to be run when an integrated or dedicated GPU is available. -In both cases a lightweight face landmark detection model is also used to align faces before running them through the face recognizer. +- **small**: Frigate will use CV2 Local Binary Pattern Face Recognizer to recognize faces, which runs locally on the CPU. This model is optimized for efficiency and is not as accurate. +- **large**: Frigate will run a face embedding model, this model is optimized for accuracy. It is only recommended to be run when an integrated or dedicated GPU is available. + +In both cases a lightweight face landmark detection model is also used to align faces before running the recognition model. ## Minimum System Requirements -Face recognition is lightweight and runs on the CPU, there are no significantly different system requirements than running Frigate itself when using the `small` model. - -When using the `large` model an integrated or discrete GPU is recommended. +The `small` model is optimized for efficiency and runs on the CPU, there are no significantly different system requirements. +The `large` model is optimized for accuracy and an integrated or discrete GPU is highly recommended. ## Configuration @@ -58,6 +58,8 @@ Fine-tune face recognition with these optional parameters: ### Recognition - `model_size`: Which model size to use, options are `small` or `large` +- `unknown_score`: Min score to mark a person as a potential match, matches below this will be marked as unknown. + - Default: `0.8`. - `recognition_threshold`: Recognition confidence score required to add the face to the object as a sub label. - Default: `0.9`. - `blur_confidence_filter`: Enables a filter that calculates how blurry the face is and adjusts the confidence based on this. @@ -108,13 +110,14 @@ Once straight-on images are performing well, start choosing slightly off-angle i ## FAQ -### Why is every face tagged as a known face and not unknown? +### Why can't I bulk upload photos? -Any recognized face with a score >= `min_score` will show in the `Train` tab along with the recognition score. A low scoring face is effectively the same as `unknown`, but includes more information. This does not mean the recognition is not working well, and is part of the importance of choosing the correct `recognition_threshold`. +It is important to methodically add photos to the library, bulk importing photos (especially from a general photo library) will lead to overfitting in that particular scenario and hurt recognition performance. ### Why do unknown people score similarly to known people? This can happen for a few different reasons, but this is usually an indicator that the training set needs to be improved. This is often related to overfitting: + - If you train with only a few images per person, especially if those images are very similar, the recognition model becomes overly specialized to those specific images. - When you provide images with different poses, lighting, and expressions, the algorithm extracts features that are consistent across those variations. - By training on a diverse set of images, the algorithm becomes less sensitive to minor variations and noise in the input image. 
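For reference, the `unknown_score` and `recognition_threshold` options documented above interact the way the reworked Train tab coloring does. A minimal sketch of that bucketing (the function name is illustrative; the defaults match the config values above):

```python
def bucket_face_score(
    score: float, unknown_score: float = 0.8, recognition_threshold: float = 0.9
) -> str:
    """Bucket a recognition score the way the Train tab colors attempts."""
    if score >= recognition_threshold:
        return "match"  # confident enough to be applied as a sub label
    if score >= unknown_score:
        return "potential"  # a potential match, useful for training
    return "unknown"
```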
diff --git a/docs/docs/configuration/reference.md b/docs/docs/configuration/reference.md index 8e24db0b7..e30ee1619 100644 --- a/docs/docs/configuration/reference.md +++ b/docs/docs/configuration/reference.md @@ -547,8 +547,8 @@ semantic_search: face_recognition: # Optional: Enable semantic search (default: shown below) enabled: False - # Optional: Minimum face distance score required to save the attempt (default: shown below) - min_score: 0.8 + # Optional: Minimum face distance score required to mark as a potential match (default: shown below) + unknown_score: 0.8 # Optional: Minimum face detection score required to detect a face (default: shown below) # NOTE: This only applies when not running a Frigate+ model detection_threshold: 0.7 diff --git a/frigate/camera/state.py b/frigate/camera/state.py index 98f808bf6..65a3dcf5d 100644 --- a/frigate/camera/state.py +++ b/frigate/camera/state.py @@ -5,7 +5,7 @@ import logging import os import threading from collections import defaultdict -from typing import Callable +from typing import Any, Callable import cv2 import numpy as np @@ -53,8 +53,19 @@ class CameraState: self.callbacks = defaultdict(list) self.ptz_autotracker_thread = ptz_autotracker_thread self.prev_enabled = self.camera_config.enabled + self.requires_face_detection = ( + self.config.face_recognition.enabled + and "face" not in self.config.objects.all_objects + ) - def get_current_frame(self, draw_options={}): + def get_max_update_frequency(self, obj: TrackedObject) -> int: + return ( + 1 + if self.requires_face_detection and obj.obj_data["label"] == "person" + else 5 + ) + + def get_current_frame(self, draw_options: dict[str, Any] = {}): with self.current_frame_lock: frame_copy = np.copy(self._current_frame) frame_time = self.current_frame_time @@ -283,11 +294,12 @@ class CameraState: updated_obj.last_updated = frame_time - # if it has been more than 5 seconds since the last thumb update + # if it has been more than max_update_frequency seconds since the last thumb update # and the last update is greater than the last publish or # the object has changed significantly if ( - frame_time - updated_obj.last_published > 5 + frame_time - updated_obj.last_published + > self.get_max_update_frequency(updated_obj) and updated_obj.last_updated > updated_obj.last_published ) or significant_update: # call event handlers diff --git a/frigate/config/classification.py b/frigate/config/classification.py index 25c379546..aecbf6537 100644 --- a/frigate/config/classification.py +++ b/frigate/config/classification.py @@ -54,8 +54,8 @@ class FaceRecognitionConfig(FrigateBaseModel): model_size: str = Field( default="small", title="The size of the embeddings model used." 
) - min_score: float = Field( - title="Minimum face distance score required to save the attempt.", + unknown_score: float = Field( + title="Minimum face distance score required to be marked as a potential match.", default=0.8, gt=0.0, le=1.0, diff --git a/frigate/data_processing/common/face/model.py b/frigate/data_processing/common/face/model.py index a006734ce..5e15a2441 100644 --- a/frigate/data_processing/common/face/model.py +++ b/frigate/data_processing/common/face/model.py @@ -164,9 +164,7 @@ class LBPHRecognizer(FaceRecognizer): return self.recognizer: cv2.face.LBPHFaceRecognizer = ( - cv2.face.LBPHFaceRecognizer_create( - radius=2, threshold=(1 - self.config.face_recognition.min_score) * 1000 - ) + cv2.face.LBPHFaceRecognizer_create(radius=2, threshold=400) ) self.recognizer.train(faces, np.array(labels)) @@ -243,6 +241,8 @@ class ArcFaceRecognizer(FaceRecognizer): for name, embs in face_embeddings_map.items(): self.mean_embs[name] = stats.trim_mean(embs, 0.15) + logger.debug("Finished building ArcFace model") + def similarity_to_confidence( self, cosine_similarity: float, median=0.3, range_width=0.6, slope_factor=12 ): @@ -302,7 +302,4 @@ class ArcFaceRecognizer(FaceRecognizer): score = confidence label = name - if score < self.config.face_recognition.min_score: - return None - return label, round(score * blur_factor, 2) diff --git a/frigate/data_processing/real_time/face.py b/frigate/data_processing/real_time/face.py index e20dad633..9b479a527 100644 --- a/frigate/data_processing/real_time/face.py +++ b/frigate/data_processing/real_time/face.py @@ -36,36 +36,6 @@ MAX_DETECTION_HEIGHT = 1080 MIN_MATCHING_FACES = 2 -def weighted_average_by_area(results_list: list[tuple[str, float, int]]): - if len(results_list) < 3: - return "unknown", 0.0 - - score_count = {} - weighted_scores = {} - total_face_areas = {} - - for name, score, face_area in results_list: - if name not in weighted_scores: - score_count[name] = 1 - weighted_scores[name] = 0.0 - total_face_areas[name] = 0.0 - else: - score_count[name] += 1 - - weighted_scores[name] += score * face_area - total_face_areas[name] += face_area - - prominent_name = max(score_count) - - # if a single name is not prominent in the history then we are not confident - if score_count[prominent_name] / len(results_list) < 0.65: - return "unknown", 0.0 - - return prominent_name, weighted_scores[prominent_name] / total_face_areas[ - prominent_name - ] - - class FaceRealTimeProcessor(RealTimeProcessorApi): def __init__( self, @@ -271,6 +241,9 @@ class FaceRealTimeProcessor(RealTimeProcessorApi): sub_label, score = res + if score < self.face_config.unknown_score: + sub_label = "unknown" + logger.debug( f"Detected best face for person as: {sub_label} with probability {score}" ) @@ -288,7 +261,7 @@ class FaceRealTimeProcessor(RealTimeProcessorApi): self.person_face_history[id].append( (sub_label, score, face_frame.shape[0] * face_frame.shape[1]) ) - (weighted_sub_label, weighted_score) = weighted_average_by_area( + (weighted_sub_label, weighted_score) = self.weighted_average_by_area( self.person_face_history[id] ) @@ -415,3 +388,34 @@ class FaceRealTimeProcessor(RealTimeProcessorApi): def expire_object(self, object_id: str): if object_id in self.person_face_history: self.person_face_history.pop(object_id) + + def weighted_average_by_area(self, results_list: list[tuple[str, float, int]]): + min_faces = 1 if self.requires_face_detection else 3 + + if len(results_list) < min_faces: + return "unknown", 0.0 + + score_count = {} + weighted_scores = {} + 
total_face_areas = {} + + for name, score, face_area in results_list: + if name not in weighted_scores: + score_count[name] = 1 + weighted_scores[name] = 0.0 + total_face_areas[name] = 0.0 + else: + score_count[name] += 1 + + weighted_scores[name] += score * face_area + total_face_areas[name] += face_area + + prominent_name = max(score_count) + + # if a single name is not prominent in the history then we are not confident + if score_count[prominent_name] / len(results_list) < 0.65: + return "unknown", 0.0 + + return prominent_name, weighted_scores[prominent_name] / total_face_areas[ + prominent_name + ] diff --git a/web/src/pages/FaceLibrary.tsx b/web/src/pages/FaceLibrary.tsx index 0ac937283..696691997 100644 --- a/web/src/pages/FaceLibrary.tsx +++ b/web/src/pages/FaceLibrary.tsx @@ -33,7 +33,7 @@ import useKeyboardListener from "@/hooks/use-keyboard-listener"; import useOptimisticState from "@/hooks/use-optimistic-state"; import { cn } from "@/lib/utils"; import { FaceLibraryData, RecognizedFaceData } from "@/types/face"; -import { FrigateConfig } from "@/types/frigateConfig"; +import { FaceRecognitionConfig, FrigateConfig } from "@/types/frigateConfig"; import axios from "axios"; import { useCallback, useEffect, useMemo, useRef, useState } from "react"; import { isDesktop, isMobile } from "react-device-detect"; @@ -451,7 +451,7 @@ function TrainingGrid({ key={image} image={image} faceNames={faceNames} - threshold={config.face_recognition.recognition_threshold} + recognitionConfig={config.face_recognition} selected={selectedFaces.includes(image)} onClick={(data, meta) => { if (meta) { @@ -471,7 +471,7 @@ function TrainingGrid({ type FaceAttemptProps = { image: string; faceNames: string[]; - threshold: number; + recognitionConfig: FaceRecognitionConfig; selected: boolean; onClick: (data: RecognizedFaceData, meta: boolean) => void; onRefresh: () => void; @@ -479,7 +479,7 @@ type FaceAttemptProps = { function FaceAttempt({ image, faceNames, - threshold, + recognitionConfig, selected, onClick, onRefresh, @@ -496,6 +496,16 @@ function FaceAttempt({ }; }, [image]); + const scoreStatus = useMemo(() => { + if (data.score >= recognitionConfig.recognition_threshold) { + return "match"; + } else if (data.score >= recognitionConfig.unknown_score) { + return "potential"; + } else { + return "unknown"; + } + }, [data, recognitionConfig]); + // interaction const imgRef = useRef(null); @@ -579,10 +589,13 @@ function FaceAttempt({
 {data.name}
-              data.score >= threshold ? "text-success" : "text-danger",
+              "",
+              scoreStatus == "match" && "text-success",
+              scoreStatus == "potential" && "text-orange-400",
+              scoreStatus == "unknown" && "text-danger",
             )}
           >
-            {data.score * 100}%
+            {Math.round(data.score * 100)}%
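This commit also replaces the single best-face score with an area-weighted vote over each person's recent results, moved into `weighted_average_by_area` in `face.py` above. A standalone sketch with illustrative numbers; note that the real processor requires only one result when Frigate runs its own face detection and three otherwise:

```python
def weighted_average_by_area(
    results: list[tuple[str, float, int]], min_faces: int = 3
) -> tuple[str, float]:
    """Area-weighted vote over a person's recent (name, score, face_area) results."""
    if len(results) < min_faces:
        return "unknown", 0.0

    score_count: dict[str, int] = {}
    weighted_scores: dict[str, float] = {}
    total_face_areas: dict[str, float] = {}

    for name, score, face_area in results:
        score_count[name] = score_count.get(name, 0) + 1
        weighted_scores[name] = weighted_scores.get(name, 0.0) + score * face_area
        total_face_areas[name] = total_face_areas.get(name, 0.0) + face_area

    # the most frequently seen name must dominate the history, otherwise stay unknown
    prominent = max(score_count, key=score_count.get)

    if score_count[prominent] / len(results) < 0.65:
        return "unknown", 0.0

    return prominent, weighted_scores[prominent] / total_face_areas[prominent]


# larger, typically front-on faces pull the average harder than small
# side-angle detections with similar raw scores
history = [("josh", 0.92, 9000), ("josh", 0.85, 2500), ("josh", 0.95, 12000)]
print(weighted_average_by_area(history))  # ('josh', 0.927...)
```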
diff --git a/web/src/types/frigateConfig.ts b/web/src/types/frigateConfig.ts index 5312bed8c..d66d5edcb 100644 --- a/web/src/types/frigateConfig.ts +++ b/web/src/types/frigateConfig.ts @@ -20,6 +20,14 @@ export interface BirdseyeConfig { width: number; } +export interface FaceRecognitionConfig { + enabled: boolean; + model_size: SearchModelSize; + unknown_score: number; + detection_threshold: number; + recognition_threshold: number; +} + export type SearchModel = "jinav1" | "jinav2"; export type SearchModelSize = "small" | "large"; @@ -331,12 +339,7 @@ export interface FrigateConfig { environment_vars: Record; - face_recognition: { - enabled: boolean; - model_size: SearchModelSize; - detection_threshold: number; - recognition_threshold: number; - }; + face_recognition: FaceRecognitionConfig; ffmpeg: { global_args: string[]; From e6936c177b20b0aae932f12cffa297fe25004511 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Wed, 26 Mar 2025 10:00:23 -0600 Subject: [PATCH 62/97] Face model loading improvements (#17390) * Don't assume landmark file is downloaded * Rewrite build model task to be asynchronous so it doesn't block the pipeline * Handle case where face recognition does not respond * Cleanup * Make daemon thread --- frigate/api/classification.py | 20 +++++ frigate/data_processing/common/face/model.py | 89 +++++++++++++------- 2 files changed, 80 insertions(+), 29 deletions(-) diff --git a/frigate/api/classification.py b/frigate/api/classification.py index d3ee9c3d9..975a41c9d 100644 --- a/frigate/api/classification.py +++ b/frigate/api/classification.py @@ -198,6 +198,16 @@ async def register_face(request: Request, name: str, file: UploadFile): context: EmbeddingsContext = request.app.embeddings result = context.register_face(name, await file.read()) + + if not isinstance(result, dict): + return JSONResponse( + status_code=500, + content={ + "success": False, + "message": "Could not process request. Try restarting Frigate.", + }, + ) + return JSONResponse( status_code=200 if result.get("success", True) else 400, content=result, @@ -214,6 +224,16 @@ async def recognize_face(request: Request, file: UploadFile): context: EmbeddingsContext = request.app.embeddings result = context.recognize_face(await file.read()) + + if not isinstance(result, dict): + return JSONResponse( + status_code=500, + content={ + "success": False, + "message": "Could not process request. 
Try restarting Frigate.", + }, + ) + return JSONResponse( status_code=200 if result.get("success", True) else 400, content=result, diff --git a/frigate/data_processing/common/face/model.py b/frigate/data_processing/common/face/model.py index 5e15a2441..f7ef1ae13 100644 --- a/frigate/data_processing/common/face/model.py +++ b/frigate/data_processing/common/face/model.py @@ -1,5 +1,7 @@ import logging import os +import queue +import threading from abc import ABC, abstractmethod import cv2 @@ -18,10 +20,7 @@ class FaceRecognizer(ABC): def __init__(self, config: FrigateConfig) -> None: self.config = config - self.landmark_detector = cv2.face.createFacemarkLBF() - self.landmark_detector.loadModel( - os.path.join(MODEL_CACHE_DIR, "facedet/landmarkdet.yaml") - ) + self.init_landmark_detector() @abstractmethod def build(self) -> None: @@ -37,6 +36,13 @@ class FaceRecognizer(ABC): def classify(self, face_image: np.ndarray) -> tuple[str, float] | None: pass + def init_landmark_detector(self) -> None: + landmark_model = os.path.join(MODEL_CACHE_DIR, "facedet/landmarkdet.yaml") + + if os.path.exists(landmark_model): + self.landmark_detector = cv2.face.createFacemarkLBF() + self.landmark_detector.loadModel(landmark_model) + def align_face( self, image: np.ndarray, @@ -130,6 +136,7 @@ class LBPHRecognizer(FaceRecognizer): def build(self): if not self.landmark_detector: + self.init_landmark_detector() return None labels = [] @@ -201,45 +208,69 @@ class ArcFaceRecognizer(FaceRecognizer): super().__init__(config) self.mean_embs: dict[int, np.ndarray] = {} self.face_embedder: ArcfaceEmbedding = ArcfaceEmbedding() + self.model_builder_queue: queue.Queue | None = None def clear(self) -> None: self.mean_embs = {} - def build(self): - if not self.landmark_detector: - return None + def run_build_task(self) -> None: + self.model_builder_queue = queue.Queue() - face_embeddings_map: dict[str, list[np.ndarray]] = {} - idx = 0 + def build_model(): + face_embeddings_map: dict[str, list[np.ndarray]] = {} + idx = 0 - dir = "/media/frigate/clips/faces" - for name in os.listdir(dir): - if name == "train": - continue - - face_folder = os.path.join(dir, name) - - if not os.path.isdir(face_folder): - continue - - face_embeddings_map[name] = [] - for image in os.listdir(face_folder): - img = cv2.imread(os.path.join(face_folder, image)) - - if img is None: + dir = "/media/frigate/clips/faces" + for name in os.listdir(dir): + if name == "train": continue - img = self.align_face(img, img.shape[1], img.shape[0]) - emb = self.face_embedder([img])[0].squeeze() - face_embeddings_map[name].append(emb) + face_folder = os.path.join(dir, name) - idx += 1 + if not os.path.isdir(face_folder): + continue + + face_embeddings_map[name] = [] + for image in os.listdir(face_folder): + img = cv2.imread(os.path.join(face_folder, image)) + + if img is None: + continue + + img = self.align_face(img, img.shape[1], img.shape[0]) + emb = self.face_embedder([img])[0].squeeze() + face_embeddings_map[name].append(emb) + + idx += 1 + + self.model_builder_queue.put(face_embeddings_map) + + thread = threading.Thread(target=build_model, daemon=True) + thread.start() + + def build(self): + if not self.landmark_detector: + self.init_landmark_detector() + return None + + if self.model_builder_queue is not None: + try: + face_embeddings_map: dict[str, list[np.ndarray]] = ( + self.model_builder_queue.get(timeout=0.1) + ) + self.model_builder_queue = None + except queue.Empty: + return + else: + self.run_build_task() + return if not face_embeddings_map: return for 
name, embs in face_embeddings_map.items(): - self.mean_embs[name] = stats.trim_mean(embs, 0.15) + if embs: + self.mean_embs[name] = stats.trim_mean(embs, 0.15) logger.debug("Finished building ArcFace model") From e3f34d6f11fad2838d73ab36dcd2d873a5d25197 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Wed, 26 Mar 2025 13:42:30 -0600 Subject: [PATCH 63/97] Bird classification docs (#17369) * Add bird classification to sidebar * Add docs for bird classification * Update bird_classification.md Co-authored-by: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> * Add model info --------- Co-authored-by: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> --- .../docs/configuration/bird_classification.md | 31 +++++++++++++++++++ docs/sidebars.ts | 3 +- 2 files changed, 33 insertions(+), 1 deletion(-) create mode 100644 docs/docs/configuration/bird_classification.md diff --git a/docs/docs/configuration/bird_classification.md b/docs/docs/configuration/bird_classification.md new file mode 100644 index 000000000..398729290 --- /dev/null +++ b/docs/docs/configuration/bird_classification.md @@ -0,0 +1,31 @@ +--- +id: bird_classification +title: Bird Classification +--- + +Bird classification identifies known birds using a quantized Tensorflow model. When a known bird is recognized, its common name will be added as a `sub_label`. This information is included in the UI, filters, as well as in notifications. + +## Minimum System Requirements + +Bird classification runs a lightweight tflite model on the CPU, there are no significantly different system requirements than running Frigate itself. + +## Model + +The classification model used is the MobileNet INat Bird Classification, [available identifiers can be found here.](https://raw.githubusercontent.com/google-coral/test_data/master/inat_bird_labels.txt) + +## Configuration + +Bird classification is disabled by default, it must be enabled in your config file before it can be used. Bird classification is a global configuration setting. + +```yaml +classification: + bird: + enabled: true +``` + +## Advanced Configuration + +Fine-tune bird classification with these optional parameters: + +- `threshold`: Classification confidence score required to set the sub label on the object. + - Default: `0.9`. 
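Putting the optional parameter together with the snippet earlier in the doc, a complete example (the value shown is the documented default):

```yaml
classification:
  bird:
    enabled: true
    # optional: minimum classification confidence to set the sub label
    threshold: 0.9
```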
diff --git a/docs/sidebars.ts b/docs/sidebars.ts index bffb54349..f0b74f5d4 100644 --- a/docs/sidebars.ts +++ b/docs/sidebars.ts @@ -33,11 +33,12 @@ const sidebars: SidebarsConfig = { "configuration/object_detectors", "configuration/audio_detectors", ], - Classifiers: [ + Enrichments: [ "configuration/semantic_search", "configuration/genai", "configuration/face_recognition", "configuration/license_plate_recognition", + "configuration/bird_classification", ], Cameras: [ "configuration/cameras", From 53c8aa25cb0bbcbcd37c5f7737062b672c038db7 Mon Sep 17 00:00:00 2001 From: leccelecce <24962424+leccelecce@users.noreply.github.com> Date: Wed, 26 Mar 2025 21:44:15 +0000 Subject: [PATCH 64/97] Fix missing restart notification in Classification settings (#17397) * Use different message key to fix missing restart message * Move addMessage to finally block --- .../views/settings/ClassificationSettingsView.tsx | 4 ++-- web/src/views/settings/FrigatePlusSettingsView.tsx | 12 ++++++------ 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/web/src/views/settings/ClassificationSettingsView.tsx b/web/src/views/settings/ClassificationSettingsView.tsx index b40fde19f..24c3a9107 100644 --- a/web/src/views/settings/ClassificationSettingsView.tsx +++ b/web/src/views/settings/ClassificationSettingsView.tsx @@ -176,8 +176,8 @@ export default function ClassificationSettingsView({ }) .finally(() => { addMessage( - "search_settings", - `Restart Required (Classification settings changed)`, + "search_settings_restart", + `Restart required (Classification settings changed)`, undefined, "search_settings", ); diff --git a/web/src/views/settings/FrigatePlusSettingsView.tsx b/web/src/views/settings/FrigatePlusSettingsView.tsx index f29ebd604..49426bbe9 100644 --- a/web/src/views/settings/FrigatePlusSettingsView.tsx +++ b/web/src/views/settings/FrigatePlusSettingsView.tsx @@ -131,12 +131,6 @@ export default function FrigatePlusSettingsView({ position: "top-center", }); setChangedValue(false); - addMessage( - "plus_restart", - "Restart required (Frigate+ model changed)", - undefined, - "plus_restart", - ); updateConfig(); } else { toast.error( @@ -160,6 +154,12 @@ export default function FrigatePlusSettingsView({ ); }) .finally(() => { + addMessage( + "plus_restart", + "Restart required (Frigate+ model changed)", + undefined, + "plus_restart", + ); setIsLoading(false); }); }, [updateConfig, addMessage, frigatePlusSettings, t]); From a37f804469337b3dcd09bddaa05e532e0c5b2240 Mon Sep 17 00:00:00 2001 From: leccelecce <24962424+leccelecce@users.noreply.github.com> Date: Wed, 26 Mar 2025 23:51:54 +0000 Subject: [PATCH 65/97] i18n Settings fixes (#17398) --- web/public/locales/en/views/settings.json | 6 ++-- web/public/locales/zh-CN/views/settings.json | 30 ++++++++++---------- 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/web/public/locales/en/views/settings.json b/web/public/locales/en/views/settings.json index ffb2434ff..b1b70c8e5 100644 --- a/web/public/locales/en/views/settings.json +++ b/web/public/locales/en/views/settings.json @@ -11,9 +11,9 @@ "frigatePlus": "Frigate+ Settings - Frigate" }, "menu": { - "uiSettings": "UI Settings", - "classificationSettings": "Classification Settings", - "cameraSettings": "Camera Settings", + "ui": "UI", + "classification": "Classification", + "cameras": "Camera Settings", "masksAndZones": "Masks / Zones", "motionTuner": "Motion Tuner", "debug": "Debug", diff --git a/web/public/locales/zh-CN/views/settings.json b/web/public/locales/zh-CN/views/settings.json 
index 4b39daf7c..0c1087916 100644 --- a/web/public/locales/zh-CN/views/settings.json +++ b/web/public/locales/zh-CN/views/settings.json @@ -11,9 +11,9 @@ "frigatePlus": "Frigate+ 设置 - Frigate" }, "menu": { - "uiSettings": "界面设置", - "classificationSettings": "分类设置", - "cameraSettings": "摄像头设置", + "ui": "界面设置", + "classification": "分类设置", + "cameras": "摄像头设置", "masksAndZones": "遮罩/ 区域", "motionTuner": "运动调整器", "debug": "调试", @@ -109,16 +109,16 @@ "desc": "人脸识别功能允许为人物分配名称,当识别到他们的面孔时,Frigate 会将人物的名字作为子标签进行分配。这些信息会显示在界面、过滤器以及通知中。", "readTheDocumentation": "阅读文档(英文)", "modelSize": { - "label": "模型大小", - "desc": "用于人脸识别的模型尺寸。", - "small": { - "title": "小模型", - "desc": "使用小模型将采用OpenCV的局部二值模式直方图(LBPH)算法,可在大多数CPU上高效运行。" - }, - "large": { - "title": "大模型", - "desc": "使用大模型将采用ArcFace人脸嵌入模型,若适用将自动在GPU上运行。" - } + "label": "模型大小", + "desc": "用于人脸识别的模型尺寸。", + "small": { + "title": "小模型", + "desc": "使用小模型将采用OpenCV的局部二值模式直方图(LBPH)算法,可在大多数CPU上高效运行。" + }, + "large": { + "title": "大模型", + "desc": "使用大模型将采用ArcFace人脸嵌入模型,若适用将自动在GPU上运行。" + } } }, "licensePlateRecognition": { @@ -565,8 +565,8 @@ "modelSelect": "您可以在Frigate+上选择可用的模型。请注意,只能选择与当前探测器配置兼容的模型。" }, "toast": { - "success": "Frigate+ 设置已保存。请重启 Frigate 以应用更改。", - "error": "配置更改保存失败:{{errorMessage}}" + "success": "Frigate+ 设置已保存。请重启 Frigate 以应用更改。", + "error": "配置更改保存失败:{{errorMessage}}" } } } From ff34739f40ddb22817024584467748715c2908cf Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Wed, 26 Mar 2025 18:43:10 -0600 Subject: [PATCH 66/97] Face recognition bug fixes (#17401) * Simplify normalization * Fix confidence check --- frigate/data_processing/common/face/model.py | 4 ++-- frigate/embeddings/onnx/{facenet.py => face_embedding.py} | 3 +-- 2 files changed, 3 insertions(+), 4 deletions(-) rename frigate/embeddings/onnx/{facenet.py => face_embedding.py} (96%) diff --git a/frigate/data_processing/common/face/model.py b/frigate/data_processing/common/face/model.py index f7ef1ae13..1af934c5d 100644 --- a/frigate/data_processing/common/face/model.py +++ b/frigate/data_processing/common/face/model.py @@ -10,7 +10,7 @@ from scipy import stats from frigate.config import FrigateConfig from frigate.const import MODEL_CACHE_DIR -from frigate.embeddings.onnx.facenet import ArcfaceEmbedding +from frigate.embeddings.onnx.face_embedding import ArcfaceEmbedding logger = logging.getLogger(__name__) @@ -329,7 +329,7 @@ class ArcFaceRecognizer(FaceRecognizer): cosine_similarity = dot_product / (magnitude_A * magnitude_B) confidence = self.similarity_to_confidence(cosine_similarity) - if cosine_similarity > score: + if confidence > score: score = confidence label = name diff --git a/frigate/embeddings/onnx/facenet.py b/frigate/embeddings/onnx/face_embedding.py similarity index 96% rename from frigate/embeddings/onnx/facenet.py rename to frigate/embeddings/onnx/face_embedding.py index 3439620a0..0b808f716 100644 --- a/frigate/embeddings/onnx/facenet.py +++ b/frigate/embeddings/onnx/face_embedding.py @@ -90,8 +90,7 @@ class ArcfaceEmbedding(BaseEmbedding): frame[y_center : y_center + og_h, x_center : x_center + og_w] = og # run arcface normalization - normalized_image = frame.astype(np.float32) / 255.0 - frame = (normalized_image - 0.5) / 0.5 + frame = (frame / 127.5) - 1.0 frame = np.transpose(frame, (2, 0, 1)) frame = np.expand_dims(frame, axis=0) From 0947bffeefd7e6f3f76d1e8d838c030f7e1f6ac9 Mon Sep 17 00:00:00 2001 From: leccelecce <24962424+leccelecce@users.noreply.github.com> Date: Thu, 27 Mar 2025 00:48:28 +0000 Subject: [PATCH 67/97] nginx: don't gzip png, gif, jpeg 
or jpg (#17400) --- docker/main/rootfs/usr/local/nginx/conf/nginx.conf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/main/rootfs/usr/local/nginx/conf/nginx.conf b/docker/main/rootfs/usr/local/nginx/conf/nginx.conf index c8cd7fd45..4abfd3587 100644 --- a/docker/main/rootfs/usr/local/nginx/conf/nginx.conf +++ b/docker/main/rootfs/usr/local/nginx/conf/nginx.conf @@ -30,7 +30,7 @@ http { gzip on; gzip_comp_level 6; - gzip_types text/plain text/css application/json application/x-javascript application/javascript text/javascript image/svg+xml image/x-icon image/bmp image/png image/gif image/jpeg image/jpg; + gzip_types text/plain text/css application/json application/x-javascript application/javascript text/javascript image/svg+xml image/x-icon image/bmp; gzip_proxied no-cache no-store private expired auth; gzip_vary on; From 36446ceded9e7c347e31081024c846b895da3242 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Thu, 27 Mar 2025 05:31:29 -0600 Subject: [PATCH 68/97] Implement facenet tflite for small face recognition model (#17402) --- docs/docs/configuration/face_recognition.md | 8 +- frigate/data_processing/common/face/model.py | 187 +++++++++++-------- frigate/data_processing/real_time/face.py | 8 +- frigate/embeddings/__init__.py | 4 + frigate/embeddings/onnx/face_embedding.py | 114 ++++++++++- web/public/locales/en/views/settings.json | 4 +- 6 files changed, 227 insertions(+), 98 deletions(-) diff --git a/docs/docs/configuration/face_recognition.md b/docs/docs/configuration/face_recognition.md index af6fd1eff..278c592c0 100644 --- a/docs/docs/configuration/face_recognition.md +++ b/docs/docs/configuration/face_recognition.md @@ -23,15 +23,15 @@ Frigate needs to first detect a `face` before it can recognize a face. Frigate has support for two face recognition model types: -- **small**: Frigate will use CV2 Local Binary Pattern Face Recognizer to recognize faces, which runs locally on the CPU. This model is optimized for efficiency and is not as accurate. -- **large**: Frigate will run a face embedding model, this model is optimized for accuracy. It is only recommended to be run when an integrated or dedicated GPU is available. +- **small**: Frigate will run a FaceNet embedding model to recognize faces, which runs locally on the CPU. This model is optimized for efficiency and is not as accurate. +- **large**: Frigate will run a large ArcFace embedding model that is optimized for accuracy. It is only recommended to be run when an integrated or dedicated GPU is available. In both cases a lightweight face landmark detection model is also used to align faces before running the recognition model. ## Minimum System Requirements -The `small` model is optimized for efficiency and runs on the CPU, there are no significantly different system requirements. -The `large` model is optimized for accuracy and an integrated or discrete GPU is highly recommended. +The `small` model is optimized for efficiency and runs on the CPU, most CPUs should run the model efficiently. +The `large` model is optimized for accuracy, an integrated or discrete GPU is highly recommended. 
## Configuration diff --git a/frigate/data_processing/common/face/model.py b/frigate/data_processing/common/face/model.py index 1af934c5d..eb27df68d 100644 --- a/frigate/data_processing/common/face/model.py +++ b/frigate/data_processing/common/face/model.py @@ -10,7 +10,7 @@ from scipy import stats from frigate.config import FrigateConfig from frigate.const import MODEL_CACHE_DIR -from frigate.embeddings.onnx.face_embedding import ArcfaceEmbedding +from frigate.embeddings.onnx.face_embedding import ArcfaceEmbedding, FaceNetEmbedding logger = logging.getLogger(__name__) @@ -124,83 +124,140 @@ class FaceRecognizer(ABC): return 1.0 -class LBPHRecognizer(FaceRecognizer): +def similarity_to_confidence( + cosine_similarity: float, median=0.3, range_width=0.6, slope_factor=12 +): + """ + Default sigmoid function to map cosine similarity to confidence. + + Args: + cosine_similarity (float): The input cosine similarity. + median (float): Assumed median of cosine similarity distribution. + range_width (float): Assumed range of cosine similarity distribution (90th percentile - 10th percentile). + slope_factor (float): Adjusts the steepness of the curve. + + Returns: + float: The confidence score. + """ + + # Calculate slope and bias + slope = slope_factor / range_width + bias = median + + # Calculate confidence + confidence = 1 / (1 + np.exp(-slope * (cosine_similarity - bias))) + return confidence + + +class FaceNetRecognizer(FaceRecognizer): def __init__(self, config: FrigateConfig): super().__init__(config) - self.label_map: dict[int, str] = {} - self.recognizer: cv2.face.LBPHFaceRecognizer | None = None + self.mean_embs: dict[int, np.ndarray] = {} + self.face_embedder: FaceNetEmbedding = FaceNetEmbedding() + self.model_builder_queue: queue.Queue | None = None def clear(self) -> None: - self.face_recognizer = None - self.label_map = {} + self.mean_embs = {} + + def run_build_task(self) -> None: + self.model_builder_queue = queue.Queue() + + def build_model(): + face_embeddings_map: dict[str, list[np.ndarray]] = {} + idx = 0 + + dir = "/media/frigate/clips/faces" + for name in os.listdir(dir): + if name == "train": + continue + + face_folder = os.path.join(dir, name) + + if not os.path.isdir(face_folder): + continue + + face_embeddings_map[name] = [] + for image in os.listdir(face_folder): + img = cv2.imread(os.path.join(face_folder, image)) + + if img is None: + continue + + img = self.align_face(img, img.shape[1], img.shape[0]) + emb = self.face_embedder([img])[0].squeeze() + face_embeddings_map[name].append(emb) + + idx += 1 + + self.model_builder_queue.put(face_embeddings_map) + + thread = threading.Thread(target=build_model, daemon=True) + thread.start() def build(self): if not self.landmark_detector: self.init_landmark_detector() return None - labels = [] - faces = [] - idx = 0 - - dir = "/media/frigate/clips/faces" - for name in os.listdir(dir): - if name == "train": - continue - - face_folder = os.path.join(dir, name) - - if not os.path.isdir(face_folder): - continue - - self.label_map[idx] = name - for image in os.listdir(face_folder): - img = cv2.imread(os.path.join(face_folder, image)) - - if img is None: - continue - - img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) - img = self.align_face(img, img.shape[1], img.shape[0]) - faces.append(img) - labels.append(idx) - - idx += 1 - - if not faces: + if self.model_builder_queue is not None: + try: + face_embeddings_map: dict[str, list[np.ndarray]] = ( + self.model_builder_queue.get(timeout=0.1) + ) + self.model_builder_queue = None + except 
queue.Empty: + return + else: + self.run_build_task() return - self.recognizer: cv2.face.LBPHFaceRecognizer = ( - cv2.face.LBPHFaceRecognizer_create(radius=2, threshold=400) - ) - self.recognizer.train(faces, np.array(labels)) + if not face_embeddings_map: + return - def classify(self, face_image: np.ndarray) -> tuple[str, float] | None: + for name, embs in face_embeddings_map.items(): + if embs: + self.mean_embs[name] = stats.trim_mean(embs, 0.15) + + logger.debug("Finished building ArcFace model") + + def classify(self, face_image): if not self.landmark_detector: return None - if not self.label_map or not self.recognizer: + if not self.mean_embs: self.build() - if not self.recognizer: + if not self.mean_embs: return None # face recognition is best run on grayscale images - img = cv2.cvtColor(face_image, cv2.COLOR_BGR2GRAY) # get blur factor before aligning face - blur_factor = self.get_blur_factor(img) - logger.debug(f"face detected with bluriness {blur_factor}") + blur_factor = self.get_blur_factor(face_image) + logger.debug(f"face detected with blurriness {blur_factor}") # align face and run recognition - img = self.align_face(img, img.shape[1], img.shape[0]) - index, distance = self.recognizer.predict(img) + img = self.align_face(face_image, face_image.shape[1], face_image.shape[0]) + embedding = self.face_embedder([img])[0].squeeze() - if index == -1: - return None + score = 0 + label = "" - score = (1.0 - (distance / 1000)) * blur_factor - return self.label_map[index], round(score, 2) + for name, mean_emb in self.mean_embs.items(): + dot_product = np.dot(embedding, mean_emb) + magnitude_A = np.linalg.norm(embedding) + magnitude_B = np.linalg.norm(mean_emb) + + cosine_similarity = dot_product / (magnitude_A * magnitude_B) + confidence = similarity_to_confidence( + cosine_similarity, median=0.5, range_width=0.6 + ) + + if confidence > score: + score = confidence + label = name + + return label, round(score * blur_factor, 2) class ArcFaceRecognizer(FaceRecognizer): @@ -274,30 +331,6 @@ class ArcFaceRecognizer(FaceRecognizer): logger.debug("Finished building ArcFace model") - def similarity_to_confidence( - self, cosine_similarity: float, median=0.3, range_width=0.6, slope_factor=12 - ): - """ - Default sigmoid function to map cosine similarity to confidence. - - Args: - cosine_similarity (float): The input cosine similarity. - median (float): Assumed median of cosine similarity distribution. - range_width (float): Assumed range of cosine similarity distribution (90th percentile - 10th percentile). - slope_factor (float): Adjusts the steepness of the curve. - - Returns: - float: The confidence score. 
- """ - - # Calculate slope and bias - slope = slope_factor / range_width - bias = median - - # Calculate confidence - confidence = 1 / (1 + np.exp(-slope * (cosine_similarity - bias))) - return confidence - def classify(self, face_image): if not self.landmark_detector: return None @@ -312,7 +345,7 @@ class ArcFaceRecognizer(FaceRecognizer): # get blur factor before aligning face blur_factor = self.get_blur_factor(face_image) - logger.debug(f"face detected with bluriness {blur_factor}") + logger.debug(f"face detected with blurriness {blur_factor}") # align face and run recognition img = self.align_face(face_image, face_image.shape[1], face_image.shape[0]) @@ -327,7 +360,7 @@ class ArcFaceRecognizer(FaceRecognizer): magnitude_B = np.linalg.norm(mean_emb) cosine_similarity = dot_product / (magnitude_A * magnitude_B) - confidence = self.similarity_to_confidence(cosine_similarity) + confidence = similarity_to_confidence(cosine_similarity) if confidence > score: score = confidence diff --git a/frigate/data_processing/real_time/face.py b/frigate/data_processing/real_time/face.py index 9b479a527..e3ebff079 100644 --- a/frigate/data_processing/real_time/face.py +++ b/frigate/data_processing/real_time/face.py @@ -21,8 +21,8 @@ from frigate.config import FrigateConfig from frigate.const import FACE_DIR, MODEL_CACHE_DIR from frigate.data_processing.common.face.model import ( ArcFaceRecognizer, + FaceNetRecognizer, FaceRecognizer, - LBPHRecognizer, ) from frigate.util.image import area @@ -78,7 +78,7 @@ class FaceRealTimeProcessor(RealTimeProcessorApi): self.label_map: dict[int, str] = {} if self.face_config.model_size == "small": - self.recognizer = LBPHRecognizer(self.config) + self.recognizer = FaceNetRecognizer(self.config) else: self.recognizer = ArcFaceRecognizer(self.config) @@ -412,10 +412,6 @@ class FaceRealTimeProcessor(RealTimeProcessorApi): prominent_name = max(score_count) - # if a single name is not prominent in the history then we are not confident - if score_count[prominent_name] / len(results_list) < 0.65: - return "unknown", 0.0 - return prominent_name, weighted_scores[prominent_name] / total_face_areas[ prominent_name ] diff --git a/frigate/embeddings/__init__.py b/frigate/embeddings/__init__.py index c593a6c0d..e0673565b 100644 --- a/frigate/embeddings/__init__.py +++ b/frigate/embeddings/__init__.py @@ -236,6 +236,10 @@ class EmbeddingsContext: if len(os.listdir(folder)) == 0: os.rmdir(folder) + self.requestor.send_data( + EmbeddingsRequestEnum.clear_face_classifier.value, None + ) + def update_description(self, event_id: str, description: str) -> None: self.requestor.send_data( EmbeddingsRequestEnum.embed_description.value, diff --git a/frigate/embeddings/onnx/face_embedding.py b/frigate/embeddings/onnx/face_embedding.py index 0b808f716..860caab57 100644 --- a/frigate/embeddings/onnx/face_embedding.py +++ b/frigate/embeddings/onnx/face_embedding.py @@ -11,9 +11,105 @@ from frigate.util.downloader import ModelDownloader from .base_embedding import BaseEmbedding from .runner import ONNXModelRunner +try: + from tflite_runtime.interpreter import Interpreter +except ModuleNotFoundError: + from tensorflow.lite.python.interpreter import Interpreter + logger = logging.getLogger(__name__) -FACE_EMBEDDING_SIZE = 112 +ARCFACE_INPUT_SIZE = 112 +FACENET_INPUT_SIZE = 160 + + +class FaceNetEmbedding(BaseEmbedding): + def __init__( + self, + device: str = "AUTO", + ): + super().__init__( + model_name="facedet", + model_file="facenet.tflite", + download_urls={ + "facenet.tflite": 
"https://github.com/NickM-27/facenet-onnx/releases/download/v1.0/facenet.tflite", + }, + ) + self.device = device + self.download_path = os.path.join(MODEL_CACHE_DIR, self.model_name) + self.tokenizer = None + self.feature_extractor = None + self.runner = None + files_names = list(self.download_urls.keys()) + + if not all( + os.path.exists(os.path.join(self.download_path, n)) for n in files_names + ): + logger.debug(f"starting model download for {self.model_name}") + self.downloader = ModelDownloader( + model_name=self.model_name, + download_path=self.download_path, + file_names=files_names, + download_func=self._download_model, + ) + self.downloader.ensure_model_files() + else: + self.downloader = None + self._load_model_and_utils() + logger.debug(f"models are already downloaded for {self.model_name}") + + def _load_model_and_utils(self): + if self.runner is None: + if self.downloader: + self.downloader.wait_for_download() + + self.runner = Interpreter( + model_path=os.path.join(MODEL_CACHE_DIR, "facedet/facenet.tflite"), + num_threads=2, + ) + self.runner.allocate_tensors() + self.tensor_input_details = self.runner.get_input_details() + self.tensor_output_details = self.runner.get_output_details() + + def _preprocess_inputs(self, raw_inputs): + pil = self._process_image(raw_inputs[0]) + + # handle images larger than input size + width, height = pil.size + if width != FACENET_INPUT_SIZE or height != FACENET_INPUT_SIZE: + if width > height: + new_height = int(((height / width) * FACENET_INPUT_SIZE) // 4 * 4) + pil = pil.resize((FACENET_INPUT_SIZE, new_height)) + else: + new_width = int(((width / height) * FACENET_INPUT_SIZE) // 4 * 4) + pil = pil.resize((new_width, FACENET_INPUT_SIZE)) + + og = np.array(pil).astype(np.float32) + + # Image must be FACE_EMBEDDING_SIZExFACE_EMBEDDING_SIZE + og_h, og_w, channels = og.shape + frame = np.zeros( + (FACENET_INPUT_SIZE, FACENET_INPUT_SIZE, channels), dtype=np.float32 + ) + + # compute center offset + x_center = (FACENET_INPUT_SIZE - og_w) // 2 + y_center = (FACENET_INPUT_SIZE - og_h) // 2 + + # copy img image into center of result image + frame[y_center : y_center + og_h, x_center : x_center + og_w] = og + + # run facenet normalization + frame = (frame / 127.5) - 1.0 + + frame = np.expand_dims(frame, axis=0) + return frame + + def __call__(self, inputs): + self._load_model_and_utils() + processed = self._preprocess_inputs(inputs) + self.runner.set_tensor(self.tensor_input_details[0]["index"], processed) + self.runner.invoke() + return self.runner.get_tensor(self.tensor_output_details[0]["index"]) class ArcfaceEmbedding(BaseEmbedding): @@ -66,25 +162,25 @@ class ArcfaceEmbedding(BaseEmbedding): # handle images larger than input size width, height = pil.size - if width != FACE_EMBEDDING_SIZE or height != FACE_EMBEDDING_SIZE: + if width != ARCFACE_INPUT_SIZE or height != ARCFACE_INPUT_SIZE: if width > height: - new_height = int(((height / width) * FACE_EMBEDDING_SIZE) // 4 * 4) - pil = pil.resize((FACE_EMBEDDING_SIZE, new_height)) + new_height = int(((height / width) * ARCFACE_INPUT_SIZE) // 4 * 4) + pil = pil.resize((ARCFACE_INPUT_SIZE, new_height)) else: - new_width = int(((width / height) * FACE_EMBEDDING_SIZE) // 4 * 4) - pil = pil.resize((new_width, FACE_EMBEDDING_SIZE)) + new_width = int(((width / height) * ARCFACE_INPUT_SIZE) // 4 * 4) + pil = pil.resize((new_width, ARCFACE_INPUT_SIZE)) og = np.array(pil).astype(np.float32) # Image must be FACE_EMBEDDING_SIZExFACE_EMBEDDING_SIZE og_h, og_w, channels = og.shape frame = np.zeros( - 
(FACE_EMBEDDING_SIZE, FACE_EMBEDDING_SIZE, channels), dtype=np.float32 + (ARCFACE_INPUT_SIZE, ARCFACE_INPUT_SIZE, channels), dtype=np.float32 ) # compute center offset - x_center = (FACE_EMBEDDING_SIZE - og_w) // 2 - y_center = (FACE_EMBEDDING_SIZE - og_h) // 2 + x_center = (ARCFACE_INPUT_SIZE - og_w) // 2 + y_center = (ARCFACE_INPUT_SIZE - og_h) // 2 # copy img image into center of result image frame[y_center : y_center + og_h, x_center : x_center + og_w] = og diff --git a/web/public/locales/en/views/settings.json b/web/public/locales/en/views/settings.json index b1b70c8e5..4a7693416 100644 --- a/web/public/locales/en/views/settings.json +++ b/web/public/locales/en/views/settings.json @@ -113,11 +113,11 @@ "desc": "The size of the model used for face recognition.", "small": { "title": "small", - "desc": "Using small employs a Local Binary Pattern Histogram model via OpenCV that runs efficiently on most CPUs." + "desc": "Using small employs a FaceNet face embedding model that runs efficiently on most CPUs." }, "large": { "title": "large", - "desc": "Using large employs an ArcFace Face embedding model and will automatically run on the GPU if applicable." + "desc": "Using large employs an ArcFace face embedding model and will automatically run on the GPU if applicable." } } }, From 1233bc3a422b6b31f46e194d1fa1b67d3293ea54 Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Thu, 27 Mar 2025 06:49:14 -0500 Subject: [PATCH 69/97] Miscellaneous fixes (#17406) * add config validator for face and lpr * more lpr docs tweaks * fix object lifecycle point clicking for aspect ratios less than 16/9 * fix semantic search indexing i18n keys * remove ability to set system language * clarify debug output --- .../license_plate_recognition.md | 20 ++++++++++++++---- frigate/config/config.py | 20 +++++++++++++++++- .../common/license_plate/mixin.py | 4 +++- web/src/components/menu/GeneralSettings.tsx | 21 +------------------ .../overlay/detail/ObjectLifecycle.tsx | 1 - web/src/pages/Explore.tsx | 12 ++++++++--- 6 files changed, 48 insertions(+), 30 deletions(-) diff --git a/docs/docs/configuration/license_plate_recognition.md b/docs/docs/configuration/license_plate_recognition.md index e4c8c1167..13d45310e 100644 --- a/docs/docs/configuration/license_plate_recognition.md +++ b/docs/docs/configuration/license_plate_recognition.md @@ -40,14 +40,14 @@ lpr: enabled: True ``` -You can also enable it for specific cameras only at the camera level: +Like other enrichments in Frigate, LPR **must be enabled globally** to use the feature. You can disable it for specific cameras at the camera level: ```yaml cameras: driveway: ... lpr: - enabled: True + enabled: False ``` For non-dedicated LPR cameras, ensure that your camera is configured to detect objects of type `car`, and that a car is actually being detected by Frigate. Otherwise, LPR will not run. @@ -195,12 +195,16 @@ When using `type: "lpr"` for a camera, a non-standard object detection pipeline Ensure that: -- Your camera has a clear, human-readable, well-lit view of the plate. If you can't read the plate, Frigate certainly won't be able to. This may require changing video size, quality, or frame rate settings on your camera, depending on your scene and how fast the vehicles are traveling. +- Your camera has a clear, human-readable, well-lit view of the plate. If you can't read the plate's characters, Frigate certainly won't be able to, even if the model is recognizing a `license_plate`. 
This may require changing video size, quality, or frame rate settings on your camera, depending on your scene and how fast the vehicles are traveling. - The plate is large enough in the image (try adjusting `min_area`) or increasing the resolution of your camera's stream. If you are using a Frigate+ model or a custom model that detects license plates, ensure that `license_plate` is added to your list of objects to track. If you are using the free model that ships with Frigate, you should _not_ add `license_plate` to the list of objects to track. +Recognized plates will show as object labels in the debug view and will appear in the "Recognized License Plates" select box in the More Filters popout in Explore. + +If you are still having issues detecting plates, start with a basic configuration and see the debugging tips below. + ### Can I run LPR without detecting `car` objects? In normal LPR mode, Frigate requires a `car` to be detected first before recognizing a license plate. If you have a dedicated LPR camera, you can change the camera `type` to `"lpr"` to use the Dedicated LPR Camera algorithm. This comes with important caveats, though. See the [Dedicated LPR Cameras](#dedicated-lpr-cameras) section above. @@ -222,10 +226,18 @@ Use `match_distance` to allow small character mismatches. Alternatively, define ### How do I debug LPR issues? - View MQTT messages for `frigate/events` to verify detected plates. -- Adjust `detection_threshold` and `recognition_threshold` settings. - If you are using a Frigate+ model or a model that detects license plates, watch the debug view (Settings --> Debug) to ensure that `license_plate` is being detected with a `car`. +- Watch the debug view to see plates recognized in real-time. For non-dedicated LPR cameras, the `car` label will change to the recognized plate when LPR is enabled and working. +- Adjust `detection_threshold` and `recognition_threshold` settings per the suggestions [above](#advanced-configuration). - Enable debug logs for LPR by adding `frigate.data_processing.common.license_plate: debug` to your `logger` configuration. These logs are _very_ verbose, so only enable this when necessary. + ```yaml + logger: + default: info + logs: + frigate.data_processing.common.license_plate: debug + ``` + ### Will LPR slow down my system? LPR runs on the CPU, so performance impact depends on your hardware. Ensure you have at least 4GB RAM and a capable CPU for optimal results. If you are running the Dedicated LPR Camera mode, resource usage will be higher compared to users who run a model that natively detects license plates. Tune your motion detection settings for your dedicated LPR camera so that the license plate detection model runs only when necessary. diff --git a/frigate/config/config.py b/frigate/config/config.py index c27cd365d..7c0b669c8 100644 --- a/frigate/config/config.py +++ b/frigate/config/config.py @@ -292,13 +292,30 @@ def verify_autotrack_zones(camera_config: CameraConfig) -> ValueError | None: def verify_motion_and_detect(camera_config: CameraConfig) -> ValueError | None: - """Verify that required_zones are specified when autotracking is enabled.""" + """Verify that motion detection is not disabled and object detection is enabled.""" if camera_config.detect.enabled and not camera_config.motion.enabled: raise ValueError( f"Camera {camera_config.name} has motion detection disabled and object detection enabled but object detection requires motion detection." 
) +def verify_lpr_and_face( + frigate_config: FrigateConfig, camera_config: CameraConfig +) -> ValueError | None: + """Verify that lpr and face are enabled at the global level if enabled at the camera level.""" + if camera_config.lpr.enabled and not frigate_config.lpr.enabled: + raise ValueError( + f"Camera {camera_config.name} has lpr enabled but lpr is disabled at the global level of the config. You must enable lpr at the global level." + ) + if ( + camera_config.face_recognition.enabled + and not frigate_config.face_recognition.enabled + ): + raise ValueError( + f"Camera {camera_config.name} has face_recognition enabled but face_recognition is disabled at the global level of the config. You must enable face_recognition at the global level." + ) + + class FrigateConfig(FrigateBaseModel): version: Optional[str] = Field(default=None, title="Current config version.") @@ -607,6 +624,7 @@ class FrigateConfig(FrigateBaseModel): verify_required_zones_exist(camera_config) verify_autotrack_zones(camera_config) verify_motion_and_detect(camera_config) + verify_lpr_and_face(self, camera_config) self.objects.parse_all_objects(self.cameras) self.model.create_colormap(sorted(self.objects.all_objects)) diff --git a/frigate/data_processing/common/license_plate/mixin.py b/frigate/data_processing/common/license_plate/mixin.py index 9cc988267..48c191876 100644 --- a/frigate/data_processing/common/license_plate/mixin.py +++ b/frigate/data_processing/common/license_plate/mixin.py @@ -814,7 +814,9 @@ class LicensePlateProcessingMixin: ] ).clip(0, [input.shape[1], input.shape[0]] * 2) - logger.debug(f"Found license plate: {expanded_box.astype(int)}") + logger.debug( + f"Found license plate. Bounding box: {expanded_box.astype(int)}" + ) return tuple(expanded_box.astype(int)) else: return None # No detection above the threshold diff --git a/web/src/components/menu/GeneralSettings.tsx b/web/src/components/menu/GeneralSettings.tsx index eac5f6384..f844b74ef 100644 --- a/web/src/components/menu/GeneralSettings.tsx +++ b/web/src/components/menu/GeneralSettings.tsx @@ -12,7 +12,6 @@ import { LuSettings, LuSun, LuSunMoon, - LuEarth, } from "react-icons/lu"; import { DropdownMenu, @@ -76,7 +75,7 @@ export default function GeneralSettings({ className }: GeneralSettingsProps) { // settings - const { language, setLanguage, systemLanguage } = useLanguage(); + const { language, setLanguage } = useLanguage(); const { theme, colorScheme, setTheme, setColorScheme } = useTheme(); const [restartDialogOpen, setRestartDialogOpen] = useState(false); const [passwordDialogOpen, setPasswordDialogOpen] = useState(false); @@ -352,24 +351,6 @@ export default function GeneralSettings({ className }: GeneralSettingsProps) { )} - setLanguage(systemLanguage)} - > - {language === systemLanguage ? ( - <> - - {t("menu.withSystem")} - - ) : ( - {t("menu.withSystem")} - )} - diff --git a/web/src/components/overlay/detail/ObjectLifecycle.tsx b/web/src/components/overlay/detail/ObjectLifecycle.tsx index 0f77ecfbf..6dca83362 100644 --- a/web/src/components/overlay/detail/ObjectLifecycle.tsx +++ b/web/src/components/overlay/detail/ObjectLifecycle.tsx @@ -365,7 +365,6 @@ export default function ObjectLifecycle({
- t("exploreIsUnavailable.embeddingsReindexing.step.thumbnailsEmbedded") + {t( + "exploreIsUnavailable.embeddingsReindexing.step.thumbnailsEmbedded", + )} {reindexState.thumbnails}
- t("exploreIsUnavailable.embeddingsReindexing.step.descriptionsEmbedded") + {t( + "exploreIsUnavailable.embeddingsReindexing.step.descriptionsEmbedded", + )} {reindexState.descriptions}
- t("exploreIsUnavailable.embeddingsReindexing.step.trackedObjectsProcessed") + {t( + "exploreIsUnavailable.embeddingsReindexing.step.trackedObjectsProcessed", + )} {reindexState.processed_objects} /{" "} {reindexState.total_objects} From a35146ab616efc4affd8c81b9bf9f33cdbc5ec9c Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Thu, 27 Mar 2025 08:28:09 -0600 Subject: [PATCH 70/97] Various fixes (#17411) * Remove initial requirement for history * Clenaup conf * Handle symlinks --- docker/tensorrt/Dockerfile.base | 17 +++++++++++++++-- .../rootfs/etc/ld.so.conf.d/cuda_tensorrt.conf | 3 +-- frigate/data_processing/real_time/face.py | 5 ----- 3 files changed, 16 insertions(+), 9 deletions(-) diff --git a/docker/tensorrt/Dockerfile.base b/docker/tensorrt/Dockerfile.base index bd8738792..79a7f3c98 100644 --- a/docker/tensorrt/Dockerfile.base +++ b/docker/tensorrt/Dockerfile.base @@ -20,8 +20,21 @@ RUN --mount=type=bind,source=docker/tensorrt/detector/tensorrt_libyolo.sh,target # COPY required individual CUDA deps RUN mkdir -p /usr/local/cuda-deps RUN if [ "$TARGETARCH" = "amd64" ]; then \ - cp /usr/local/cuda-12.3/targets/x86_64-linux/lib/libcurand.s* /usr/local/cuda-deps/ && \ - cp /usr/local/cuda-12.3/targets/x86_64-linux/lib/libnvrtc.s* /usr/local/cuda-deps/ ; \ + cp /usr/local/cuda-12.3/targets/x86_64-linux/lib/libcurand.so.* /usr/local/cuda-deps/ && \ + cp /usr/local/cuda-12.3/targets/x86_64-linux/lib/libnvrtc.so.* /usr/local/cuda-deps/ && \ + cd /usr/local/cuda-deps/ && \ + for lib in libnvrtc.so.*; do \ + if [[ "$lib" =~ libnvrtc.so\.([0-9]+\.[0-9]+\.[0-9]+) ]]; then \ + version="${BASH_REMATCH[1]}"; \ + ln -sf "libnvrtc.so.$version" libnvrtc.so; \ + fi; \ + done && \ + for lib in libcurand.so.*; do \ + if [[ "$lib" =~ libcurand.so\.([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+) ]]; then \ + version="${BASH_REMATCH[1]}"; \ + ln -sf "libcurand.so.$version" libcurand.so; \ + fi; \ + done; \ fi # Frigate w/ TensorRT Support as separate image diff --git a/docker/tensorrt/detector/rootfs/etc/ld.so.conf.d/cuda_tensorrt.conf b/docker/tensorrt/detector/rootfs/etc/ld.so.conf.d/cuda_tensorrt.conf index 72eec56e0..f66af7dc6 100644 --- a/docker/tensorrt/detector/rootfs/etc/ld.so.conf.d/cuda_tensorrt.conf +++ b/docker/tensorrt/detector/rootfs/etc/ld.so.conf.d/cuda_tensorrt.conf @@ -1,8 +1,7 @@ /usr/local/lib /usr/local/cuda +/usr/local/lib/python3.11/dist-packages/tensorrt /usr/local/lib/python3.11/dist-packages/nvidia/cudnn/lib /usr/local/lib/python3.11/dist-packages/nvidia/cuda_runtime/lib /usr/local/lib/python3.11/dist-packages/nvidia/cublas/lib -/usr/local/lib/python3.11/dist-packages/nvidia/cuda_nvrtc/lib -/usr/local/lib/python3.11/dist-packages/tensorrt /usr/local/lib/python3.11/dist-packages/nvidia/cufft/lib \ No newline at end of file diff --git a/frigate/data_processing/real_time/face.py b/frigate/data_processing/real_time/face.py index e3ebff079..d3fbc83df 100644 --- a/frigate/data_processing/real_time/face.py +++ b/frigate/data_processing/real_time/face.py @@ -390,11 +390,6 @@ class FaceRealTimeProcessor(RealTimeProcessorApi): self.person_face_history.pop(object_id) def weighted_average_by_area(self, results_list: list[tuple[str, float, int]]): - min_faces = 1 if self.requires_face_detection else 3 - - if len(results_list) < min_faces: - return "unknown", 0.0 - score_count = {} weighted_scores = {} total_face_areas = {} From 6ec7d96ec9136b05cccd2c7e7404ac119cdc74ba Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Thu, 27 Mar 2025 09:49:10 -0500 Subject: 
[PATCH 71/97] remove LPR denoising (#17412) --- .../common/license_plate/mixin.py | 26 +------------------ 1 file changed, 1 insertion(+), 25 deletions(-) diff --git a/frigate/data_processing/common/license_plate/mixin.py b/frigate/data_processing/common/license_plate/mixin.py index 48c191876..60066c48d 100644 --- a/frigate/data_processing/common/license_plate/mixin.py +++ b/frigate/data_processing/common/license_plate/mixin.py @@ -634,37 +634,13 @@ class LicensePlateProcessingMixin: else: gray = image - # detect noise with Laplacian variance - laplacian = cv2.Laplacian(gray, cv2.CV_64F) - noise_variance = np.var(laplacian) - brightness = cv2.mean(gray)[0] - noise_threshold = 70 - brightness_threshold = 150 - is_noisy = ( - noise_variance > noise_threshold and brightness < brightness_threshold - ) - - # apply bilateral filter and sharpening only if noisy - if is_noisy: - logger.debug( - f"Noise detected (variance: {noise_variance:.1f}, brightness: {brightness:.1f}) - denoising" - ) - smoothed = cv2.bilateralFilter(gray, d=15, sigmaColor=100, sigmaSpace=100) - sharpening_kernel = np.array([[-1, -1, -1], [-1, 9, -1], [-1, -1, -1]]) - processed = cv2.filter2D(smoothed, -1, sharpening_kernel) - else: - logger.debug( - f"No noise detected (variance: {noise_variance:.1f}, brightness: {brightness:.1f}) - skipping denoising and sharpening" - ) - processed = gray - # apply CLAHE for contrast enhancement grid_size = ( max(4, input_w // 40), max(4, input_h // 40), ) clahe = cv2.createCLAHE(clipLimit=1.5, tileGridSize=grid_size) - enhanced = clahe.apply(processed) + enhanced = clahe.apply(gray) # Convert back to 3-channel for model compatibility image = cv2.cvtColor(enhanced, cv2.COLOR_GRAY2RGB) From 8978d1ff743cb4b05d1f5cb7ddf9d4d8c143226b Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Thu, 27 Mar 2025 09:50:41 -0600 Subject: [PATCH 72/97] Tweak face recognition docs (#17413) --- docs/docs/configuration/face_recognition.md | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/docs/docs/configuration/face_recognition.md b/docs/docs/configuration/face_recognition.md index 278c592c0..9fb5c9a44 100644 --- a/docs/docs/configuration/face_recognition.md +++ b/docs/docs/configuration/face_recognition.md @@ -31,6 +31,7 @@ In both cases a lightweight face landmark detection model is also used to align ## Minimum System Requirements The `small` model is optimized for efficiency and runs on the CPU, most CPUs should run the model efficiently. + The `large` model is optimized for accuracy, an integrated or discrete GPU is highly recommended. ## Configuration @@ -65,7 +66,7 @@ Fine-tune face recognition with these optional parameters: - `blur_confidence_filter`: Enables a filter that calculates how blurry the face is and adjusts the confidence based on this. - Default: `True`. -## Dataset +## Creating a Robust Training Set The number of images needed for a sufficient training set for face recognition varies depending on several factors: @@ -74,11 +75,9 @@ The number of images needed for a sufficient training set for face recognition v However, here are some general guidelines: -- Minimum: For basic face recognition tasks, a minimum of 10-20 images per person is often recommended. -- Recommended: For more robust and accurate systems, 30-50 images per person is a good starting point. -- Ideal: For optimal performance, especially in challenging conditions, 100 or more images per person can be beneficial. 
-
-## Creating a Robust Training Set
+- Minimum: For basic face recognition tasks, a minimum of 5-10 images per person is often recommended.
+- Recommended: For more robust and accurate systems, 20-30 images per person is a good starting point.
+- Ideal: For optimal performance, especially in challenging conditions, 50-100 images per person can be beneficial.
 
 The accuracy of face recognition is heavily dependent on the quality of data given to it for training. It is recommended to build the face training library in phases.
 
@@ -89,7 +88,8 @@ When choosing images to include in the face training set it is recommended to al
 - If it is difficult to make out details in a persons face it will not be helpful in training.
 - Avoid images with extreme under/over-exposure.
 - Avoid blurry / pixelated images.
-- Be careful when uploading images of people when they are wearing clothing that covers a lot of their face as this may confuse the model.
+- Avoid training on infrared (grayscale) images. The models are trained on color images but will still be able to extract features from grayscale images.
+- Using images of people wearing hats / sunglasses may confuse the model.
 - Do not upload too many similar images at the same time, it is recommended to train no more than 4-6 similar images for each person to avoid overfitting.
 
 :::
 
@@ -124,4 +124,4 @@ This can happen for a few different reasons, but this is usually an indicator th
 
 ### I see scores above the threshold in the train tab, but a sub label wasn't assigned?
 
-The Frigate face recognizer collects face recognition scores from all of the frames across the person objects lifecycle. The scores are continually weighted based on the area of the face, and a sub label will only be assigned to person if there is a prominent person recognized. This avoids cases where a single high confidence recognition result would throw off the results.
+Frigate considers the recognition scores across all recognition attempts for each person object. The scores are continually weighted based on the area of the face, and a sub label will only be assigned to a person if that person is confidently recognized consistently. This avoids cases where a single high-confidence recognition would throw off the results.

From ccf20f456a21be15fd78414231c7ff02923830be Mon Sep 17 00:00:00 2001
From: aptalca <541623+aptalca@users.noreply.github.com>
Date: Thu, 27 Mar 2025 12:33:03 -0400
Subject: [PATCH 73/97] update YOLO_NAS notebook (#17414)

Google Colab updated to Python 3.11.

super-gradients v3.7.1 is not compatible with py3.11, so the install fails.

super-gradients committed a fix to the master branch but did not cut a release since (it was acquired by Nvidia in the meantime).

This commit installs super-gradients from the master branch.
---
 notebooks/YOLO_NAS_Pretrained_Export.ipynb | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/notebooks/YOLO_NAS_Pretrained_Export.ipynb b/notebooks/YOLO_NAS_Pretrained_Export.ipynb
index e4e2222da..4fee19183 100644
--- a/notebooks/YOLO_NAS_Pretrained_Export.ipynb
+++ b/notebooks/YOLO_NAS_Pretrained_Export.ipynb
@@ -8,14 +8,14 @@
    },
    "outputs": [],
    "source": [
-    "! pip install -q super_gradients==3.7.1"
+    "! pip install -q git+https://github.com/Deci-AI/super-gradients.git"
   ]
  },
  {
   "cell_type": "code",
   "source": [
-    "! sed -i 's/sghub.deci.ai/sg-hub-nv.s3.amazonaws.com/' /usr/local/lib/python3.10/dist-packages/super_gradients/training/pretrained_models.py\n",
-    "! 
sed -i 's/sghub.deci.ai/sg-hub-nv.s3.amazonaws.com/' /usr/local/lib/python3.10/dist-packages/super_gradients/training/utils/checkpoint_utils.py" + "! sed -i 's/sghub.deci.ai/sg-hub-nv.s3.amazonaws.com/' /usr/local/lib/python3.11/dist-packages/super_gradients/training/pretrained_models.py\n", + "! sed -i 's/sghub.deci.ai/sg-hub-nv.s3.amazonaws.com/' /usr/local/lib/python3.11/dist-packages/super_gradients/training/utils/checkpoint_utils.py" ], "metadata": { "id": "NiRCt917KKcL" @@ -84,4 +84,4 @@ }, "nbformat": 4, "nbformat_minor": 0 -} \ No newline at end of file +} From 67dd50a7f7940e6346aaa528d17f96522c7885a2 Mon Sep 17 00:00:00 2001 From: leccelecce <24962424+leccelecce@users.noreply.github.com> Date: Thu, 27 Mar 2025 16:33:49 +0000 Subject: [PATCH 74/97] Devcontainer: update Mosquitto from 1.6 to 2.0 (#17415) --- docker-compose.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docker-compose.yml b/docker-compose.yml index 168e7fd10..db63297d5 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -36,6 +36,7 @@ services: # - /dev/bus/usb:/dev/bus/usb # Uncomment for Google Coral USB mqtt: container_name: mqtt - image: eclipse-mosquitto:1.6 + image: eclipse-mosquitto:2.0 + command: mosquitto -c /mosquitto-no-auth.conf # enable no-auth mode ports: - "1883:1883" From 23c332387127bd03220c52f9da4a88ebc5ef7fba Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Thu, 27 Mar 2025 12:29:34 -0500 Subject: [PATCH 75/97] Dynamic embeddings reindexing (#17418) * reindex with api endpoint and zmq * threading * frontend * require admin role --- frigate/api/classification.py | 46 +++++ frigate/comms/embeddings_updater.py | 1 + frigate/embeddings/__init__.py | 3 + frigate/embeddings/embeddings.py | 29 +++ frigate/embeddings/maintainer.py | 3 + web/public/locales/en/views/settings.json | 12 +- .../settings/ClassificationSettingsView.tsx | 172 +++++++++++------- 7 files changed, 193 insertions(+), 73 deletions(-) diff --git a/frigate/api/classification.py b/frigate/api/classification.py index 975a41c9d..4a6969cd3 100644 --- a/frigate/api/classification.py +++ b/frigate/api/classification.py @@ -298,3 +298,49 @@ def reprocess_license_plate(request: Request, event_id: str): content=response, status_code=200, ) + + +@router.put("/reindex", dependencies=[Depends(require_role(["admin"]))]) +def reindex_embeddings(request: Request): + if not request.app.frigate_config.semantic_search.enabled: + message = ( + "Cannot reindex tracked object embeddings, Semantic Search is not enabled." 
+ ) + logger.error(message) + return JSONResponse( + content=( + { + "success": False, + "message": message, + } + ), + status_code=400, + ) + + context: EmbeddingsContext = request.app.embeddings + response = context.reindex_embeddings() + + if response == "started": + return JSONResponse( + content={ + "success": True, + "message": "Embeddings reindexing has started.", + }, + status_code=202, # 202 Accepted + ) + elif response == "in_progress": + return JSONResponse( + content={ + "success": False, + "message": "Embeddings reindexing is already in progress.", + }, + status_code=409, # 409 Conflict + ) + else: + return JSONResponse( + content={ + "success": False, + "message": "Failed to start reindexing.", + }, + status_code=500, + ) diff --git a/frigate/comms/embeddings_updater.py b/frigate/comms/embeddings_updater.py index fc35c4665..6c26af3d1 100644 --- a/frigate/comms/embeddings_updater.py +++ b/frigate/comms/embeddings_updater.py @@ -17,6 +17,7 @@ class EmbeddingsRequestEnum(Enum): register_face = "register_face" reprocess_face = "reprocess_face" reprocess_plate = "reprocess_plate" + reindex = "reindex" class EmbeddingsResponder: diff --git a/frigate/embeddings/__init__.py b/frigate/embeddings/__init__.py index e0673565b..c60465845 100644 --- a/frigate/embeddings/__init__.py +++ b/frigate/embeddings/__init__.py @@ -250,3 +250,6 @@ class EmbeddingsContext: return self.requestor.send_data( EmbeddingsRequestEnum.reprocess_plate.value, {"event": event} ) + + def reindex_embeddings(self) -> dict[str, any]: + return self.requestor.send_data(EmbeddingsRequestEnum.reindex.value, {}) diff --git a/frigate/embeddings/embeddings.py b/frigate/embeddings/embeddings.py index 7e866d1fe..d2053f5ee 100644 --- a/frigate/embeddings/embeddings.py +++ b/frigate/embeddings/embeddings.py @@ -3,6 +3,7 @@ import datetime import logging import os +import threading import time from numpy import ndarray @@ -74,6 +75,10 @@ class Embeddings: self.metrics = metrics self.requestor = InterProcessRequestor() + self.reindex_lock = threading.Lock() + self.reindex_thread = None + self.reindex_running = False + # Create tables if they don't exist self.db.create_embeddings_tables() @@ -368,3 +373,27 @@ class Embeddings: totals["status"] = "completed" self.requestor.send_data(UPDATE_EMBEDDINGS_REINDEX_PROGRESS, totals) + + def start_reindex(self) -> bool: + """Start reindexing in a separate thread if not already running.""" + with self.reindex_lock: + if self.reindex_running: + logger.warning("Reindex embeddings is already running.") + return False + + # Mark as running and start the thread + self.reindex_running = True + self.reindex_thread = threading.Thread( + target=self._reindex_wrapper, daemon=True + ) + self.reindex_thread.start() + return True + + def _reindex_wrapper(self) -> None: + """Wrapper to run reindex and reset running flag when done.""" + try: + self.reindex() + finally: + with self.reindex_lock: + self.reindex_running = False + self.reindex_thread = None diff --git a/frigate/embeddings/maintainer.py b/frigate/embeddings/maintainer.py index 9b90f6f2c..85b0e6d54 100644 --- a/frigate/embeddings/maintainer.py +++ b/frigate/embeddings/maintainer.py @@ -206,6 +206,9 @@ class EmbeddingMaintainer(threading.Thread): self.embeddings.embed_description("", data, upsert=False), pack=False, ) + elif topic == EmbeddingsRequestEnum.reindex.value: + response = self.embeddings.start_reindex() + return "started" if response else "in_progress" processors = [self.realtime_processors, self.post_processors] for processor_list 
in processors: diff --git a/web/public/locales/en/views/settings.json b/web/public/locales/en/views/settings.json index 4a7693416..f6c5b2e99 100644 --- a/web/public/locales/en/views/settings.json +++ b/web/public/locales/en/views/settings.json @@ -87,9 +87,15 @@ "title": "Semantic Search", "desc": "Semantic Search in Frigate allows you to find tracked objects within your review items using either the image itself, a user-defined text description, or an automatically generated one.", "readTheDocumentation": "Read the Documentation", - "reindexOnStartup": { - "label": "Re-Index On Startup", - "desc": "Re-indexing will reprocess all thumbnails and descriptions (if enabled) and apply the embeddings on each startup. Don't forget to disable the option after restarting!" + "reindexNow": { + "label": "Reindex Now", + "desc": "Reindexing will regenerate embeddings for all tracked object. This process runs in the background and may max out your CPU and take a fair amount of time depending on the number of tracked objects you have.", + "confirmTitle": "Confirm Reindexing", + "confirmDesc": "Are you sure you want to reindex all tracked object embeddings? This process will run in the background but it may max out your CPU and take a fair amount of time. You can watch the progress on the Explore page.", + "confirmButton": "Reindex", + "success": "Reindexing started successfully.", + "alreadyInProgress": "Reindexing is already in progress.", + "error": "Failed to start reindexing: {{errorMessage}}" }, "modelSize": { "label": "Model Size", diff --git a/web/src/views/settings/ClassificationSettingsView.tsx b/web/src/views/settings/ClassificationSettingsView.tsx index 24c3a9107..d12008f8f 100644 --- a/web/src/views/settings/ClassificationSettingsView.tsx +++ b/web/src/views/settings/ClassificationSettingsView.tsx @@ -21,11 +21,21 @@ import { SelectTrigger, } from "@/components/ui/select"; import { Trans, useTranslation } from "react-i18next"; +import { + AlertDialog, + AlertDialogAction, + AlertDialogCancel, + AlertDialogContent, + AlertDialogDescription, + AlertDialogFooter, + AlertDialogHeader, + AlertDialogTitle, +} from "@/components/ui/alert-dialog"; +import { buttonVariants } from "@/components/ui/button"; type ClassificationSettings = { search: { enabled?: boolean; - reindex?: boolean; model_size?: SearchModelSize; }; face: { @@ -48,39 +58,22 @@ export default function ClassificationSettingsView({ useSWR("config"); const [changedValue, setChangedValue] = useState(false); const [isLoading, setIsLoading] = useState(false); + const [isReindexDialogOpen, setIsReindexDialogOpen] = useState(false); const { addMessage, removeMessage } = useContext(StatusBarMessagesContext)!; const [classificationSettings, setClassificationSettings] = useState({ - search: { - enabled: undefined, - reindex: undefined, - model_size: undefined, - }, - face: { - enabled: undefined, - model_size: undefined, - }, - lpr: { - enabled: undefined, - }, + search: { enabled: undefined, model_size: undefined }, + face: { enabled: undefined, model_size: undefined }, + lpr: { enabled: undefined }, }); const [origSearchSettings, setOrigSearchSettings] = useState({ - search: { - enabled: undefined, - reindex: undefined, - model_size: undefined, - }, - face: { - enabled: undefined, - model_size: undefined, - }, - lpr: { - enabled: undefined, - }, + search: { enabled: undefined, model_size: undefined }, + face: { enabled: undefined, model_size: undefined }, + lpr: { enabled: undefined }, }); useEffect(() => { @@ -89,32 +82,26 @@ export default 
function ClassificationSettingsView({ setClassificationSettings({ search: { enabled: config.semantic_search.enabled, - reindex: config.semantic_search.reindex, model_size: config.semantic_search.model_size, }, face: { enabled: config.face_recognition.enabled, model_size: config.face_recognition.model_size, }, - lpr: { - enabled: config.lpr.enabled, - }, + lpr: { enabled: config.lpr.enabled }, }); } setOrigSearchSettings({ search: { enabled: config.semantic_search.enabled, - reindex: config.semantic_search.reindex, model_size: config.semantic_search.model_size, }, face: { enabled: config.face_recognition.enabled, model_size: config.face_recognition.model_size, }, - lpr: { - enabled: config.lpr.enabled, - }, + lpr: { enabled: config.lpr.enabled }, }); } // we know that these deps are correct @@ -125,10 +112,7 @@ export default function ClassificationSettingsView({ newConfig: Partial, ) => { setClassificationSettings((prevConfig) => ({ - search: { - ...prevConfig.search, - ...newConfig.search, - }, + search: { ...prevConfig.search, ...newConfig.search }, face: { ...prevConfig.face, ...newConfig.face }, lpr: { ...prevConfig.lpr, ...newConfig.lpr }, })); @@ -141,10 +125,8 @@ export default function ClassificationSettingsView({ axios .put( - `config/set?semantic_search.enabled=${classificationSettings.search.enabled ? "True" : "False"}&semantic_search.reindex=${classificationSettings.search.reindex ? "True" : "False"}&semantic_search.model_size=${classificationSettings.search.model_size}&face_recognition.enabled=${classificationSettings.face.enabled ? "True" : "False"}&face_recognition.model_size=${classificationSettings.face.model_size}&lpr.enabled=${classificationSettings.lpr.enabled ? "True" : "False"}`, - { - requires_restart: 0, - }, + `config/set?semantic_search.enabled=${classificationSettings.search.enabled ? "True" : "False"}&semantic_search.model_size=${classificationSettings.search.model_size}&face_recognition.enabled=${classificationSettings.face.enabled ? "True" : "False"}&face_recognition.model_size=${classificationSettings.face.model_size}&lpr.enabled=${classificationSettings.lpr.enabled ? 
"True" : "False"}`, + { requires_restart: 0 }, ) .then((res) => { if (res.status === 200) { @@ -156,9 +138,7 @@ export default function ClassificationSettingsView({ } else { toast.error( t("classification.toast.error", { errorMessage: res.statusText }), - { - position: "top-center", - }, + { position: "top-center" }, ); } }) @@ -169,9 +149,7 @@ export default function ClassificationSettingsView({ "Unknown error"; toast.error( t("toast.save.error.title", { errorMessage, ns: "common" }), - { - position: "top-center", - }, + { position: "top-center" }, ); }) .finally(() => { @@ -191,6 +169,43 @@ export default function ClassificationSettingsView({ removeMessage("search_settings", "search_settings"); }, [origSearchSettings, removeMessage]); + const onReindex = useCallback(() => { + setIsLoading(true); + + axios + .put("/reindex") + .then((res) => { + if (res.status === 202) { + toast.success(t("classification.semanticSearch.reindexNow.success"), { + position: "top-center", + }); + } else { + toast.error( + t("classification.semanticSearch.reindexNow.error", { + errorMessage: res.statusText, + }), + { position: "top-center" }, + ); + } + }) + .catch((error) => { + const errorMessage = + error.response?.data?.message || + error.response?.data?.detail || + "Unknown error"; + toast.error( + t("classification.semanticSearch.reindexNow.error", { + errorMessage, + }), + { position: "top-center" }, + ); + }) + .finally(() => { + setIsLoading(false); + setIsReindexDialogOpen(false); + }); + }, [t]); + useEffect(() => { if (changedValue) { addMessage( @@ -262,28 +277,18 @@ export default function ClassificationSettingsView({
-
-
- { - handleClassificationConfigChange({ - search: { reindex: isChecked }, - }); - }} - /> -
- -
-
+
+
- classification.semanticSearch.reindexOnStartup.desc + classification.semanticSearch.reindexNow.desc
@@ -316,9 +321,7 @@ export default function ClassificationSettingsView({ value={classificationSettings.search.model_size} onValueChange={(value) => handleClassificationConfigChange({ - search: { - model_size: value as SearchModelSize, - }, + search: { model_size: value as SearchModelSize }, }) } > @@ -346,6 +349,35 @@ export default function ClassificationSettingsView({
+ + + + + {t("classification.semanticSearch.reindexNow.confirmTitle")} + + + + classification.semanticSearch.reindexNow.confirmDesc + + + + + setIsReindexDialogOpen(false)}> + {t("button.cancel", { ns: "common" })} + + + {t("classification.semanticSearch.reindexNow.confirmButton")} + + + + +
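For reference, the query string in the `config/set` call above maps onto top-level Frigate config keys. A minimal sketch of the equivalent YAML (values are illustrative; `semantic_search.reindex` is intentionally absent now that reindexing is an on-demand action rather than a startup option):

```yaml
semantic_search:
  enabled: true
  model_size: small
face_recognition:
  enabled: true
  model_size: small
lpr:
  enabled: true
```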
From e1a40534262e7dfe6632ca4026d3129b563f64c2 Mon Sep 17 00:00:00 2001
From: leccelecce <24962424+leccelecce@users.noreply.github.com>
Date: Thu, 27 Mar 2025 18:48:43 +0000
Subject: [PATCH 76/97] Upgrade bundled nginx to 1.27.4 (#17419)

---
 docker/main/build_nginx.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docker/main/build_nginx.sh b/docker/main/build_nginx.sh
index 2591810e3..606682665 100755
--- a/docker/main/build_nginx.sh
+++ b/docker/main/build_nginx.sh
@@ -2,7 +2,7 @@

 set -euxo pipefail

-NGINX_VERSION="1.25.3"
+NGINX_VERSION="1.27.4"
 VOD_MODULE_VERSION="1.31"
 SECURE_TOKEN_MODULE_VERSION="1.5"
 SET_MISC_MODULE_VERSION="v0.33"

From 3f1b4438e4f42dbc8f63b21f78c39a07d98e357e Mon Sep 17 00:00:00 2001
From: Nicolas Mowen
Date: Thu, 27 Mar 2025 13:41:22 -0600
Subject: [PATCH 77/97] Ensure landmark detector has a default value (#17420)

---
 frigate/data_processing/common/face/model.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/frigate/data_processing/common/face/model.py b/frigate/data_processing/common/face/model.py
index eb27df68d..0aeb76792 100644
--- a/frigate/data_processing/common/face/model.py
+++ b/frigate/data_processing/common/face/model.py
@@ -20,6 +20,7 @@ class FaceRecognizer(ABC):

     def __init__(self, config: FrigateConfig) -> None:
         self.config = config
+        self.landmark_detector: cv2.face.FacemarkLBF = None
         self.init_landmark_detector()

     @abstractmethod

From 37e0b9b9049ce4bfc08ef0eb1872182a162aa254 Mon Sep 17 00:00:00 2001
From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com>
Date: Fri, 28 Mar 2025 07:29:11 -0500
Subject: [PATCH 78/97] LPR tweaks (#17428)

* fix snapshot when using dedicated lpr
* enhancement and debugging config
* docs

---
 .../license_plate_recognition.md              | 19 ++++-
 docs/docs/configuration/reference.md          |  7 +-
 frigate/config/classification.py              | 16 ++++
 .../common/license_plate/mixin.py             | 77 +++++++++++++++----
 4 files changed, 102 insertions(+), 17 deletions(-)

diff --git a/docs/docs/configuration/license_plate_recognition.md b/docs/docs/configuration/license_plate_recognition.md
index 13d45310e..f3581e598 100644
--- a/docs/docs/configuration/license_plate_recognition.md
+++ b/docs/docs/configuration/license_plate_recognition.md
@@ -87,6 +87,20 @@ Fine-tune the LPR feature using these optional parameters:
   - For example, setting `match_distance: 1` allows a plate `ABCDE` to match `ABCBE` or `ABCD`.
   - This parameter will _not_ operate on known plates that are defined as regular expressions. You should define the full string of your plate in `known_plates` in order to use `match_distance`.

+### Image Enhancement
+
+- **`enhancement`**: A value between **0 and 10** that adjusts the level of image enhancement applied to captured license plates before they are processed for recognition. This preprocessing step can sometimes improve accuracy but may also have the opposite effect.
+  - **Default:** `0` (no enhancement)
+  - Higher values increase contrast, sharpen details, and reduce noise, but excessive enhancement can blur or distort characters, actually making them much harder for Frigate to recognize.
+  - This setting is best adjusted **at the camera level** if running LPR on multiple cameras.
+  - If Frigate is already recognizing plates correctly, leave this setting at the default of `0`. However, if you're experiencing frequent character issues or incomplete plates and you can already easily read the plates yourself, try increasing the value gradually, starting at **5** and adjusting as needed. To preview how different enhancement levels affect your plates, use the `debug_save_plates` configuration option (see below).
+
+### Debugging
+
+- **`debug_save_plates`**: Set to `True` to save captured text on plates for debugging. These images are stored in `/media/frigate/clips/lpr`, organized into subdirectories by `<camera_name>/<event_id>`, and named based on the capture timestamp.
+  - These saved images are not full plates but rather the specific areas of text detected on the plates. It is normal for the text detection model to sometimes find multiple areas of text on the plate. Use them to analyze what text Frigate recognized and how image enhancement affects detection.
+  - **Note:** Frigate does **not** automatically delete these debug images. Once LPR is functioning correctly, you should disable this option and manually remove the saved files to free up storage.
+
 ## Configuration Examples

 These configuration parameters are available at the global level of your config. The only optional parameters that should be set at the camera level are `enabled` and `min_area`.
@@ -143,6 +157,7 @@ cameras:
     lpr:
       enabled: True
      expire_time: 3 # optional, default
+      enhancement: 3 # optional, enhance the image before trying to recognize characters
     ffmpeg: ...
     detect:
       enabled: False # optional, disable Frigate's standard object detection pipeline
       width: 1920
       height: 1080
     motion:
       threshold: 30
-      contour_area: 80 # use an increased value here to tune out small motion changes
+      contour_area: 60 # use an increased value here to tune out small motion changes
       improve_contrast: false
       mask: 0.704,0.007,0.709,0.052,0.989,0.055,0.993,0.001 # ensure your camera's timestamp is masked
     record:
@@ -197,6 +212,7 @@ Ensure that:

 - Your camera has a clear, human-readable, well-lit view of the plate. If you can't read the plate's characters, Frigate certainly won't be able to, even if the model is recognizing a `license_plate`. This may require changing video size, quality, or frame rate settings on your camera, depending on your scene and how fast the vehicles are traveling.
 - The plate is large enough in the image (try adjusting `min_area`) or increasing the resolution of your camera's stream.
+- Your `enhancement` level (if you've changed it from the default of `0`) is not too high. Too much enhancement will run too much denoising and cause the plate characters to become blurry and unreadable.

 If you are using a Frigate+ model or a custom model that detects license plates, ensure that `license_plate` is added to your list of objects to track. If you are using the free model that ships with Frigate, you should _not_ add `license_plate` to the list of objects to track.

@@ -229,6 +245,7 @@ Use `match_distance` to allow small character mismatches. Alternatively, define

 - If you are using a Frigate+ model or a model that detects license plates, watch the debug view (Settings --> Debug) to ensure that `license_plate` is being detected with a `car`.
 - Watch the debug view to see plates recognized in real-time. For non-dedicated LPR cameras, the `car` label will change to the recognized plate when LPR is enabled and working.
 - Adjust `detection_threshold` and `recognition_threshold` settings per the suggestions [above](#advanced-configuration).
+- Enable `debug_save_plates` to save images of detected text on plates to the clips directory (`/media/frigate/clips/lpr`).
 - Enable debug logs for LPR by adding `frigate.data_processing.common.license_plate: debug` to your `logger` configuration. These logs are _very_ verbose, so only enable this when necessary.

 ```yaml
 logger:
   logs:
     frigate.data_processing.common.license_plate: debug
 ```
diff --git a/docs/docs/configuration/reference.md b/docs/docs/configuration/reference.md
index e30ee1619..81ca1becc 100644
--- a/docs/docs/configuration/reference.md
+++ b/docs/docs/configuration/reference.md
@@ -562,7 +562,7 @@ face_recognition:
   blur_confidence_filter: True

 # Optional: Configuration for license plate recognition capability
-# NOTE: enabled and min_area can be overridden at the camera level
+# NOTE: enabled, min_area, and enhancement can be overridden at the camera level
 lpr:
   # Optional: Enable license plate recognition (default: shown below)
   enabled: False
@@ -580,6 +580,11 @@ lpr:
   match_distance: 1
   # Optional: Known plates to track (strings or regular expressions) (default: shown below)
   known_plates: {}
+  # Optional: Enhance the detected plate image with contrast adjustment and denoising (default: shown below)
+  # A value between 0 and 10. Higher values are not always better and may perform worse than lower values.
+  enhancement: 0
+  # Optional: Save plate images to /media/frigate/clips/lpr for debugging purposes (default: shown below)
+  debug_save_plates: False

 # Optional: Configuration for AI generated tracked object descriptions
 # WARNING: Depending on the provider, this will send thumbnails over the internet
diff --git a/frigate/config/classification.py b/frigate/config/classification.py
index aecbf6537..4026106b2 100644
--- a/frigate/config/classification.py
+++ b/frigate/config/classification.py
@@ -126,6 +126,16 @@ class LicensePlateRecognitionConfig(FrigateBaseModel):
     known_plates: Optional[Dict[str, List[str]]] = Field(
         default={}, title="Known plates to track (strings or regular expressions)."
     )
+    enhancement: int = Field(
+        default=0,
+        title="Amount of contrast adjustment and denoising to apply to license plate images before recognition.",
+        ge=0,
+        le=10,
+    )
+    debug_save_plates: bool = Field(
+        default=False,
+        title="Save plates captured for LPR for debugging purposes.",
+    )


 class CameraLicensePlateRecognitionConfig(FrigateBaseModel):
@@ -139,5 +149,11 @@ class CameraLicensePlateRecognitionConfig(FrigateBaseModel):
         default=1000,
         title="Minimum area of license plate to begin running recognition.",
     )
+    enhancement: int = Field(
+        default=0,
+        title="Amount of contrast adjustment and denoising to apply to license plate images before recognition.",
+        ge=0,
+        le=10,
+    )

     model_config = ConfigDict(extra="ignore", protected_namespaces=())
diff --git a/frigate/data_processing/common/license_plate/mixin.py b/frigate/data_processing/common/license_plate/mixin.py
index 60066c48d..3d24d48d5 100644
--- a/frigate/data_processing/common/license_plate/mixin.py
+++ b/frigate/data_processing/common/license_plate/mixin.py
@@ -4,9 +4,11 @@ import base64
 import datetime
 import logging
 import math
+import os
 import random
 import re
 import string
+from pathlib import Path
 from typing import List, Optional, Tuple

 import cv2
@@ -20,6 +22,7 @@ from frigate.comms.event_metadata_updater import (
     EventMetadataTypeEnum,
 )
 from frigate.config.camera.camera import CameraTypeEnum
+from frigate.const import CLIPS_DIR
 from frigate.embeddings.onnx.lpr_embedding import LPR_EMBEDDING_SIZE
 from frigate.util.image import area

 logger = logging.getLogger(__name__)
@@ -107,7 +110,7 @@ class LicensePlateProcessingMixin:
         return self._process_classification_output(images, outputs)

     def _recognize(
-        self, images: List[np.ndarray]
+        self, camera: string, images: List[np.ndarray]
     ) -> Tuple[List[str], List[List[float]]]:
         """
         Recognize the characters on the detected license plates using the recognition model.
@@ -137,7 +140,7 @@ class LicensePlateProcessingMixin:
         # preprocess the images based on the max aspect ratio
         for i in range(index, min(num_images, index + self.batch_size)):
             norm_image = self._preprocess_recognition_image(
-                images[indices[i]], max_wh_ratio
+                camera, images[indices[i]], max_wh_ratio
             )
             norm_image = norm_image[np.newaxis, :]
             norm_images.append(norm_image)
@@ -146,7 +149,7 @@ class LicensePlateProcessingMixin:
         return self.ctc_decoder(outputs)

     def _process_license_plate(
-        self, image: np.ndarray
+        self, camera: string, id: string, image: np.ndarray
     ) -> Tuple[List[str], List[float], List[int]]:
         """
         Complete pipeline for detecting, classifying, and recognizing license plates in the input image.
@@ -174,21 +177,37 @@ class LicensePlateProcessingMixin:
         boxes = self._sort_boxes(list(boxes))
         plate_images = [self._crop_license_plate(image, x) for x in boxes]

+        current_time = int(datetime.datetime.now().timestamp())
+
         if WRITE_DEBUG_IMAGES:
-            current_time = int(datetime.datetime.now().timestamp())
             for i, img in enumerate(plate_images):
                 cv2.imwrite(
                     f"debug/frames/license_plate_cropped_{current_time}_{i + 1}.jpg",
                     img,
                 )

+        if self.config.lpr.debug_save_plates:
+            logger.debug(f"{camera}: Saving plates for event {id}")
+
+            Path(os.path.join(CLIPS_DIR, f"lpr/{camera}/{id}")).mkdir(
+                parents=True, exist_ok=True
+            )
+
+            for i, img in enumerate(plate_images):
+                cv2.imwrite(
+                    os.path.join(
+                        CLIPS_DIR, f"lpr/{camera}/{id}/{current_time}_{i + 1}.jpg"
+                    ),
+                    img,
+                )
+
         # keep track of the index of each image for correct area calc later
         sorted_indices = np.argsort([x.shape[1] / x.shape[0] for x in plate_images])
         reverse_mapping = {
             idx: original_idx for original_idx, idx in enumerate(sorted_indices)
         }

-        results, confidences = self._recognize(plate_images)
+        results, confidences = self._recognize(camera, plate_images)

         if results:
             license_plates = [""] * len(plate_images)
@@ -606,7 +625,7 @@ class LicensePlateProcessingMixin:
         return images, results

     def _preprocess_recognition_image(
-        self, image: np.ndarray, max_wh_ratio: float
+        self, camera: string, image: np.ndarray, max_wh_ratio: float
     ) -> np.ndarray:
         """
         Preprocess an image for recognition by dynamically adjusting its width.
@@ -634,13 +653,38 @@ class LicensePlateProcessingMixin:
         else:
             gray = image

-        # apply CLAHE for contrast enhancement
-        grid_size = (
-            max(4, input_w // 40),
-            max(4, input_h // 40),
-        )
-        clahe = cv2.createCLAHE(clipLimit=1.5, tileGridSize=grid_size)
-        enhanced = clahe.apply(gray)
+        if self.config.cameras[camera].lpr.enhancement > 3:
+            # denoise using a configurable pixel neighborhood value
+            logger.debug(
+                f"{camera}: Denoising recognition image (level: {self.config.cameras[camera].lpr.enhancement})"
+            )
+            smoothed = cv2.bilateralFilter(
+                gray,
+                d=5 + self.config.cameras[camera].lpr.enhancement,
+                sigmaColor=10 * self.config.cameras[camera].lpr.enhancement,
+                sigmaSpace=10 * self.config.cameras[camera].lpr.enhancement,
+            )
+            sharpening_kernel = np.array([[-1, -1, -1], [-1, 9, -1], [-1, -1, -1]])
+            processed = cv2.filter2D(smoothed, -1, sharpening_kernel)
+        else:
+            processed = gray
+
+        if self.config.cameras[camera].lpr.enhancement > 0:
+            # apply CLAHE for contrast enhancement whenever the enhancement level is above 0 (stronger clip limit above 5)
+            logger.debug(
+                f"{camera}: Enhancing contrast for recognition image (level: {self.config.cameras[camera].lpr.enhancement})"
+            )
+            grid_size = (
+                max(4, input_w // 40),
+                max(4, input_h // 40),
+            )
+            clahe = cv2.createCLAHE(
+                clipLimit=2 if self.config.cameras[camera].lpr.enhancement > 5 else 1.5,
+                tileGridSize=grid_size,
+            )
+            enhanced = clahe.apply(processed)
+        else:
+            enhanced = processed

         # Convert back to 3-channel for model compatibility
         image = cv2.cvtColor(enhanced, cv2.COLOR_GRAY2RGB)
@@ -948,6 +992,8 @@ class LicensePlateProcessingMixin:
             return

         if dedicated_lpr:
+            id = "dedicated-lpr"
+
             rgb = cv2.cvtColor(frame, cv2.COLOR_YUV2BGR_I420)

             # apply motion mask
@@ -1149,7 +1195,7 @@ class LicensePlateProcessingMixin:
         # run detection, returns results sorted by confidence, best first
         start = datetime.datetime.now().timestamp()
         license_plates, confidences, areas = self._process_license_plate(
-            license_plate_frame
+            camera, id, license_plate_frame
        )
         self.__update_lpr_metrics(datetime.datetime.now().timestamp() - start)
@@ -1257,9 +1303,10 @@ class LicensePlateProcessingMixin:
                 f"{camera}: Writing snapshot for {id}, {top_plate}, {current_time}"
             )
             frame_bgr = cv2.cvtColor(frame, cv2.COLOR_YUV2BGR_I420)
+            _, encoded_img = cv2.imencode(".jpg", frame_bgr)
             self.sub_label_publisher.publish(
                 EventMetadataTypeEnum.save_lpr_snapshot,
-                (base64.b64encode(frame_bgr).decode("ASCII"), id, camera),
+                (base64.b64encode(encoded_img).decode("ASCII"), id, camera),
             )

             self.detected_license_plates[id] = {

From b14abffea31457b05cae1dabd9ff0ea11792a945 Mon Sep 17 00:00:00 2001
From: Nicolas Mowen
Date: Fri, 28 Mar 2025 12:52:12 -0600
Subject: [PATCH 79/97] Refactor face library page (#17424)

* Section faces by event id
* Make score keeping more robust
* layout improvements
* Cleanup dialog
* Fix clicking behavior
* Add view in explore option
* math.round
* Don't require events
* Cleanup
* Remove selection
* Don't require
* Change dialog size with snapshot
* Use filename as key
* fix key
* Rework layout for mobile
* Handle mobile landscape
* Fix train issue
* Match logic
* Move deletion logic
* Fix reprocessing
* Support creating a new face
* Translations
* Do sorting in frontend
* Adjust unknown
* Cleanup
* Set max limit to faces to recognize
* Fix sorting
* Fix

---
 docs/docs/configuration/face_recognition.md   |   2 +-
 frigate/api/classification.py                 |  19 +-
 frigate/data_processing/real_time/face.py    |  97 +++--
 web/public/locales/en/views/faceLibrary.json  |   1 +
web/src/pages/FaceLibrary.tsx | 353 +++++++++++++------ web/src/types/face.ts | 1 + 6 files changed, 325 insertions(+), 148 deletions(-) diff --git a/docs/docs/configuration/face_recognition.md b/docs/docs/configuration/face_recognition.md index 9fb5c9a44..d618dbba6 100644 --- a/docs/docs/configuration/face_recognition.md +++ b/docs/docs/configuration/face_recognition.md @@ -59,7 +59,7 @@ Fine-tune face recognition with these optional parameters: ### Recognition - `model_size`: Which model size to use, options are `small` or `large` -- `unknown_score`: Min score to mark a person as a potential match, matches below this will be marked as unknown. +- `unknown_score`: Min score to mark a person as a potential match, matches at or below this will be marked as unknown. - Default: `0.8`. - `recognition_threshold`: Recognition confidence score required to add the face to the object as a sub label. - Default: `0.9`. diff --git a/frigate/api/classification.py b/frigate/api/classification.py index 4a6969cd3..8c2d464d4 100644 --- a/frigate/api/classification.py +++ b/frigate/api/classification.py @@ -41,13 +41,9 @@ def get_faces(): face_dict[name] = [] - for file in sorted( - filter( - lambda f: (f.lower().endswith((".webp", ".png", ".jpg", ".jpeg"))), - os.listdir(face_dir), - ), - key=lambda f: os.path.getctime(os.path.join(face_dir, f)), - reverse=True, + for file in filter( + lambda f: (f.lower().endswith((".webp", ".png", ".jpg", ".jpeg"))), + os.listdir(face_dir), ): face_dict[name].append(file) @@ -125,10 +121,13 @@ def train_face(request: Request, name: str, body: dict = None): sanitized_name = sanitize_filename(name) rand_id = "".join(random.choices(string.ascii_lowercase + string.digits, k=6)) new_name = f"{sanitized_name}-{rand_id}.webp" - new_file = os.path.join(FACE_DIR, f"{sanitized_name}/{new_name}") + new_file_folder = os.path.join(FACE_DIR, f"{sanitized_name}") + + if not os.path.exists(new_file_folder): + os.mkdir(new_file_folder) if training_file_name: - shutil.move(training_file, new_file) + shutil.move(training_file, os.path.join(new_file_folder, new_name)) else: try: event: Event = Event.get(Event.id == event_id) @@ -155,7 +154,7 @@ def train_face(request: Request, name: str, body: dict = None): x2 = x1 + int(face_box[2] * detect_config.width) - 4 y2 = y1 + int(face_box[3] * detect_config.height) - 4 face = snapshot[y1:y2, x1:x2] - cv2.imwrite(new_file, face) + cv2.imwrite(os.path.join(new_file_folder, new_name), face) context: EmbeddingsContext = request.app.embeddings context.clear_face_classifier() diff --git a/frigate/data_processing/real_time/face.py b/frigate/data_processing/real_time/face.py index d3fbc83df..10479b92b 100644 --- a/frigate/data_processing/real_time/face.py +++ b/frigate/data_processing/real_time/face.py @@ -33,7 +33,8 @@ logger = logging.getLogger(__name__) MAX_DETECTION_HEIGHT = 1080 -MIN_MATCHING_FACES = 2 +MAX_FACES_ATTEMPTS_AFTER_REC = 6 +MAX_FACE_ATTEMPTS = 12 class FaceRealTimeProcessor(RealTimeProcessorApi): @@ -170,6 +171,23 @@ class FaceRealTimeProcessor(RealTimeProcessorApi): ) return + # check if we have hit limits + if ( + id in self.person_face_history + and len(self.person_face_history[id]) >= MAX_FACES_ATTEMPTS_AFTER_REC + ): + # if we are at max attempts after rec and we have a rec + if obj_data.get("sub_label"): + logger.debug( + "Not processing due to hitting max attempts after true recognition." 
+ ) + return + + # if we don't have a rec and are at max attempts + if len(self.person_face_history[id]) >= MAX_FACE_ATTEMPTS: + logger.debug("Not processing due to hitting max rec attempts.") + return + face: Optional[dict[str, any]] = None if self.requires_face_detection: @@ -241,7 +259,7 @@ class FaceRealTimeProcessor(RealTimeProcessorApi): sub_label, score = res - if score < self.face_config.unknown_score: + if score <= self.face_config.unknown_score: sub_label = "unknown" logger.debug( @@ -255,13 +273,23 @@ class FaceRealTimeProcessor(RealTimeProcessorApi): os.makedirs(folder, exist_ok=True) cv2.imwrite(file, face_frame) + files = sorted( + filter(lambda f: (f.endswith(".webp")), os.listdir(folder)), + key=lambda f: os.path.getctime(os.path.join(folder, f)), + reverse=True, + ) + + # delete oldest face image if maximum is reached + if len(files) > self.config.face_recognition.save_attempts: + os.unlink(os.path.join(folder, files[-1])) + if id not in self.person_face_history: self.person_face_history[id] = [] self.person_face_history[id].append( (sub_label, score, face_frame.shape[0] * face_frame.shape[1]) ) - (weighted_sub_label, weighted_score) = self.weighted_average_by_area( + (weighted_sub_label, weighted_score) = self.weighted_average( self.person_face_history[id] ) @@ -297,6 +325,9 @@ class FaceRealTimeProcessor(RealTimeProcessorApi): sub_label, score = res + if score <= self.face_config.unknown_score: + sub_label = "unknown" + return {"success": True, "score": score, "face_name": sub_label} elif topic == EmbeddingsRequestEnum.register_face.value: rand_id = "".join( @@ -366,6 +397,9 @@ class FaceRealTimeProcessor(RealTimeProcessorApi): sub_label, score = res + if score <= self.face_config.unknown_score: + sub_label = "unknown" + if self.config.face_recognition.save_attempts: # write face to library folder = os.path.join(FACE_DIR, "train") @@ -375,38 +409,49 @@ class FaceRealTimeProcessor(RealTimeProcessorApi): ) shutil.move(current_file, new_file) - files = sorted( - filter(lambda f: (f.endswith(".webp")), os.listdir(folder)), - key=lambda f: os.path.getctime(os.path.join(folder, f)), - reverse=True, - ) - - # delete oldest face image if maximum is reached - if len(files) > self.config.face_recognition.save_attempts: - os.unlink(os.path.join(folder, files[-1])) - def expire_object(self, object_id: str): if object_id in self.person_face_history: self.person_face_history.pop(object_id) - def weighted_average_by_area(self, results_list: list[tuple[str, float, int]]): - score_count = {} + def weighted_average( + self, results_list: list[tuple[str, float, int]], max_weight: int = 4000 + ): + """ + Calculates a robust weighted average, capping the area weight and giving more weight to higher scores. + + Args: + results_list: A list of tuples, where each tuple contains (name, score, face_area). + max_weight: The maximum weight to apply based on face area. + + Returns: + A tuple containing the prominent name and its weighted average score, or (None, 0.0) if the list is empty. 
+ """ + if not results_list: + return None, 0.0 + weighted_scores = {} - total_face_areas = {} + total_weights = {} for name, score, face_area in results_list: + if name == "unknown": + continue + if name not in weighted_scores: - score_count[name] = 1 weighted_scores[name] = 0.0 - total_face_areas[name] = 0.0 - else: - score_count[name] += 1 + total_weights[name] = 0.0 - weighted_scores[name] += score * face_area - total_face_areas[name] += face_area + # Capped weight based on face area + weight = min(face_area, max_weight) - prominent_name = max(score_count) + # Score-based weighting (higher scores get more weight) + weight *= (score - self.face_config.unknown_score) * 10 + weighted_scores[name] += score * weight + total_weights[name] += weight - return prominent_name, weighted_scores[prominent_name] / total_face_areas[ - prominent_name - ] + if not weighted_scores: + return None, 0.0 + + best_name = max(weighted_scores, key=weighted_scores.get) + weighted_average = weighted_scores[best_name] / total_weights[best_name] + + return best_name, weighted_average diff --git a/web/public/locales/en/views/faceLibrary.json b/web/public/locales/en/views/faceLibrary.json index fd5f50825..baabed69a 100644 --- a/web/public/locales/en/views/faceLibrary.json +++ b/web/public/locales/en/views/faceLibrary.json @@ -17,6 +17,7 @@ "createFaceLibrary": { "title": "Create Face Library", "desc": "Create a new face library", + "new": "Create New Face", "nextSteps": "It is recommended to use the Train tab to select and train images for each person as they are detected. When building a strong foundation it is strongly recommended to only train on images that are straight-on. Ignore images from cameras that recognize faces from an angle." }, "train": { diff --git a/web/src/pages/FaceLibrary.tsx b/web/src/pages/FaceLibrary.tsx index 696691997..df57f729a 100644 --- a/web/src/pages/FaceLibrary.tsx +++ b/web/src/pages/FaceLibrary.tsx @@ -3,6 +3,7 @@ import TimeAgo from "@/components/dynamic/TimeAgo"; import AddFaceIcon from "@/components/icons/AddFaceIcon"; import ActivityIndicator from "@/components/indicators/activity-indicator"; import CreateFaceWizardDialog from "@/components/overlay/detail/FaceCreateWizardDialog"; +import TextEntryDialog from "@/components/overlay/dialog/TextEntryDialog"; import UploadImageDialog from "@/components/overlay/dialog/UploadImageDialog"; import { Button } from "@/components/ui/button"; import { @@ -32,13 +33,23 @@ import { useFormattedTimestamp } from "@/hooks/use-date-utils"; import useKeyboardListener from "@/hooks/use-keyboard-listener"; import useOptimisticState from "@/hooks/use-optimistic-state"; import { cn } from "@/lib/utils"; +import { Event } from "@/types/event"; import { FaceLibraryData, RecognizedFaceData } from "@/types/face"; import { FaceRecognitionConfig, FrigateConfig } from "@/types/frigateConfig"; +import { TooltipPortal } from "@radix-ui/react-tooltip"; import axios from "axios"; import { useCallback, useEffect, useMemo, useRef, useState } from "react"; import { isDesktop, isMobile } from "react-device-detect"; import { useTranslation } from "react-i18next"; -import { LuImagePlus, LuRefreshCw, LuScanFace, LuTrash2 } from "react-icons/lu"; +import { + LuImagePlus, + LuPlus, + LuRefreshCw, + LuScanFace, + LuSearch, + LuTrash2, +} from "react-icons/lu"; +import { useNavigate } from "react-router-dom"; import { toast } from "sonner"; import useSWR from "swr"; @@ -391,14 +402,53 @@ function TrainingGrid({ onClickFace, onRefresh, }: TrainingGridProps) { - const { t } = 
useTranslation(["views/faceLibrary"]); + const { t } = useTranslation(["views/faceLibrary", "views/explore"]); + const navigate = useNavigate(); // face data - const [selectedEvent, setSelectedEvent] = useState(); + const faceGroups = useMemo(() => { + const groups: { [eventId: string]: RecognizedFaceData[] } = {}; + + Array.from(new Set(attemptImages)) + .sort() + .reverse() + .forEach((image) => { + const parts = image.split("-"); + const data = { + filename: image, + timestamp: Number.parseFloat(parts[0]), + eventId: `${parts[0]}-${parts[1]}`, + name: parts[2], + score: Number.parseFloat(parts[3]), + }; + + if (groups[data.eventId]) { + groups[data.eventId].push(data); + } else { + groups[data.eventId] = [data]; + } + }); + + return groups; + }, [attemptImages]); + + const eventIdsQuery = useMemo( + () => Object.keys(faceGroups).join(","), + [faceGroups], + ); + + const { data: events } = useSWR([ + "event_ids", + { ids: eventIdsQuery }, + ]); + + // selection + + const [selectedEvent, setSelectedEvent] = useState(); const formattedDate = useFormattedTimestamp( - selectedEvent?.timestamp ?? 0, + selectedEvent?.start_time ?? 0, config?.ui.time_format == "24hour" ? t("time.formattedTimestampWithYear.24hour", { ns: "common" }) : t("time.formattedTimestampWithYear.12hour", { ns: "common" }), @@ -415,23 +465,32 @@ function TrainingGrid({ } }} > - + {t("details.face")} {t("details.faceDesc")}
{t("details.person")}
-
{selectedEvent?.name}
-
-
-
- {t("details.confidence")} -
- {(selectedEvent?.score || 0) * 100}% + {selectedEvent?.sub_label ?? "Unknown"}
+ {selectedEvent?.data.sub_label_score && ( +
+
+ {t("details.confidence")} +
+
+ {Math.round(selectedEvent?.data?.sub_label_score || 0) * 100}% +
+
+ )}
{t("details.timestamp")} @@ -440,36 +499,89 @@ function TrainingGrid({
- {attemptImages.map((image: string) => ( - { - if (meta) { - onClickFace(image, meta); - } else { - setSelectedEvent(data); - } - }} - onRefresh={onRefresh} - /> - ))} + {Object.entries(faceGroups).map(([key, group]) => { + const event = events?.find((ev) => ev.id == key); + + return ( +
+
+
+ Person + {event?.sub_label + ? `: ${event.sub_label} (${Math.round((event.data.sub_label_score || 0) * 100)}%)` + : ": Unknown"} +
+ {event && ( + + +
{ + navigate(`/explore?event_id=${event.id}`); + }} + > + +
+
+ + + {t("details.item.button.viewInExplore", { + ns: "views/explore", + })} + + +
+ )} +
+ +
+ {group.map((data: RecognizedFaceData) => ( + { + if (meta || selectedFaces.length > 0) { + onClickFace(data.filename, true); + } else if (event) { + setSelectedEvent(event); + } + }} + onRefresh={onRefresh} + /> + ))} +
+
+ ); + })}
); } type FaceAttemptProps = { - image: string; + data: RecognizedFaceData; faceNames: string[]; recognitionConfig: FaceRecognitionConfig; selected: boolean; @@ -477,7 +589,7 @@ type FaceAttemptProps = { onRefresh: () => void; }; function FaceAttempt({ - image, + data, faceNames, recognitionConfig, selected, @@ -485,16 +597,6 @@ function FaceAttempt({ onRefresh, }: FaceAttemptProps) { const { t } = useTranslation(["views/faceLibrary"]); - const data = useMemo(() => { - const parts = image.split("-"); - - return { - timestamp: Number.parseFloat(parts[0]), - eventId: `${parts[0]}-${parts[1]}`, - name: parts[2], - score: Number.parseFloat(parts[3]), - }; - }, [image]); const scoreStatus = useMemo(() => { if (data.score >= recognitionConfig.recognition_threshold) { @@ -508,6 +610,8 @@ function FaceAttempt({ // interaction + const [newFace, setNewFace] = useState(false); + const imgRef = useRef(null); useContextMenu(imgRef, () => { @@ -519,7 +623,9 @@ function FaceAttempt({ const onTrainAttempt = useCallback( (trainName: string) => { axios - .post(`/faces/train/${trainName}/classify`, { training_file: image }) + .post(`/faces/train/${trainName}/classify`, { + training_file: data.filename, + }) .then((resp) => { if (resp.status == 200) { toast.success(t("toast.success.trainedFace"), { @@ -538,12 +644,12 @@ function FaceAttempt({ }); }); }, - [image, onRefresh, t], + [data, onRefresh, t], ); const onReprocess = useCallback(() => { axios - .post(`/faces/reprocess`, { training_file: image }) + .post(`/faces/reprocess`, { training_file: data.filename }) .then((resp) => { if (resp.status == 200) { toast.success(t("toast.success.updatedFaceScore"), { @@ -561,79 +667,102 @@ function FaceAttempt({ position: "top-center", }); }); - }, [image, onRefresh, t]); + }, [data, onRefresh, t]); return ( -
-
- onClick(data, e.metaKey || e.ctrlKey)} + <> + {newFace && ( + onTrainAttempt(newName)} /> -
- + )} + +
+
+ onClick(data, e.metaKey || e.ctrlKey)} + /> +
+ +
-
-
-
-
-
{data.name}
-
- {Math.round(data.score * 100)}% +
+
+
+
{data.name}
+
+ {Math.round(data.score * 100)}% +
+
+
+ + + + + + + + + {t("trainFaceAs")} + setNewFace(true)} + > + + {t("createFaceLibrary.new")} + + {faceNames.map((faceName) => ( + onTrainAttempt(faceName)} + > + + {faceName} + + ))} + + + {t("trainFace")} + + + + onReprocess()} + /> + + {t("button.reprocessFace")} +
-
- - - - - - - - - {t("trainFaceAs")} - {faceNames.map((faceName) => ( - onTrainAttempt(faceName)} - > - {faceName} - - ))} - - - {t("trainFace")} - - - - onReprocess()} - /> - - {t("button.reprocessFace")} - -
-
+ ); } @@ -643,6 +772,8 @@ type FaceGridProps = { onDelete: (name: string, ids: string[]) => void; }; function FaceGrid({ faceImages, pageToggle, onDelete }: FaceGridProps) { + const sortedFaces = useMemo(() => faceImages.sort().reverse(), [faceImages]); + return (
-      {faceImages.map((image: string) => (
+      {sortedFaces.map((image: string) => (

Date: Fri, 28 Mar 2025 17:13:35 -0600
Subject: [PATCH 80/97] Implement enrichments events per second graph (#17436)

* Cleanup existing naming
* Add face recognitions per second
* Add lpr fps
* Add all eps
* Clean up line graph
* Translations
* Change wording
* Fix incorrect access
* Don't require plates
* Add comment
* Fix

---
 .../common/license_plate/mixin.py             |  21 +++-
 frigate/data_processing/real_time/face.py    |  10 +-
 frigate/data_processing/types.py             |  24 ++--
 frigate/embeddings/embeddings.py             |  31 +++--
 frigate/embeddings/maintainer.py             |   1 +
 frigate/stats/util.py                        |  27 +++-
 web/public/locales/en/views/system.json      |   7 +-
 .../graph/{CameraGraph.tsx => LineGraph.tsx}  | 115 ++++++++++++++++++
 web/src/pages/System.tsx                     |  12 +-
 web/src/views/system/CameraMetrics.tsx       |   2 +-
 ...atureMetrics.tsx => EnrichmentMetrics.tsx} |  36 ++++--
 11 files changed, 233 insertions(+), 53 deletions(-)
 rename web/src/components/graph/{CameraGraph.tsx => LineGraph.tsx} (59%)
 rename web/src/views/system/{FeatureMetrics.tsx => EnrichmentMetrics.tsx} (75%)

diff --git a/frigate/data_processing/common/license_plate/mixin.py b/frigate/data_processing/common/license_plate/mixin.py
index 3d24d48d5..63285ac79 100644
--- a/frigate/data_processing/common/license_plate/mixin.py
+++ b/frigate/data_processing/common/license_plate/mixin.py
@@ -24,6 +24,7 @@ from frigate.comms.event_metadata_updater import (
 from frigate.config.camera.camera import CameraTypeEnum
 from frigate.const import CLIPS_DIR
 from frigate.embeddings.onnx.lpr_embedding import LPR_EMBEDDING_SIZE
+from frigate.util.builtin import EventsPerSecond
 from frigate.util.image import area

 logger = logging.getLogger(__name__)
@@ -34,11 +35,12 @@ WRITE_DEBUG_IMAGES = False
 class LicensePlateProcessingMixin:
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
-
+        self.plates_rec_second = EventsPerSecond()
+        self.plates_rec_second.start()
+        self.plates_det_second = EventsPerSecond()
+        self.plates_det_second.start()
         self.event_metadata_publisher = EventMetadataPublisher()
-
         self.ctc_decoder = CTCDecoder()
-
         self.batch_size = 6

         # Detection specific parameters
@@ -947,15 +949,17 @@ class LicensePlateProcessingMixin:
         """
         Update inference metrics.
         """
-        self.metrics.yolov9_lpr_fps.value = (
-            self.metrics.yolov9_lpr_fps.value * 9 + duration
+        self.metrics.yolov9_lpr_speed.value = (
+            self.metrics.yolov9_lpr_speed.value * 9 + duration
         ) / 10

     def __update_lpr_metrics(self, duration: float) -> None:
         """
         Update inference metrics.
""" - self.metrics.alpr_pps.value = (self.metrics.alpr_pps.value * 9 + duration) / 10 + self.metrics.alpr_speed.value = ( + self.metrics.alpr_speed.value * 9 + duration + ) / 10 def _generate_plate_event(self, camera: str, plate: str, plate_score: float) -> str: """Generate a unique ID for a plate event based on camera and text.""" @@ -982,6 +986,8 @@ class LicensePlateProcessingMixin: self, obj_data: dict[str, any], frame: np.ndarray, dedicated_lpr: bool = False ): """Look for license plates in image.""" + self.metrics.alpr_pps.value = self.plates_rec_second.eps() + self.metrics.yolov9_lpr_pps.value = self.plates_det_second.eps() camera = obj_data if dedicated_lpr else obj_data["camera"] current_time = int(datetime.datetime.now().timestamp()) @@ -1011,6 +1017,7 @@ class LicensePlateProcessingMixin: logger.debug( f"{camera}: YOLOv9 LPD inference time: {(datetime.datetime.now().timestamp() - yolov9_start) * 1000:.2f} ms" ) + self.plates_det_second.update() self.__update_yolov9_metrics( datetime.datetime.now().timestamp() - yolov9_start ) @@ -1093,6 +1100,7 @@ class LicensePlateProcessingMixin: logger.debug( f"{camera}: YOLOv9 LPD inference time: {(datetime.datetime.now().timestamp() - yolov9_start) * 1000:.2f} ms" ) + self.plates_det_second.update() self.__update_yolov9_metrics( datetime.datetime.now().timestamp() - yolov9_start ) @@ -1197,6 +1205,7 @@ class LicensePlateProcessingMixin: license_plates, confidences, areas = self._process_license_plate( camera, id, license_plate_frame ) + self.plates_rec_second.update() self.__update_lpr_metrics(datetime.datetime.now().timestamp() - start) if license_plates: diff --git a/frigate/data_processing/real_time/face.py b/frigate/data_processing/real_time/face.py index 10479b92b..5b20a6303 100644 --- a/frigate/data_processing/real_time/face.py +++ b/frigate/data_processing/real_time/face.py @@ -24,6 +24,7 @@ from frigate.data_processing.common.face.model import ( FaceNetRecognizer, FaceRecognizer, ) +from frigate.util.builtin import EventsPerSecond from frigate.util.image import area from ..types import DataProcessorMetrics @@ -51,6 +52,7 @@ class FaceRealTimeProcessor(RealTimeProcessorApi): self.requires_face_detection = "face" not in self.config.objects.all_objects self.person_face_history: dict[str, list[tuple[str, float, int]]] = {} self.recognizer: FaceRecognizer | None = None + self.faces_per_second = EventsPerSecond() download_path = os.path.join(MODEL_CACHE_DIR, "facedet") self.model_files = { @@ -103,6 +105,7 @@ class FaceRealTimeProcessor(RealTimeProcessorApi): score_threshold=0.5, nms_threshold=0.3, ) + self.faces_per_second.start() def __detect_face( self, input: np.ndarray, threshold: float @@ -146,12 +149,15 @@ class FaceRealTimeProcessor(RealTimeProcessorApi): return face def __update_metrics(self, duration: float) -> None: - self.metrics.face_rec_fps.value = ( - self.metrics.face_rec_fps.value * 9 + duration + self.faces_per_second.update() + self.metrics.face_rec_speed.value = ( + self.metrics.face_rec_speed.value * 9 + duration ) / 10 def process_frame(self, obj_data: dict[str, any], frame: np.ndarray): """Look for faces in image.""" + self.metrics.face_rec_fps.value = self.faces_per_second.eps() + if not self.config.cameras[obj_data["camera"]].face_recognition.enabled: return diff --git a/frigate/data_processing/types.py b/frigate/data_processing/types.py index 29abb22d1..8ec7b9617 100644 --- a/frigate/data_processing/types.py +++ b/frigate/data_processing/types.py @@ -6,18 +6,26 @@ from multiprocessing.sharedctypes import 
Synchronized class DataProcessorMetrics: - image_embeddings_fps: Synchronized - text_embeddings_sps: Synchronized + image_embeddings_speed: Synchronized + text_embeddings_speed: Synchronized + face_rec_speed: Synchronized face_rec_fps: Synchronized + alpr_speed: Synchronized alpr_pps: Synchronized - yolov9_lpr_fps: Synchronized + yolov9_lpr_speed: Synchronized + yolov9_lpr_pps: Synchronized def __init__(self): - self.image_embeddings_fps = mp.Value("d", 0.01) - self.text_embeddings_sps = mp.Value("d", 0.01) - self.face_rec_fps = mp.Value("d", 0.01) - self.alpr_pps = mp.Value("d", 0.01) - self.yolov9_lpr_fps = mp.Value("d", 0.01) + self.image_embeddings_speed = mp.Value("d", 0.01) + self.image_embeddings_eps = mp.Value("d", 0.0) + self.text_embeddings_speed = mp.Value("d", 0.01) + self.text_embeddings_eps = mp.Value("d", 0.0) + self.face_rec_speed = mp.Value("d", 0.01) + self.face_rec_fps = mp.Value("d", 0.0) + self.alpr_speed = mp.Value("d", 0.01) + self.alpr_pps = mp.Value("d", 0.0) + self.yolov9_lpr_speed = mp.Value("d", 0.01) + self.yolov9_lpr_pps = mp.Value("d", 0.0) class DataProcessorModelRunner: diff --git a/frigate/embeddings/embeddings.py b/frigate/embeddings/embeddings.py index d2053f5ee..6eb060560 100644 --- a/frigate/embeddings/embeddings.py +++ b/frigate/embeddings/embeddings.py @@ -21,7 +21,7 @@ from frigate.data_processing.types import DataProcessorMetrics from frigate.db.sqlitevecq import SqliteVecQueueDatabase from frigate.models import Event from frigate.types import ModelStatusTypesEnum -from frigate.util.builtin import serialize +from frigate.util.builtin import EventsPerSecond, serialize from frigate.util.path import get_event_thumbnail_bytes from .onnx.jina_v1_embedding import JinaV1ImageEmbedding, JinaV1TextEmbedding @@ -75,6 +75,11 @@ class Embeddings: self.metrics = metrics self.requestor = InterProcessRequestor() + self.image_eps = EventsPerSecond() + self.image_eps.start() + self.text_eps = EventsPerSecond() + self.text_eps.start() + self.reindex_lock = threading.Lock() self.reindex_thread = None self.reindex_running = False @@ -120,6 +125,10 @@ class Embeddings: device="GPU" if config.semantic_search.model_size == "large" else "CPU", ) + def update_stats(self) -> None: + self.metrics.image_embeddings_eps = self.image_eps.eps() + self.metrics.text_embeddings_eps = self.text_eps.eps() + def get_model_definitions(self): # Version-specific models if self.config.semantic_search.model == SemanticSearchModelEnum.jinav2: @@ -175,9 +184,10 @@ class Embeddings: ) duration = datetime.datetime.now().timestamp() - start - self.metrics.image_embeddings_fps.value = ( - self.metrics.image_embeddings_fps.value * 9 + duration + self.metrics.image_embeddings_speed.value = ( + self.metrics.image_embeddings_speed.value * 9 + duration ) / 10 + self.image_eps.update() return embedding @@ -199,6 +209,7 @@ class Embeddings: for i in range(len(ids)): items.append(ids[i]) items.append(serialize(embeddings[i])) + self.image_eps.update() self.db.execute_sql( """ @@ -209,8 +220,8 @@ class Embeddings: ) duration = datetime.datetime.now().timestamp() - start - self.metrics.text_embeddings_sps.value = ( - self.metrics.text_embeddings_sps.value * 9 + (duration / len(ids)) + self.metrics.text_embeddings_speed.value = ( + self.metrics.text_embeddings_speed.value * 9 + (duration / len(ids)) ) / 10 return embeddings @@ -231,9 +242,10 @@ class Embeddings: ) duration = datetime.datetime.now().timestamp() - start - self.metrics.text_embeddings_sps.value = ( - self.metrics.text_embeddings_sps.value * 9 
+ duration + self.metrics.text_embeddings_speed.value = ( + self.metrics.text_embeddings_speed.value * 9 + duration ) / 10 + self.text_eps.update() return embedding @@ -254,6 +266,7 @@ class Embeddings: for i in range(len(ids)): items.append(ids[i]) items.append(serialize(embeddings[i])) + self.text_eps.update() self.db.execute_sql( """ @@ -264,8 +277,8 @@ class Embeddings: ) duration = datetime.datetime.now().timestamp() - start - self.metrics.text_embeddings_sps.value = ( - self.metrics.text_embeddings_sps.value * 9 + (duration / len(ids)) + self.metrics.text_embeddings_speed.value = ( + self.metrics.text_embeddings_speed.value * 9 + (duration / len(ids)) ) / 10 return embeddings diff --git a/frigate/embeddings/maintainer.py b/frigate/embeddings/maintainer.py index 85b0e6d54..7554b12c6 100644 --- a/frigate/embeddings/maintainer.py +++ b/frigate/embeddings/maintainer.py @@ -236,6 +236,7 @@ class EmbeddingMaintainer(threading.Thread): return camera_config = self.config.cameras[camera] + self.embeddings.update_stats() # no need to process updated objects if face recognition, lpr, genai are disabled if not camera_config.genai.enabled and len(self.realtime_processors) == 0: diff --git a/frigate/stats/util.py b/frigate/stats/util.py index 287c384cd..2b33a6173 100644 --- a/frigate/stats/util.py +++ b/frigate/stats/util.py @@ -293,27 +293,42 @@ def stats_snapshot( stats["embeddings"].update( { "image_embedding_speed": round( - embeddings_metrics.image_embeddings_fps.value * 1000, 2 + embeddings_metrics.image_embeddings_speed.value * 1000, 2 + ), + "image_embedding": round( + embeddings_metrics.image_embeddings_eps.value, 2 ), "text_embedding_speed": round( - embeddings_metrics.text_embeddings_sps.value * 1000, 2 + embeddings_metrics.text_embeddings_speed.value * 1000, 2 + ), + "text_embedding": round( + embeddings_metrics.text_embeddings_eps.value, 2 ), } ) if config.face_recognition.enabled: stats["embeddings"]["face_recognition_speed"] = round( - embeddings_metrics.face_rec_fps.value * 1000, 2 + embeddings_metrics.face_rec_speed.value * 1000, 2 + ) + stats["embeddings"]["face_recognition"] = round( + embeddings_metrics.face_rec_fps.value, 2 ) if config.lpr.enabled: stats["embeddings"]["plate_recognition_speed"] = round( - embeddings_metrics.alpr_pps.value * 1000, 2 + embeddings_metrics.alpr_speed.value * 1000, 2 + ) + stats["embeddings"]["plate_recognition"] = round( + embeddings_metrics.alpr_pps.value, 2 ) - if "license_plate" not in config.objects.all_objects: + if embeddings_metrics.yolov9_lpr_pps.value > 0.0: stats["embeddings"]["yolov9_plate_detection_speed"] = round( - embeddings_metrics.yolov9_lpr_fps.value * 1000, 2 + embeddings_metrics.yolov9_lpr_speed.value * 1000, 2 + ) + stats["embeddings"]["yolov9_plate_detection"] = round( + embeddings_metrics.yolov9_lpr_pps.value, 2 ) get_processing_stats(config, stats, hwaccel_errors) diff --git a/web/public/locales/en/views/system.json b/web/public/locales/en/views/system.json index 77516f3e1..98583134c 100644 --- a/web/public/locales/en/views/system.json +++ b/web/public/locales/en/views/system.json @@ -3,7 +3,7 @@ "cameras": "Cameras Stats - Frigate", "storage": "Storage Stats - Frigate", "general": "General Stats - Frigate", - "features": "Features Stats - Frigate", + "enrichments": "Enrichments Stats - Frigate", "logs": { "frigate": "Frigate Logs - Frigate", "go2rtc": "Go2RTC Logs - Frigate", @@ -144,8 +144,9 @@ "healthy": "System is healthy", "reindexingEmbeddings": "Reindexing embeddings ({{processed}}% complete)" }, - "features": { - 
"title": "Features", + "enrichments": { + "title": "Enrichments", + "infPerSecond": "Inferences Per Second", "embeddings": { "image_embedding_speed": "Image Embedding Speed", "face_embedding_speed": "Face Embedding Speed", diff --git a/web/src/components/graph/CameraGraph.tsx b/web/src/components/graph/LineGraph.tsx similarity index 59% rename from web/src/components/graph/CameraGraph.tsx rename to web/src/components/graph/LineGraph.tsx index a347c2d37..ef55c9343 100644 --- a/web/src/components/graph/CameraGraph.tsx +++ b/web/src/components/graph/LineGraph.tsx @@ -143,3 +143,118 @@ export function CameraLineGraph({
); } + +type EventsPerSecondLineGraphProps = { + graphId: string; + unit: string; + name: string; + updateTimes: number[]; + data: ApexAxisChartSeries; +}; +export function EventsPerSecondsLineGraph({ + graphId, + unit, + name, + updateTimes, + data, +}: EventsPerSecondLineGraphProps) { + const { data: config } = useSWR("config", { + revalidateOnFocus: false, + }); + + const { theme, systemTheme } = useTheme(); + + const lastValue = useMemo( + // @ts-expect-error y is valid + () => data[0].data[data[0].data.length - 1]?.y ?? 0, + [data], + ); + + const formatTime = useCallback( + (val: unknown) => { + return formatUnixTimestampToDateTime( + updateTimes[Math.round(val as number) - 1], + { + timezone: config?.ui.timezone, + strftime_fmt: + config?.ui.time_format == "24hour" ? "%H:%M" : "%I:%M %p", + }, + ); + }, + [config, updateTimes], + ); + + const options = useMemo(() => { + return { + chart: { + id: graphId, + selection: { + enabled: false, + }, + toolbar: { + show: false, + }, + zoom: { + enabled: false, + }, + }, + colors: GRAPH_COLORS, + grid: { + show: false, + }, + legend: { + show: false, + }, + dataLabels: { + enabled: false, + }, + stroke: { + width: 1, + }, + tooltip: { + theme: systemTheme || theme, + }, + markers: { + size: 0, + }, + xaxis: { + tickAmount: isMobileOnly ? 2 : 3, + tickPlacement: "on", + labels: { + rotate: 0, + formatter: formatTime, + }, + axisBorder: { + show: false, + }, + axisTicks: { + show: false, + }, + }, + yaxis: { + show: true, + labels: { + formatter: (val: number) => Math.ceil(val).toString(), + }, + min: 0, + }, + } as ApexCharts.ApexOptions; + }, [graphId, systemTheme, theme, formatTime]); + + useEffect(() => { + ApexCharts.exec(graphId, "updateOptions", options, true, true); + }, [graphId, options]); + + return ( +
+
+
{name}
+
+ {lastValue} + {unit} +
+
+ +
+ ); +} diff --git a/web/src/pages/System.tsx b/web/src/pages/System.tsx index 7881fd0d3..5ef92e8a3 100644 --- a/web/src/pages/System.tsx +++ b/web/src/pages/System.tsx @@ -14,10 +14,10 @@ import CameraMetrics from "@/views/system/CameraMetrics"; import { useHashState } from "@/hooks/use-overlay-state"; import { Toaster } from "@/components/ui/sonner"; import { FrigateConfig } from "@/types/frigateConfig"; -import FeatureMetrics from "@/views/system/FeatureMetrics"; +import EnrichmentMetrics from "@/views/system/EnrichmentMetrics"; import { useTranslation } from "react-i18next"; -const allMetrics = ["general", "features", "storage", "cameras"] as const; +const allMetrics = ["general", "enrichments", "storage", "cameras"] as const; type SystemMetric = (typeof allMetrics)[number]; function System() { @@ -34,7 +34,7 @@ function System() { !config?.lpr.enabled && !config?.face_recognition.enabled ) { - const index = metrics.indexOf("features"); + const index = metrics.indexOf("enrichments"); metrics.splice(index, 1); } @@ -89,7 +89,7 @@ function System() { aria-label={`Select ${item}`} > {item == "general" && } - {item == "features" && } + {item == "enrichments" && } {item == "storage" && } {item == "cameras" && } {isDesktop && ( @@ -122,8 +122,8 @@ function System() { setLastUpdated={setLastUpdated} /> )} - {page == "features" && ( - diff --git a/web/src/views/system/CameraMetrics.tsx b/web/src/views/system/CameraMetrics.tsx index 497e6f435..b94dc3606 100644 --- a/web/src/views/system/CameraMetrics.tsx +++ b/web/src/views/system/CameraMetrics.tsx @@ -1,5 +1,5 @@ import { useFrigateStats } from "@/api/ws"; -import { CameraLineGraph } from "@/components/graph/CameraGraph"; +import { CameraLineGraph } from "@/components/graph/LineGraph"; import CameraInfoDialog from "@/components/overlay/CameraInfoDialog"; import { Skeleton } from "@/components/ui/skeleton"; import { FrigateConfig } from "@/types/frigateConfig"; diff --git a/web/src/views/system/FeatureMetrics.tsx b/web/src/views/system/EnrichmentMetrics.tsx similarity index 75% rename from web/src/views/system/FeatureMetrics.tsx rename to web/src/views/system/EnrichmentMetrics.tsx index c5b6e1454..2f94db3f6 100644 --- a/web/src/views/system/FeatureMetrics.tsx +++ b/web/src/views/system/EnrichmentMetrics.tsx @@ -7,15 +7,16 @@ import { Skeleton } from "@/components/ui/skeleton"; import { ThresholdBarGraph } from "@/components/graph/SystemGraph"; import { cn } from "@/lib/utils"; import { useTranslation } from "react-i18next"; +import { EventsPerSecondsLineGraph } from "@/components/graph/LineGraph"; -type FeatureMetricsProps = { +type EnrichmentMetricsProps = { lastUpdated: number; setLastUpdated: (last: number) => void; }; -export default function FeatureMetrics({ +export default function EnrichmentMetrics({ lastUpdated, setLastUpdated, -}: FeatureMetricsProps) { +}: EnrichmentMetricsProps) { // stats const { t } = useTranslation(["views/system"]); @@ -102,15 +103,26 @@ export default function FeatureMetrics({ {embeddingInferenceTimeSeries.map((series) => (
{series.name}
- + {series.name.endsWith("Speed") ? ( + + ) : ( + + )}
))} From c1b06d63b6c0e7fa6b011e4bb3d925bad24ad7fa Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Fri, 28 Mar 2025 18:35:50 -0600 Subject: [PATCH 81/97] Fix embedding eps (#17437) * Fix embedding eps * Fix thumbnail cleanup --- frigate/embeddings/embeddings.py | 4 ++-- frigate/events/cleanup.py | 10 +++++++--- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/frigate/embeddings/embeddings.py b/frigate/embeddings/embeddings.py index 6eb060560..2fda584d3 100644 --- a/frigate/embeddings/embeddings.py +++ b/frigate/embeddings/embeddings.py @@ -126,8 +126,8 @@ class Embeddings: ) def update_stats(self) -> None: - self.metrics.image_embeddings_eps = self.image_eps.eps() - self.metrics.text_embeddings_eps = self.text_eps.eps() + self.metrics.image_embeddings_eps.value = self.image_eps.eps() + self.metrics.text_embeddings_eps.value = self.text_eps.eps() def get_model_definitions(self): # Version-specific models diff --git a/frigate/events/cleanup.py b/frigate/events/cleanup.py index ae39e3fd2..fbc7b6c3d 100644 --- a/frigate/events/cleanup.py +++ b/frigate/events/cleanup.py @@ -11,7 +11,7 @@ from frigate.config import FrigateConfig from frigate.const import CLIPS_DIR from frigate.db.sqlitevecq import SqliteVecQueueDatabase from frigate.models import Event, Timeline -from frigate.util.path import delete_event_images +from frigate.util.path import delete_event_snapshot, delete_event_thumbnail logger = logging.getLogger(__name__) @@ -98,7 +98,7 @@ class EventCleanup(threading.Thread): # delete the media from disk for expired in expired_events: - deleted = delete_event_images(expired) + deleted = delete_event_snapshot(expired) if not deleted: logger.warning( @@ -176,7 +176,7 @@ class EventCleanup(threading.Thread): # so no need to delete mp4 files for event in expired_events: events_to_update.append(event.id) - deleted = delete_event_images(event) + deleted = delete_event_snapshot(event) if not deleted: logger.warning( @@ -340,6 +340,10 @@ class EventCleanup(threading.Thread): .iterator() ) events_to_delete = [e.id for e in events] + + for e in events: + delete_event_thumbnail(e) + logger.debug(f"Found {len(events_to_delete)} events that can be expired") if len(events_to_delete) > 0: for i in range(0, len(events_to_delete), CHUNK_SIZE): From 4aa493b96c2199e75f1a7bcb630bbcbc443a53a8 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Fri, 28 Mar 2025 20:37:11 -0600 Subject: [PATCH 82/97] Catch case where frame time is between end and rounded start (#17438) --- frigate/api/media.py | 61 +++++++++++++++++++++++++++++++------------- 1 file changed, 43 insertions(+), 18 deletions(-) diff --git a/frigate/api/media.py b/frigate/api/media.py index 83307a15c..e9290e237 100644 --- a/frigate/api/media.py +++ b/frigate/api/media.py @@ -240,25 +240,50 @@ def get_snapshot_from_recording( content={"success": False, "message": "Camera not found"}, status_code=404, ) - - recording_query = ( - Recordings.select( - Recordings.path, - Recordings.start_time, - ) - .where( - ( - (frame_time >= Recordings.start_time) - & (frame_time <= Recordings.end_time) - ) - ) - .where(Recordings.camera == camera_name) - .order_by(Recordings.start_time.desc()) - .limit(1) - ) + recording: Recordings | None = None try: - recording: Recordings = recording_query.get() + recording = ( + Recordings.select( + Recordings.path, + Recordings.start_time, + ) + .where( + ( + (frame_time >= Recordings.start_time) + & (frame_time <= Recordings.end_time) + ) + ) + .where(Recordings.camera == camera_name) + 
                .order_by(Recordings.start_time.desc())
+                .limit(1)
+                .get()
+            )
+    except DoesNotExist:
+        # try again with a rounded frame time, as the exact time may fall
+        # between one segment's end and the next segment's rounded start time
+        frame_time = round(frame_time)
+        try:
+            recording = (
+                Recordings.select(
+                    Recordings.path,
+                    Recordings.start_time,
+                )
+                .where(
+                    (
+                        (frame_time >= Recordings.start_time)
+                        & (frame_time <= Recordings.end_time)
+                    )
+                )
+                .where(Recordings.camera == camera_name)
+                .order_by(Recordings.start_time.desc())
+                .limit(1)
+                .get()
+            )
+        except DoesNotExist:
+            pass
+
+    if recording is not None:
         time_in_segment = frame_time - recording.start_time
         codec = "png" if format == "png" else "mjpeg"
         mime_type = "png" if format == "png" else "jpeg"
@@ -279,7 +304,7 @@ def get_snapshot_from_recording(
                 status_code=404,
             )
         return Response(image_data, headers={"Content-Type": f"image/{mime_type}"})
-    except DoesNotExist:
+    else:
         return JSONResponse(
             content={
                 "success": False,

From 37c3ac54133d77f1474a19bf56df910c4f7e227a Mon Sep 17 00:00:00 2001
From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com>
Date: Sat, 29 Mar 2025 06:58:50 -0500
Subject: [PATCH 83/97] Misc fixes (#17443)

* ensure semantic search is enabled before updating embeddings stats
* add camera and label to snapshot download filename

---
 frigate/embeddings/maintainer.py                         | 4 +++-
 web/src/components/overlay/detail/SearchDetailDialog.tsx | 2 +-
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/frigate/embeddings/maintainer.py b/frigate/embeddings/maintainer.py
index 7554b12c6..eee8e0e96 100644
--- a/frigate/embeddings/maintainer.py
+++ b/frigate/embeddings/maintainer.py
@@ -235,8 +235,10 @@ class EmbeddingMaintainer(threading.Thread):
         if not camera or source_type != EventTypeEnum.tracked_object:
             return

+        if self.config.semantic_search.enabled:
+            self.embeddings.update_stats()
+
         camera_config = self.config.cameras[camera]
-        self.embeddings.update_stats()

         # no need to process updated objects if face recognition, lpr, genai are disabled
         if not camera_config.genai.enabled and len(self.realtime_processors) == 0:
diff --git a/web/src/components/overlay/detail/SearchDetailDialog.tsx b/web/src/components/overlay/detail/SearchDetailDialog.tsx
index afa428eda..98b093b8f 100644
--- a/web/src/components/overlay/detail/SearchDetailDialog.tsx
+++ b/web/src/components/overlay/detail/SearchDetailDialog.tsx
@@ -955,8 +955,8 @@ export function ObjectSnapshotTab({

From bda7fcc7842defedd9457c55eadf19895b192318 Mon Sep 17 00:00:00 2001
From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com>
Date: Sat, 29 Mar 2025 07:19:12 -0500
Subject: [PATCH 84/97] use ceil instead of round for recording snapshot (#17444)

---
 frigate/api/media.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/frigate/api/media.py b/frigate/api/media.py
index e9290e237..7dab6ae60 100644
--- a/frigate/api/media.py
+++ b/frigate/api/media.py
@@ -2,6 +2,7 @@

 import glob
 import logging
+import math
 import os
 import subprocess as sp
 import time
@@ -262,7 +263,7 @@ def get_snapshot_from_recording(
     except DoesNotExist:
         # try again with a rounded frame time, as the exact time may fall
         # between one segment's end and the next segment's rounded start time
-        frame_time = round(frame_time)
+        frame_time = math.ceil(frame_time)

From 17912b4695b92f6aae105687ee551a1f30539f0d Mon Sep 17 00:00:00 2001
From: GuoQing Liu <842607283@qq.com>
Date: Sun, 30 Mar 2025 18:55:26 +0800
Subject: [PATCH 85/97] update chinese i18n (#17450)

---
 web/public/locales/zh-CN/views/faceLibrary.json | 1 +
web/public/locales/zh-CN/views/settings.json | 16 +++++++++++----- web/public/locales/zh-CN/views/system.json | 7 ++++--- 3 files changed, 16 insertions(+), 8 deletions(-) diff --git a/web/public/locales/zh-CN/views/faceLibrary.json b/web/public/locales/zh-CN/views/faceLibrary.json index 76a98bf31..f2dba6fc3 100644 --- a/web/public/locales/zh-CN/views/faceLibrary.json +++ b/web/public/locales/zh-CN/views/faceLibrary.json @@ -16,6 +16,7 @@ "createFaceLibrary": { "title": "创建人脸库", "desc": "创建一个新的人脸库", + "new": "新建人脸", "nextSteps": "建议使用“训练”选项卡为每个检测到的人选择并训练图像。在打好基础前,强烈建议训练仅使用正面图像。而不是从摄像机中识别到的角度拍摄的人脸图像。" }, "train": { diff --git a/web/public/locales/zh-CN/views/settings.json b/web/public/locales/zh-CN/views/settings.json index 0c1087916..779d4ca28 100644 --- a/web/public/locales/zh-CN/views/settings.json +++ b/web/public/locales/zh-CN/views/settings.json @@ -87,9 +87,15 @@ "title": "语义搜索", "desc": "Frigate的语义搜索能够让你使用自然语言根据图像本身、自定义的文本描述或自动生成的描述来搜索视频。", "readTheDocumentation": "阅读文档(英文)", - "reindexOnStartup": { - "label": "启动时重新索引", - "desc": "每次启动将重新索引并重新处理所有缩略图和描述。关闭该设置后不要忘记重启!" + "reindexNow": { + "label": "立即重建索引", + "desc": "重建索引将为所有跟踪对象重新生成特征向量。该过程将在后台运行,可能会使CPU满载,所需时间取决于跟踪对象的数量。", + "confirmTitle": "确认重建索引", + "confirmDesc": "确定要为所有跟踪对象重建特征向量索引吗?此过程将在后台运行,但可能会导致CPU满载并耗费较长时间。您可以在探索页面查看进度。", + "confirmButton": "重建索引", + "success": "重建索引已成功启动。", + "alreadyInProgress": "重建索引已在执行中。", + "error": "启动重建索引失败:{{errorMessage}}" }, "modelSize": { "label": "模型大小", @@ -113,11 +119,11 @@ "desc": "用于人脸识别的模型尺寸。", "small": { "title": "小模型", - "desc": "使用小模型将采用OpenCV的局部二值模式直方图(LBPH)算法,可在大多数CPU上高效运行。" + "desc": "使用小模型将采用FaceNet人脸特征提取模型,可在大多数CPU上高效运行。" }, "large": { "title": "大模型", - "desc": "使用大模型将采用ArcFace人脸嵌入模型,若适用将自动在GPU上运行。" + "desc": "使用大模型将采用ArcFace人脸特征提取模型,若条件允许将自动使用GPU运行。" } } }, diff --git a/web/public/locales/zh-CN/views/system.json b/web/public/locales/zh-CN/views/system.json index 01251f3c5..8e442162a 100644 --- a/web/public/locales/zh-CN/views/system.json +++ b/web/public/locales/zh-CN/views/system.json @@ -3,7 +3,7 @@ "cameras": "摄像头统计 - Frigate", "storage": "存储统计 - Frigate", "general": "常规统计 - Frigate", - "features": "功能统计 - Frigate", + "enrichments": "增强功能统计 - Frigate", "logs": { "frigate": "Frigate 日志 - Frigate", "go2rtc": "Go2RTC 日志 - Frigate", @@ -144,8 +144,9 @@ "healthy": "系统运行正常", "reindexingEmbeddings": "正在重新索引嵌入(已完成 {{processed}}%)" }, - "features": { - "title": "功能", + "enrichments": { + "title": "增强功能", + "infPerSecond": "每秒推理次数", "embeddings": { "image_embedding_speed": "图像特征提取速度", "face_embedding_speed": "人脸特征提取速度", From 2c1ded37a172c2b8e10a1b9bda5c2bb5faef7795 Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Sun, 30 Mar 2025 07:17:25 -0500 Subject: [PATCH 86/97] Ensure we use the stream name from local storage on mobile (#17452) --- web/src/views/live/LiveDashboardView.tsx | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/web/src/views/live/LiveDashboardView.tsx b/web/src/views/live/LiveDashboardView.tsx index 199146b0b..b00b1a2f5 100644 --- a/web/src/views/live/LiveDashboardView.tsx +++ b/web/src/views/live/LiveDashboardView.tsx @@ -472,11 +472,20 @@ export default function LiveDashboardView({ } else { grow = "aspect-video"; } - const streamName = - currentGroupStreamingSettings?.[camera.name]?.streamName || - camera?.live?.streams - ? 
Object?.values(camera?.live?.streams)?.[0] - : ""; + const availableStreams = camera.live.streams || {}; + const firstStreamEntry = Object.values(availableStreams)[0] || ""; + + const streamNameFromSettings = + currentGroupStreamingSettings?.[camera.name]?.streamName || ""; + const streamExists = + streamNameFromSettings && + Object.values(availableStreams).includes( + streamNameFromSettings, + ); + + const streamName = streamExists + ? streamNameFromSettings + : firstStreamEntry; const autoLive = currentGroupStreamingSettings?.[camera.name]?.streamType !== "no-streaming"; From 2920127ada801e39652aae796a26436d53590d48 Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Sun, 30 Mar 2025 08:43:24 -0500 Subject: [PATCH 87/97] Dedicated LPR improvements (#17453) * remove license plate from attributes for dedicated lpr cameras * ensure we always have a color * use frigate+ models with dedicated lpr cameras * docs * docs clarity * docs enrichments * use license_plate as object type --- .../license_plate_recognition.md | 119 ++++++++++++++---- frigate/camera/state.py | 8 +- .../common/license_plate/mixin.py | 70 +++++++---- frigate/embeddings/maintainer.py | 6 +- frigate/track/tracked_object.py | 2 +- frigate/video.py | 37 ++++-- 6 files changed, 183 insertions(+), 59 deletions(-) diff --git a/docs/docs/configuration/license_plate_recognition.md b/docs/docs/configuration/license_plate_recognition.md index f3581e598..ccb9a16a6 100644 --- a/docs/docs/configuration/license_plate_recognition.md +++ b/docs/docs/configuration/license_plate_recognition.md @@ -62,7 +62,7 @@ Fine-tune the LPR feature using these optional parameters: - **`detection_threshold`**: License plate object detection confidence score required before recognition runs. - Default: `0.7` - - Note: This is field only applies to the standalone license plate detection model, `min_score` should be used to filter for models that have license plate detection built in. + - Note: This field only applies to the standalone license plate detection model; `threshold` and `min_score` object filters should be used for models like Frigate+ that have license plate detection built in. - **`min_area`**: Defines the minimum area (in pixels) a license plate must be before recognition runs. - Default: `1000` pixels. Note: this is intentionally set very low as it is an _area_ measurement (length x width). For reference, 1000 pixels represents a ~32x32 pixel square in your camera image. - Depending on the resolution of your camera's `detect` stream, you can increase this value to ignore small or distant plates. @@ -137,17 +137,86 @@ lpr: - "MN D3163" ``` +:::note + +If you want to detect cars on cameras but don't want to use resources to run LPR on those cars, you should disable LPR for those specific cameras. + +```yaml +cameras: + side_yard: + lpr: + enabled: False + ... +``` + +::: + ## Dedicated LPR Cameras Dedicated LPR cameras are single-purpose cameras with powerful optical zoom to capture license plates on distant vehicles, often with fine-tuned settings to capture plates at night. -Users with a dedicated LPR camera can run Frigate's LPR by specifying a camera type of `lpr` in the camera configuration.
An example config for a dedicated LPR camera might look like this: +Users can configure Frigate's LPR in two different ways depending on whether they are using a Frigate+ model: + +### Using a Frigate+ Model + +Users running a Frigate+ model (or any model that natively detects `license_plate`) can take advantage of `license_plate` detection. This allows license plates to be treated as standard objects in dedicated LPR mode, meaning that alerts, detections, snapshots, zones, and other Frigate features work as usual, and plates are detected efficiently through your configured object detector. + +An example configuration for a dedicated LPR camera using a Frigate+ model: + +```yaml +# LPR global configuration +lpr: + enabled: True + +# Dedicated LPR camera configuration +cameras: + dedicated_lpr_camera: + type: "lpr" # required to use dedicated LPR camera mode + detect: + enabled: True + fps: 5 # increase if vehicles move quickly + min_initialized: 2 # set at fps divided by 3 for very fast cars + width: 1920 + height: 1080 + objects: + track: + - license_plate + filters: + license_plate: + threshold: 0.7 + motion: + threshold: 30 + contour_area: 60 # use an increased value to tune out small motion changes + improve_contrast: false + mask: 0.704,0.007,0.709,0.052,0.989,0.055,0.993,0.001 # ensure your camera's timestamp is masked + record: + enabled: True # disable recording if you only want snapshots + snapshots: + enabled: True + review: + detections: + labels: + - license_plate +``` + +With this setup: + +- License plates are treated as normal objects in Frigate. +- Scores, alerts, detections, snapshots, zones, and object masks work as expected. +- Snapshots will have license plate bounding boxes on them. +- The `frigate/events` MQTT topic will publish tracked object updates. +- Debug view will display `license_plate` bounding boxes. + +### Using the Secondary LPR Pipeline (Without Frigate+) + +If you are not running a Frigate+ model, you can use Frigate’s built-in secondary dedicated LPR pipeline. In this mode, Frigate bypasses the standard object detection pipeline and runs a local license plate detector model on the full frame whenever motion activity occurs. + +An example configuration for a dedicated LPR camera using the secondary pipeline: ```yaml # LPR global configuration lpr: enabled: True - min_plate_length: 4 detection_threshold: 0.7 # change if necessary # Dedicated LPR camera configuration @@ -156,14 +225,15 @@ cameras: type: "lpr" # required to use dedicated LPR camera mode lpr: enabled: True - expire_time: 3 # optional, default enhancement: 3 # optional, enhance the image before trying to recognize characters ffmpeg: ... detect: - enabled: False # optional, disable Frigate's standard object detection pipeline - fps: 5 # keep this at 5, higher values are unnecessary for dedicated LPR mode and could overwhelm the detector + enabled: False # disable Frigate's standard object detection pipeline + fps: 5 # increase if necessary, though high values may slow down Frigate's enrichments pipeline and use considerable CPU width: 1920 height: 1080 + objects: + track: [] # required when not using a Frigate+ model for dedicated LPR mode motion: threshold: 30 contour_area: 60 # use an increased value here to tune out small motion changes @@ -178,31 +248,38 @@ cameras: default: 7 ``` -The camera-level `type` setting tells Frigate to treat your camera as a dedicated LPR camera. 
Setting this option bypasses Frigate's standard object detection pipeline so that a `car` does not need to be detected to run LPR. This dedicated LPR pipeline does not utilize defined zones or object masks, and the license plate detector is always run on the full frame whenever motion activity occurs. If a plate is found, a snapshot at the highest scoring moment is saved as a `car` object, visible in Explore and searchable by the recognized plate via Explore's More Filters. - -An optional config variable for dedicated LPR cameras only, `expire_time`, can be specified under the `lpr` configuration at the camera level to change the time it takes for Frigate to consider a previously tracked plate as expired. - -:::note - -When using `type: "lpr"` for a camera, a non-standard object detection pipeline is used. Any detected license plates on dedicated LPR cameras are treated similarly to manual events in Frigate. Note that for `car` objects with license plates: +With this setup: +- The standard object detection pipeline is bypassed. Any detected license plates on dedicated LPR cameras are treated similarly to manual events in Frigate. You must **not** specify `license_plate` as an object to track. +- The license plate detector runs on the full frame whenever motion is detected and processes frames according to your detect `fps` setting. - Review items will always be classified as a `detection`. - Snapshots will always be saved. -- Tracked objects are retained according to your retain settings for `record` and `snapshots`. -- Zones and object masks cannot be used. -- Debug view may not show `license_plate` bounding boxes, even if you are using a Frigate+ model for your standard object detection pipeline. -- The `frigate/events` MQTT topic will not publish tracked object updates, though `frigate/reviews` will if recordings are enabled. +- Zones and object masks are **not** used. +- The `frigate/events` MQTT topic will **not** publish tracked object updates, though `frigate/reviews` will if recordings are enabled. +- License plate snapshots are saved at the highest-scoring moment and appear in Explore. +- Debug view will not show `license_plate` bounding boxes. -::: +### Summary + +| Feature | Native `license_plate` detecting Model (like Frigate+) | Secondary Pipeline (without native model or Frigate+) | +| ----------------------- | ------------------------------------------------------ | --------------------------------------------------------------- | +| License Plate Detection | Uses `license_plate` as a tracked object | Runs a dedicated LPR pipeline | +| FPS Setting | 5 (increase for fast-moving cars) | 5 (increase for fast-moving cars, but it may use much more CPU) | +| Object Detection | Standard Frigate+ detection applies | Bypasses standard object detection | +| Zones & Object Masks | Supported | Not supported | +| Debug View | May show `license_plate` bounding boxes | May **not** show `license_plate` bounding boxes | +| MQTT `frigate/events` | Publishes tracked object updates | Does **not** publish tracked object updates | +| Explore | Recognized plates available in More Filters | Recognized plates available in More Filters | + +By selecting the appropriate configuration, users can optimize their dedicated LPR cameras based on whether they are using a Frigate+ model or the secondary LPR pipeline. 
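For readers following along in the code, the pipeline choice summarized above comes down to a single condition on the camera config: a dedicated LPR camera uses the secondary full-frame pipeline only when its model does not natively track `license_plate`. A minimal sketch of that check follows; the dataclasses and helper name here are simplified, illustrative stand-ins for Frigate's actual config classes, not its API:

```python
# Minimal sketch, assuming simplified stand-ins for Frigate's config classes.
from dataclasses import dataclass, field


@dataclass
class ObjectsConfig:
    track: list[str] = field(default_factory=list)


@dataclass
class CameraConfig:
    type: str = "generic"  # "lpr" marks a dedicated LPR camera
    objects: ObjectsConfig = field(default_factory=ObjectsConfig)


def uses_secondary_lpr_pipeline(camera: CameraConfig) -> bool:
    """A dedicated LPR camera falls back to the secondary full-frame
    pipeline only when its model does not natively track license plates."""
    return camera.type == "lpr" and "license_plate" not in camera.objects.track


# The two configurations from the YAML examples above:
plus_cam = CameraConfig(type="lpr", objects=ObjectsConfig(track=["license_plate"]))
basic_cam = CameraConfig(type="lpr", objects=ObjectsConfig(track=[]))
assert not uses_secondary_lpr_pipeline(plus_cam)  # native object pipeline
assert uses_secondary_lpr_pipeline(basic_cam)  # secondary full-frame pipeline
```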
### Best practices for using Dedicated LPR camera mode - Tune your motion detection and increase the `contour_area` until you see only larger motion boxes being created as cars pass through the frame (likely somewhere between 50-90 for a 1920x1080 detect stream). Increasing the `contour_area` filters out small areas of motion and will prevent excessive resource use from looking for license plates in frames that don't even have a car passing through it. - Disable the `improve_contrast` motion setting, especially if you are running LPR at night and the frame is mostly dark. This will prevent small pixel changes and smaller areas of motion from triggering license plate detection. - Ensure your camera's timestamp is covered with a motion mask so that it's not incorrectly detected as a license plate. -- While not strictly required, it may be beneficial to disable standard object detection on your dedicated LPR camera (`detect` --> `enabled: False`). If you've set the camera type to `"lpr"`, license plate detection will still be performed on the entire frame when motion occurs. -- If multiple tracked objects are being produced for the same license plate, you can tweak the `expire_time` to prevent plates from being expired from the view as quickly. -- You may need to change your camera settings for a clearer image or decrease your global `recognition_threshold` config if your plates are not being accurately recognized at night. +- For non-Frigate+ users, you may need to change your camera settings for a clearer image or decrease your global `recognition_threshold` config if your plates are not being accurately recognized at night. +- The secondary pipeline mode runs a local AI model on your CPU to detect plates. Increasing detect `fps` will increase CPU usage proportionally. 
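To make the `contour_area` advice above concrete, here is a rough sketch of area-based motion gating using OpenCV. It is illustrative only: Frigate's motion detector has more stages than this, and the helper function shown is an assumption, not part of Frigate's API.

```python
# Illustrative sketch: small motion contours never trigger plate detection.
import cv2
import numpy as np


def has_significant_motion(motion_mask: np.ndarray, contour_area: int = 60) -> bool:
    """Return True only if at least one motion contour is large enough to
    plausibly contain a passing car; small pixel noise is filtered out."""
    contours, _ = cv2.findContours(
        motion_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE
    )
    return any(cv2.contourArea(c) >= contour_area for c in contours)


# A 1080p mask with a single 10x10 blob of motion (contour area ~81 px):
mask = np.zeros((1080, 1920), dtype=np.uint8)
mask[100:110, 200:210] = 255
print(has_significant_motion(mask, contour_area=60))  # True: blob clears threshold
print(has_significant_motion(mask, contour_area=150))  # False: tuned out as noise
```

Raising `contour_area` in this sketch plays the same role as raising it in the config: frames with only small motion blobs are skipped before any plate detection work is done.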
## FAQ diff --git a/frigate/camera/state.py b/frigate/camera/state.py index 65a3dcf5d..267c7d457 100644 --- a/frigate/camera/state.py +++ b/frigate/camera/state.py @@ -88,7 +88,9 @@ class CameraState: thickness = 1 else: thickness = 2 - color = self.config.model.colormap[obj["label"]] + color = self.config.model.colormap.get( + obj["label"], (255, 255, 255) + ) else: thickness = 1 color = (255, 0, 0) @@ -110,7 +112,9 @@ class CameraState: and obj["frame_time"] == frame_time ): thickness = 5 - color = self.config.model.colormap[obj["label"]] + color = self.config.model.colormap.get( + obj["label"], (255, 255, 255) + ) # debug autotracking zooming - show the zoom factor box if ( diff --git a/frigate/data_processing/common/license_plate/mixin.py b/frigate/data_processing/common/license_plate/mixin.py index 63285ac79..7f731704f 100644 --- a/frigate/data_processing/common/license_plate/mixin.py +++ b/frigate/data_processing/common/license_plate/mixin.py @@ -21,7 +21,6 @@ from frigate.comms.event_metadata_updater import ( EventMetadataPublisher, EventMetadataTypeEnum, ) -from frigate.config.camera.camera import CameraTypeEnum from frigate.const import CLIPS_DIR from frigate.embeddings.onnx.lpr_embedding import LPR_EMBEDDING_SIZE from frigate.util.builtin import EventsPerSecond @@ -972,7 +971,7 @@ class LicensePlateProcessingMixin: ( now, camera, - "car", + "license_plate", event_id, True, plate_score, @@ -994,9 +993,7 @@ class LicensePlateProcessingMixin: if not self.config.cameras[camera].lpr.enabled: return - if not dedicated_lpr and self.config.cameras[camera].type == CameraTypeEnum.lpr: - return - + # dedicated LPR cam without frigate+ if dedicated_lpr: id = "dedicated-lpr" @@ -1050,8 +1047,11 @@ class LicensePlateProcessingMixin: else: id = obj_data["id"] - # don't run for non car objects - if obj_data.get("label") != "car": + # don't run for non car or non license plate (dedicated lpr with frigate+) objects + if ( + obj_data.get("label") != "car" + and obj_data.get("label") != "license_plate" + ): logger.debug( f"{camera}: Not a processing license plate for non car object." 
) @@ -1131,26 +1131,34 @@ class LicensePlateProcessingMixin: license_plate[0] : license_plate[2], ] else: - # don't run for object without attributes - if not obj_data.get("current_attributes"): + # don't run for object without attributes if this isn't dedicated lpr with frigate+ + if ( + not obj_data.get("current_attributes") + and obj_data.get("label") != "license_plate" + ): logger.debug(f"{camera}: No attributes to parse.") return - attributes: list[dict[str, any]] = obj_data.get( - "current_attributes", [] - ) - for attr in attributes: - if attr.get("label") != "license_plate": - continue + if obj_data.get("label") == "car": + attributes: list[dict[str, any]] = obj_data.get( + "current_attributes", [] + ) + for attr in attributes: + if attr.get("label") != "license_plate": + continue - if license_plate is None or attr.get( - "score", 0.0 - ) > license_plate.get("score", 0.0): - license_plate = attr + if license_plate is None or attr.get( + "score", 0.0 + ) > license_plate.get("score", 0.0): + license_plate = attr - # no license plates detected in this frame - if not license_plate: - return + # no license plates detected in this frame + if not license_plate: + return + + # we are using dedicated lpr with frigate+ + if obj_data.get("label") == "license_plate": + license_plate = obj_data license_plate_box = license_plate.get("box") @@ -1160,7 +1168,9 @@ class LicensePlateProcessingMixin: or area(license_plate_box) < self.config.cameras[obj_data["camera"]].lpr.min_area ): - logger.debug(f"{camera}: Invalid license plate box {license_plate}") + logger.debug( + f"{camera}: Area for license plate box {area(license_plate_box)} is less than min_area {self.config.cameras[obj_data['camera']].lpr.min_area}" + ) return license_plate_frame = cv2.cvtColor(frame, cv2.COLOR_YUV2BGR_I420) @@ -1239,8 +1249,11 @@ class LicensePlateProcessingMixin: ) return - # For LPR cameras, match or assign plate ID using Jaro-Winkler distance - if dedicated_lpr: + # For dedicated LPR cameras, match or assign plate ID using Jaro-Winkler distance + if ( + dedicated_lpr + and "license_plate" not in self.config.cameras[camera].objects.track + ): plate_id = None for existing_id, data in self.detected_license_plates.items(): @@ -1306,8 +1319,11 @@ class LicensePlateProcessingMixin: (id, top_plate, avg_confidence), ) - if dedicated_lpr: - # save the best snapshot + # save the best snapshot for dedicated lpr cams not using frigate+ + if ( + dedicated_lpr + and "license_plate" not in self.config.cameras[camera].objects.track + ): logger.debug( f"{camera}: Writing snapshot for {id}, {top_plate}, {current_time}" ) diff --git a/frigate/embeddings/maintainer.py b/frigate/embeddings/maintainer.py index eee8e0e96..be7843c9c 100644 --- a/frigate/embeddings/maintainer.py +++ b/frigate/embeddings/maintainer.py @@ -457,7 +457,11 @@ class EmbeddingMaintainer(threading.Thread): camera_config = self.config.cameras[camera] - if not camera_config.type == CameraTypeEnum.lpr: + if ( + camera_config.type != CameraTypeEnum.lpr + or "license_plate" in camera_config.objects.track + ): + # we're not a dedicated lpr camera or we are one but we're using frigate+ return try: diff --git a/frigate/track/tracked_object.py b/frigate/track/tracked_object.py index 7a4829c2a..978671512 100644 --- a/frigate/track/tracked_object.py +++ b/frigate/track/tracked_object.py @@ -442,7 +442,7 @@ class TrackedObject: if bounding_box: thickness = 2 - color = self.colormap[self.obj_data["label"]] + color = self.colormap.get(self.obj_data["label"], (255, 255, 255)) # 
draw the bounding boxes on the frame box = self.thumbnail_data["box"] diff --git a/frigate/video.py b/frigate/video.py index 91e92fee1..b14f8567c 100755 --- a/frigate/video.py +++ b/frigate/video.py @@ -15,6 +15,7 @@ from frigate.camera import CameraMetrics, PTZMetrics from frigate.comms.config_updater import ConfigSubscriber from frigate.comms.inter_process import InterProcessRequestor from frigate.config import CameraConfig, DetectConfig, ModelConfig +from frigate.config.camera.camera import CameraTypeEnum from frigate.const import ( CACHE_DIR, CACHE_SEGMENT_FORMAT, @@ -519,6 +520,7 @@ def track_camera( frame_queue, frame_shape, model_config, + config, config.detect, frame_manager, motion_detector, @@ -585,6 +587,7 @@ def process_frames( frame_queue: mp.Queue, frame_shape, model_config: ModelConfig, + camera_config: CameraConfig, detect_config: DetectConfig, frame_manager: FrameManager, motion_detector: MotionDetector, @@ -612,6 +615,29 @@ def process_frames( region_min_size = get_min_region_size(model_config) + attributes_map = model_config.attributes_map + all_attributes = model_config.all_attributes + + # remove license_plate from attributes if this camera is a dedicated LPR cam + if camera_config.type == CameraTypeEnum.lpr: + modified_attributes_map = model_config.attributes_map.copy() + + if ( + "car" in modified_attributes_map + and "license_plate" in modified_attributes_map["car"] + ): + modified_attributes_map["car"] = [ + attr + for attr in modified_attributes_map["car"] + if attr != "license_plate" + ] + + attributes_map = modified_attributes_map + + all_attributes = [ + attr for attr in model_config.all_attributes if attr != "license_plate" + ] + while not stop_event.is_set(): _, updated_enabled_config = enabled_config_subscriber.check_for_update() @@ -805,9 +831,7 @@ def process_frames( # if detection was run on this frame, consolidate if len(regions) > 0: tracked_detections = [ - d - for d in consolidated_detections - if d[0] not in model_config.all_attributes + d for d in consolidated_detections if d[0] not in all_attributes ] # now that we have refined our detections, we need to track objects object_tracker.match_and_update( @@ -819,7 +843,7 @@ def process_frames( # group the attribute detections based on what label they apply to attribute_detections: dict[str, list[TrackedObjectAttribute]] = {} - for label, attribute_labels in model_config.attributes_map.items(): + for label, attribute_labels in attributes_map.items(): attribute_detections[label] = [ TrackedObjectAttribute(d) for d in consolidated_detections @@ -836,8 +860,7 @@ def process_frames( for attributes in attribute_detections.values(): for attribute in attributes: filtered_objects = filter( - lambda o: attribute.label - in model_config.attributes_map.get(o["label"], []), + lambda o: attribute.label in attributes_map.get(o["label"], []), all_objects, ) selected_object_id = attribute.find_best_object(filtered_objects) @@ -885,7 +908,7 @@ def process_frames( for obj in object_tracker.tracked_objects.values(): if obj["frame_time"] == frame_time: thickness = 2 - color = model_config.colormap[obj["label"]] + color = model_config.colormap.get(obj["label"], (255, 255, 255)) else: thickness = 1 color = (255, 0, 0) From 7f3f62e46dcb96dfee7560108ee03e97cb8d04ff Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Sun, 30 Mar 2025 08:33:52 -0600 Subject: [PATCH 88/97] Clean up and clarify face docs (#17454) --- docs/docs/configuration/face_recognition.md | 24 ++++++++------------- docs/docs/configuration/reference.md | 4 ++-- 
2 files changed, 11 insertions(+), 17 deletions(-) diff --git a/docs/docs/configuration/face_recognition.md b/docs/docs/configuration/face_recognition.md index d618dbba6..b894133fb 100644 --- a/docs/docs/configuration/face_recognition.md +++ b/docs/docs/configuration/face_recognition.md @@ -9,15 +9,9 @@ Face recognition identifies known individuals by matching detected faces with pr ### Face Detection -Users running a Frigate+ model (or any custom model that natively detects faces) should ensure that `face` is added to the [list of objects to track](../plus/#available-label-types) either globally or for a specific camera. This will allow face detection to run at the same time as object detection and be more efficient. +When running a Frigate+ model (or any custom model that natively detects faces), you should ensure that `face` is added to the [list of objects to track](../plus/#available-label-types) either globally or for a specific camera. This will allow face detection to run at the same time as object detection and be more efficient. -Users without a model that detects faces can still run face recognition. Frigate uses a lightweight DNN face detection model that runs on the CPU. In this case, you should _not_ define `face` in your list of objects to track. - -:::note - -Frigate needs to first detect a `face` before it can recognize a face. - -::: +When running a default COCO model or another model that does not include `face` as a detectable label, face detection will run via CV2 using a lightweight DNN model that runs on the CPU. In this case, you should _not_ define `face` in your list of objects to track. ### Face Recognition @@ -26,7 +20,7 @@ Frigate has support for two face recognition model types: - **small**: Frigate will run a FaceNet embedding model to recognize faces, which runs locally on the CPU. This model is optimized for efficiency and is not as accurate. - **large**: Frigate will run a large ArcFace embedding model that is optimized for accuracy. It is only recommended to be run when an integrated or dedicated GPU is available. -In both cases a lightweight face landmark detection model is also used to align faces before running the recognition model. +In both cases, a lightweight face landmark detection model is also used to align faces before running recognition. ## Minimum System Requirements @@ -88,9 +82,9 @@ When choosing images to include in the face training set it is recommended to al - If it is difficult to make out details in a persons face it will not be helpful in training. - Avoid images with extreme under/over-exposure. - Avoid blurry / pixelated images. -- Avoid training on infrared (grayscale). The models are trained on color images and will be able to extract features from grayscale images. +- Avoid training on infrared (gray-scale). The models are trained on color images and will be able to extract features from gray-scale images. - Using images of people wearing hats / sunglasses may confuse the model. -- Do not upload too many similar images at the same time, it is recommended to train no more than 4-6 similar images for each person to avoid overfitting. +- Do not upload too many similar images at the same time, it is recommended to train no more than 4-6 similar images for each person to avoid over-fitting. ::: @@ -100,7 +94,7 @@ When first enabling face recognition it is important to build a foundation of st Then it is recommended to use the `Face Library` tab in Frigate to select and train images for each person as they are detected.
When building a strong foundation it is strongly recommended to only train on images that are straight-on. Ignore images from cameras that recognize faces from an angle. -Aim to strike a balance between the quality of images while also having a range of conditions (day / night, different weather conditions, different times of day, etc.) in order to have diversity in the images used for each person and not have overfitting. +Aim to strike a balance between the quality of images while also having a range of conditions (day / night, different weather conditions, different times of day, etc.) in order to have diversity in the images used for each person and not have over-fitting. Once a person starts to be consistently recognized correctly on images that are straight-on, it is time to move on to the next step. @@ -112,11 +106,11 @@ Once straight-on images are performing well, start choosing slightly off-angle i ### Why can't I bulk upload photos? -It is important to methodically add photos to the library, bulk importing photos (especially from a general photo library) will lead to overfitting in that particular scenario and hurt recognition performance. +It is important to methodically add photos to the library; bulk importing photos (especially from a general photo library) will lead to over-fitting in that particular scenario and hurt recognition performance. ### Why do unknown people score similarly to known people? -This can happen for a few different reasons, but this is usually an indicator that the training set needs to be improved. This is often related to overfitting: +This can happen for a few different reasons, but this is usually an indicator that the training set needs to be improved. This is often related to over-fitting: - If you train with only a few images per person, especially if those images are very similar, the recognition model becomes overly specialized to those specific images. - When you provide images with different poses, lighting, and expressions, the algorithm extracts features that are consistent across those variations. @@ -124,4 +118,4 @@ This can happen for a few different reasons, but this is usually an indicator th ### I see scores above the threshold in the train tab, but a sub label wasn't assigned? -The Frigate considers the recognition scores across all recogntion attempts for each person object. The scores are continually weighted based on the area of the face, and a sub label will only be assigned to person if a person is confidently recognized consistently. This avoids cases where a single high confidence recognition would throw off the results. +Frigate considers the recognition scores across all recognition attempts for each person object. The scores are continually weighted based on the area of the face, and a sub label will only be assigned to a person if they are confidently recognized consistently. This avoids cases where a single high confidence recognition would throw off the results.
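The area-weighted scoring described in the last FAQ answer corresponds to the `weighted_scores` / `total_weights` bookkeeping that appears later in this series in `face.py`. A minimal sketch follows, assuming a simple linear-in-area weight (the exact weighting Frigate applies is an internal detail):

```python
# Simplified sketch of area-weighted face scoring; linear-in-area weighting
# is an assumption here, not Frigate's exact scheme.
from collections import defaultdict


def best_face_match(attempts: list[tuple[str, float, int]]) -> tuple[str, float]:
    """attempts holds (name, score, face_area) per recognition attempt.
    Larger faces carry more weight, so one lucky score on a tiny, blurry
    face cannot outvote consistent results on clear, close-up faces."""
    weighted_scores: dict[str, float] = defaultdict(float)
    total_weights: dict[str, float] = defaultdict(float)
    for name, score, area in attempts:
        weighted_scores[name] += score * area
        total_weights[name] += area
    best = max(weighted_scores, key=weighted_scores.get)
    return best, weighted_scores[best] / total_weights[best]


# One high score on a small face loses to steady scores on large faces:
print(best_face_match([("alice", 0.95, 400), ("bob", 0.80, 5000), ("bob", 0.82, 6000)]))
# -> ('bob', 0.810...)
```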
diff --git a/docs/docs/configuration/reference.md b/docs/docs/configuration/reference.md index 81ca1becc..26e51b616 100644 --- a/docs/docs/configuration/reference.md +++ b/docs/docs/configuration/reference.md @@ -556,8 +556,8 @@ face_recognition: recognition_threshold: 0.9 # Optional: Min area of detected face box to consider running face recognition (default: shown below) min_area: 500 - # Optional: Save images of recognized faces for training (default: shown below) - save_attempts: True + # Optional: Number of images of recognized faces to save for training (default: shown below) + save_attempts: 100 # Optional: Apply a blur quality filter to adjust confidence based on the blur level of the image (default: shown below) blur_confidence_filter: True From 1dd5007fa83ef1fb39fc5837295709aaa3ea8f57 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Mon, 31 Mar 2025 12:47:33 -0600 Subject: [PATCH 89/97] Update nvidia inference time docs (#17469) --- docs/docs/frigate/hardware.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/docs/frigate/hardware.md b/docs/docs/frigate/hardware.md index 62152996d..72d340682 100644 --- a/docs/docs/frigate/hardware.md +++ b/docs/docs/frigate/hardware.md @@ -135,6 +135,7 @@ Inference speeds will vary greatly depending on the GPU and the model used. | GTX 1660 SUPER | ~ 4 ms | | | | RTX 3050 | 5 - 7 ms | 320: ~ 10 ms 640: ~ 16 ms | 336: ~ 16 ms 560: ~ 40 ms | | RTX 3070 Mobile | ~ 5 ms | | | +| RTX 3070 | 4 - 6 ms | 320: ~ 6 ms 640: ~ 12 ms | 336: ~ 14 ms 560: ~ 36 ms | | Quadro P400 2GB | 20 - 25 ms | | | | Quadro P2000 | ~ 12 ms | | | From 207d1d280698a08d92a916fd2167d9c0170de246 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Mon, 31 Mar 2025 15:49:56 -0600 Subject: [PATCH 90/97] Face UI cleanup (#17472) * Add note * Sort by event id * Fix reprocess causing shift * Move event group to separate comp * Handle selecting events * implement event selection * Implement selected handler * handle right click * Toggle ctrl + a * Stop propogation * Fix --- docs/docs/configuration/face_recognition.md | 4 + frigate/data_processing/real_time/face.py | 52 ++-- web/src/hooks/use-contextmenu.ts | 1 + web/src/pages/FaceLibrary.tsx | 319 +++++++++++++------- 4 files changed, 255 insertions(+), 121 deletions(-) diff --git a/docs/docs/configuration/face_recognition.md b/docs/docs/configuration/face_recognition.md index b894133fb..fe6ed1f99 100644 --- a/docs/docs/configuration/face_recognition.md +++ b/docs/docs/configuration/face_recognition.md @@ -108,6 +108,10 @@ Once straight-on images are performing well, start choosing slightly off-angle i It is important to methodically add photos to the library, bulk importing photos (especially from a general photo library) will lead to over-fitting in that particular scenario and hurt recognition performance. +### Why can't I bulk reprocess faces? + +Face embedding models work by breaking apart faces into different features. This means that when reprocessing an image, only images from a similar angle will have its score affected. + ### Why do unknown people score similarly to known people? This can happen for a few different reasons, but this is usually an indicator that the training set needs to be improved. 
This is often related to over-fitting: diff --git a/frigate/data_processing/real_time/face.py b/frigate/data_processing/real_time/face.py index 5b20a6303..dd18aeffc 100644 --- a/frigate/data_processing/real_time/face.py +++ b/frigate/data_processing/real_time/face.py @@ -272,22 +272,9 @@ class FaceRealTimeProcessor(RealTimeProcessorApi): f"Detected best face for person as: {sub_label} with probability {score}" ) - if self.config.face_recognition.save_attempts: - # write face to library - folder = os.path.join(FACE_DIR, "train") - file = os.path.join(folder, f"{id}-{sub_label}-{score}-0.webp") - os.makedirs(folder, exist_ok=True) - cv2.imwrite(file, face_frame) - - files = sorted( - filter(lambda f: (f.endswith(".webp")), os.listdir(folder)), - key=lambda f: os.path.getctime(os.path.join(folder, f)), - reverse=True, - ) - - # delete oldest face image if maximum is reached - if len(files) > self.config.face_recognition.save_attempts: - os.unlink(os.path.join(folder, files[-1])) + self.write_face_attempt( + face_frame, id, datetime.datetime.now().timestamp(), sub_label, score + ) if id not in self.person_face_history: self.person_face_history[id] = [] @@ -383,9 +370,9 @@ class FaceRealTimeProcessor(RealTimeProcessorApi): } elif topic == EmbeddingsRequestEnum.reprocess_face.value: current_file: str = request_data["image_file"] - id = current_file[0 : current_file.index("-", current_file.index("-") + 1)] - face_score = current_file[current_file.rfind("-") : current_file.rfind(".")] + (id_time, id_rand, timestamp, _, _) = current_file.split("-") img = None + id = f"{id_time}-{id_rand}" if current_file: img = cv2.imread(current_file) @@ -411,7 +398,7 @@ class FaceRealTimeProcessor(RealTimeProcessorApi): folder = os.path.join(FACE_DIR, "train") os.makedirs(folder, exist_ok=True) new_file = os.path.join( - folder, f"{id}-{sub_label}-{score}-{face_score}.webp" + folder, f"{id}-{timestamp}-{sub_label}-{score}.webp" ) shutil.move(current_file, new_file) @@ -461,3 +448,30 @@ class FaceRealTimeProcessor(RealTimeProcessorApi): weighted_average = weighted_scores[best_name] / total_weights[best_name] return best_name, weighted_average + + def write_face_attempt( + self, + frame: np.ndarray, + event_id: str, + timestamp: float, + sub_label: str, + score: float, + ) -> None: + if self.config.face_recognition.save_attempts: + # write face to library + folder = os.path.join(FACE_DIR, "train") + file = os.path.join( + folder, f"{event_id}-{timestamp}-{sub_label}-{score}.webp" + ) + os.makedirs(folder, exist_ok=True) + cv2.imwrite(file, frame) + + files = sorted( + filter(lambda f: (f.endswith(".webp")), os.listdir(folder)), + key=lambda f: os.path.getctime(os.path.join(folder, f)), + reverse=True, + ) + + # delete oldest face image if maximum is reached + if len(files) > self.config.face_recognition.save_attempts: + os.unlink(os.path.join(folder, files[-1])) diff --git a/web/src/hooks/use-contextmenu.ts b/web/src/hooks/use-contextmenu.ts index f121846ae..21c03e353 100644 --- a/web/src/hooks/use-contextmenu.ts +++ b/web/src/hooks/use-contextmenu.ts @@ -33,6 +33,7 @@ export default function useContextMenu( }; } else { const context = (e: MouseEvent) => { + e.stopPropagation(); e.preventDefault(); callback(); }; diff --git a/web/src/pages/FaceLibrary.tsx b/web/src/pages/FaceLibrary.tsx index df57f729a..0b18f3d39 100644 --- a/web/src/pages/FaceLibrary.tsx +++ b/web/src/pages/FaceLibrary.tsx @@ -142,29 +142,33 @@ export default function FaceLibrary() { const [selectedFaces, setSelectedFaces] = useState([]); - const 
onClickFace = useCallback( - (imageId: string, ctrl: boolean) => { + const onClickFaces = useCallback( + (images: string[], ctrl: boolean) => { if (selectedFaces.length == 0 && !ctrl) { return; } - const index = selectedFaces.indexOf(imageId); + let newSelectedFaces = [...selectedFaces]; - if (index != -1) { - if (selectedFaces.length == 1) { - setSelectedFaces([]); + images.forEach((imageId) => { + const index = newSelectedFaces.indexOf(imageId); + + if (index != -1) { + if (selectedFaces.length == 1) { + newSelectedFaces = []; + } else { + const copy = [ + ...newSelectedFaces.slice(0, index), + ...newSelectedFaces.slice(index + 1), + ]; + newSelectedFaces = copy; + } } else { - const copy = [ - ...selectedFaces.slice(0, index), - ...selectedFaces.slice(index + 1), - ]; - setSelectedFaces(copy); + newSelectedFaces.push(imageId); } - } else { - const copy = [...selectedFaces]; - copy.push(imageId); - setSelectedFaces(copy); - } + }); + + setSelectedFaces(newSelectedFaces); }, [selectedFaces, setSelectedFaces], ); @@ -212,7 +216,11 @@ export default function FaceLibrary() { switch (key) { case "a": if (modifiers.ctrl) { - setSelectedFaces([...trainImages]); + if (selectedFaces.length) { + setSelectedFaces([]); + } else { + setSelectedFaces([...trainImages]); + } } break; case "Escape": @@ -253,6 +261,16 @@ export default function FaceLibrary() { /> {selectedFaces?.length > 0 ? (
+
+
{`${selectedFaces.length} selected`}
+
{"|"}
+
setSelectedFaces([])} + > + {t("button.unselect", { ns: "common" })} +
+
+ +
+ + + + + + + + {trainImages.length > 0 && ( + setPageToggle("train")} + > +
{t("train.title")}
+
+ ({trainImages.length}) +
+
+ )} + {trainImages.length > 0 && faces.length > 0 && ( <> - -
{t("train.title")}
-
-
|
+ +
+ Collections +
)} - {Object.values(faces).map((face) => ( - -
- {face} ({faceData?.[face].length}) +
setPageToggle(face)} + > + {face} + + ({faceData?.[face].length}) +
- + + ))} - - -
- - ) : ( - - - - - - {trainImages.length > 0 && ( - setPageToggle("train")} - > -
{t("train.title")}
-
- )} - {Object.values(faces).map((face) => ( - setPageToggle(face)} - > - {face} ({faceData?.[face].length}) - - ))} -
-
+
+
+ ); } From 19fc63e3af212fe80cf2249be96bb9a92b29692c Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Thu, 3 Apr 2025 12:34:19 -0500 Subject: [PATCH 97/97] Face Library UI tweaks (#17525) * install react-dropzone * use react-dropzone with preview when uploading new face * spacing consistency * text tweaks --- web/package-lock.json | 46 ++++++- web/package.json | 1 + web/public/locales/en/views/faceLibrary.json | 21 ++- web/src/components/input/ImageEntry.tsx | 121 +++++++++++++++--- web/src/components/input/TextEntry.tsx | 35 ++--- .../overlay/detail/FaceCreateWizardDialog.tsx | 18 ++- 6 files changed, 191 insertions(+), 51 deletions(-) diff --git a/web/package-lock.json b/web/package-lock.json index 986677695..ebcdba519 100644 --- a/web/package-lock.json +++ b/web/package-lock.json @@ -56,6 +56,7 @@ "react-day-picker": "^8.10.1", "react-device-detect": "^2.2.3", "react-dom": "^18.3.1", + "react-dropzone": "^14.3.8", "react-grid-layout": "^1.5.0", "react-hook-form": "^7.52.1", "react-i18next": "^15.2.0", @@ -3526,6 +3527,15 @@ "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==" }, + "node_modules/attr-accept": { + "version": "2.2.5", + "resolved": "https://registry.npmjs.org/attr-accept/-/attr-accept-2.2.5.tgz", + "integrity": "sha512-0bDNnY/u6pPwHDMoF0FieU354oBi0a8rD9FcsLwzcGWbc8KS8KPIi7y+s13OlVY+gMWc/9xEMUgNE6Qm8ZllYQ==", + "license": "MIT", + "engines": { + "node": ">=4" + } + }, "node_modules/autoprefixer": { "version": "10.4.20", "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-10.4.20.tgz", @@ -5112,6 +5122,18 @@ "node": "^10.12.0 || >=12.0.0" } }, + "node_modules/file-selector": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/file-selector/-/file-selector-2.1.2.tgz", + "integrity": "sha512-QgXo+mXTe8ljeqUFaX3QVHc5osSItJ/Km+xpocx0aSqWGMSCf6qYs/VnzZgS864Pjn5iceMRFigeAV7AfTlaig==", + "license": "MIT", + "dependencies": { + "tslib": "^2.7.0" + }, + "engines": { + "node": ">= 12" + } + }, "node_modules/fill-range": { "version": "7.0.1", "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", @@ -7221,6 +7243,23 @@ "node": ">=6" } }, + "node_modules/react-dropzone": { + "version": "14.3.8", + "resolved": "https://registry.npmjs.org/react-dropzone/-/react-dropzone-14.3.8.tgz", + "integrity": "sha512-sBgODnq+lcA4P296DY4wacOZz3JFpD99fp+hb//iBO2HHnyeZU3FwWyXJ6salNpqQdsZrgMrotuko/BdJMV8Ug==", + "license": "MIT", + "dependencies": { + "attr-accept": "^2.2.4", + "file-selector": "^2.1.0", + "prop-types": "^15.8.1" + }, + "engines": { + "node": ">= 10.13" + }, + "peerDependencies": { + "react": ">= 16.8 || 18.0.0" + } + }, "node_modules/react-grid-layout": { "version": "1.5.0", "resolved": "https://registry.npmjs.org/react-grid-layout/-/react-grid-layout-1.5.0.tgz", @@ -8548,9 +8587,10 @@ "integrity": "sha512-Y/arvbn+rrz3JCKl9C4kVNfTfSm2/mEp5FSz5EsZSANGPSlQrpRI5M4PKF+mJnE52jOO90PnPSc3Ur3bTQw0gA==" }, "node_modules/tslib": { - "version": "2.6.2", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.2.tgz", - "integrity": "sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q==" + "version": "2.8.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz", + "integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==", + "license": "0BSD" }, 
"node_modules/type-check": { "version": "0.4.0", diff --git a/web/package.json b/web/package.json index 37233a976..7bcffad79 100644 --- a/web/package.json +++ b/web/package.json @@ -62,6 +62,7 @@ "react-day-picker": "^8.10.1", "react-device-detect": "^2.2.3", "react-dom": "^18.3.1", + "react-dropzone": "^14.3.8", "react-grid-layout": "^1.5.0", "react-hook-form": "^7.52.1", "react-i18next": "^15.2.0", diff --git a/web/public/locales/en/views/faceLibrary.json b/web/public/locales/en/views/faceLibrary.json index 0a4444ae5..ee3dc2c29 100644 --- a/web/public/locales/en/views/faceLibrary.json +++ b/web/public/locales/en/views/faceLibrary.json @@ -1,6 +1,7 @@ { "description": { - "addFace": "Walk through adding a new face to the Face Library." + "addFace": "Walk through adding a new collection to the Face Library.", + "placeholder": "Enter a name for this collection" }, "details": { "person": "Person", @@ -15,10 +16,10 @@ "desc": "Upload an image to scan for faces and include for {{pageToggle}}" }, "createFaceLibrary": { - "title": "Create Face Library", - "desc": "Create a new face library", + "title": "Create Collection", + "desc": "Create a new collection", "new": "Create New Face", - "nextSteps": "It is recommended to use the Train tab to select and train images for each person as they are detected. When building a strong foundation it is strongly recommended to only train on images that are straight-on. Ignore images from cameras that recognize faces from an angle." + "nextSteps": "To build a strong foundation:
  • Use the Train tab to select and train on images for each detected person.
  • Focus on straight-on images for best results; avoid training images that capture faces at an angle.
  • " }, "train": { "title": "Train", @@ -28,7 +29,7 @@ "selectFace": "Select Face", "deleteFaceLibrary": { "title": "Delete Name", - "desc": "Are you sure you want to delete {{name}}? This will permanently delete all associated faces." + "desc": "Are you sure you want to delete the collection {{name}}? This will permanently delete all associated faces." }, "button": { "deleteFaceAttempts": "Delete Face Attempts", @@ -36,7 +37,15 @@ "uploadImage": "Upload Image", "reprocessFace": "Reprocess Face" }, - "readTheDocs": "Read the documentation to view more details on refining images for the Face Library", + "imageEntry": { + "validation": { + "selectImage": "Please select an image file." + }, + "dropActive": "Drop the image here...", + "dropInstructions": "Drag and drop an image here, or click to select", + "maxSize": "Max size: {{size}}MB" + }, + "readTheDocs": "Read the documentation", "trainFaceAs": "Train Face as:", "trainFace": "Train Face", "toast": { diff --git a/web/src/components/input/ImageEntry.tsx b/web/src/components/input/ImageEntry.tsx index afb399177..1e64840be 100644 --- a/web/src/components/input/ImageEntry.tsx +++ b/web/src/components/input/ImageEntry.tsx @@ -1,38 +1,82 @@ -import { Form, FormControl, FormField, FormItem } from "@/components/ui/form"; -import { Input } from "@/components/ui/input"; +import { Button } from "@/components/ui/button"; +import { + Form, + FormControl, + FormField, + FormItem, + FormMessage, +} from "@/components/ui/form"; +import { cn } from "@/lib/utils"; import { zodResolver } from "@hookform/resolvers/zod"; -import React, { useCallback } from "react"; +import { useCallback, useState } from "react"; +import { useDropzone } from "react-dropzone"; import { useForm } from "react-hook-form"; - +import { useTranslation } from "react-i18next"; +import { LuUpload, LuX } from "react-icons/lu"; import { z } from "zod"; type ImageEntryProps = { onSave: (file: File) => void; children?: React.ReactNode; + maxSize?: number; + accept?: Record; }; -export default function ImageEntry({ onSave, children }: ImageEntryProps) { + +export default function ImageEntry({ + onSave, + children, + maxSize = 10 * 1024 * 1024, // 10MB default + accept = { "image/*": [".jpeg", ".jpg", ".png", ".gif", ".webp"] }, +}: ImageEntryProps) { + const { t } = useTranslation(["views/faceLibrary"]); + const [preview, setPreview] = useState(null); + const formSchema = z.object({ - file: z.instanceof(FileList, { message: "Please select an image file." }), + file: z.instanceof(File, { message: "Please select an image file." }), }); const form = useForm>({ resolver: zodResolver(formSchema), }); - const fileRef = form.register("file"); - // upload handler + const onDrop = useCallback( + (acceptedFiles: File[]) => { + if (acceptedFiles.length > 0) { + const file = acceptedFiles[0]; + form.setValue("file", file, { shouldValidate: true }); + + // Create preview + const objectUrl = URL.createObjectURL(file); + setPreview(objectUrl); + + // Clean up preview URL when component unmounts + return () => URL.revokeObjectURL(objectUrl); + } + }, + [form], + ); + + const { getRootProps, getInputProps, isDragActive, isDragReject } = + useDropzone({ + onDrop, + maxSize, + accept, + multiple: false, + }); const onSubmit = useCallback( (data: z.infer) => { - if (!data["file"] || Object.keys(data.file).length == 0) { - return; - } - - onSave(data["file"]["0"]); + if (!data.file) return; + onSave(data.file); }, [onSave], ); + const clearSelection = () => { + form.reset(); + setPreview(null); + }; + return (
    @@ -42,16 +86,55 @@ export default function ImageEntry({ onSave, children }: ImageEntryProps) { render={() => ( - +
    + {!preview ? ( +
    + + +

    + {isDragActive + ? t("imageEntry.dropActive") + : t("imageEntry.dropInstructions")} +

    +

    + {t("imageEntry.maxSize", { + size: Math.round(maxSize / (1024 * 1024)), + })} +

    +
    + ) : ( +
    + Preview + +
    + )} +
    +
    )} /> - {children} +
    {children}
    ); diff --git a/web/src/components/input/TextEntry.tsx b/web/src/components/input/TextEntry.tsx index c9fa8a8a9..92867f5e3 100644 --- a/web/src/components/input/TextEntry.tsx +++ b/web/src/components/input/TextEntry.tsx @@ -1,4 +1,10 @@ -import { Form, FormControl, FormField, FormItem } from "@/components/ui/form"; +import { + Form, + FormControl, + FormField, + FormItem, + FormMessage, +} from "@/components/ui/form"; import { Input } from "@/components/ui/input"; import { zodResolver } from "@hookform/resolvers/zod"; import React, { useCallback } from "react"; @@ -14,50 +20,47 @@ type TextEntryProps = { children?: React.ReactNode; }; export default function TextEntry({ - defaultValue, + defaultValue = "", placeholder, - allowEmpty, + allowEmpty = false, onSave, children, }: TextEntryProps) { const formSchema = z.object({ - text: z.string(), + text: allowEmpty + ? z.string().optional() + : z.string().min(1, "Field is required"), }); const form = useForm>({ resolver: zodResolver(formSchema), defaultValues: { text: defaultValue }, }); - const fileRef = form.register("text"); - - // upload handler const onSubmit = useCallback( (data: z.infer) => { - if (!allowEmpty && !data["text"]) { - return; - } - onSave(data["text"]); + onSave(data.text || ""); }, - [onSave, allowEmpty], + [onSave], ); return (
    - + ( + render={({ field }) => ( + )} /> diff --git a/web/src/components/overlay/detail/FaceCreateWizardDialog.tsx b/web/src/components/overlay/detail/FaceCreateWizardDialog.tsx index 659ac4c88..00e4b5c5f 100644 --- a/web/src/components/overlay/detail/FaceCreateWizardDialog.tsx +++ b/web/src/components/overlay/detail/FaceCreateWizardDialog.tsx @@ -20,7 +20,7 @@ import { cn } from "@/lib/utils"; import axios from "axios"; import { useCallback, useState } from "react"; import { isDesktop } from "react-device-detect"; -import { useTranslation } from "react-i18next"; +import { Trans, useTranslation } from "react-i18next"; import { LuExternalLink } from "react-icons/lu"; import { Link } from "react-router-dom"; import { toast } from "sonner"; @@ -101,7 +101,7 @@ export default function CreateFaceWizardDialog({ }} >
    {t("button.addFace")} @@ -110,7 +110,7 @@ export default function CreateFaceWizardDialog({ {step == 0 && ( { setName(name); setStep(1); @@ -133,12 +133,16 @@ export default function CreateFaceWizardDialog({ )} {step == 2 && ( -
    +
    {t("toast.success.addFaceLibrary", { name })} -

    - {t("createFaceLibrary.nextSteps")} +

    +

      + + createFaceLibrary.nextSteps + +

    -
    +