From 21e9b2f2ce918d8248f0bed8c3c8130d87392777 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Wed, 30 Jul 2025 07:06:39 -0600 Subject: [PATCH 001/144] Add docs for planning a setup (#19326) * Add docs for planning a setup * Add more granularity * Improve title * Add storage section * Fix level * Change named hardware * link to section Co-authored-by: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> --------- Co-authored-by: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> --- docs/docs/frigate/planning_setup.md | 74 +++++++++++++++++++++++++++++ docs/sidebars.ts | 1 + 2 files changed, 75 insertions(+) create mode 100644 docs/docs/frigate/planning_setup.md diff --git a/docs/docs/frigate/planning_setup.md b/docs/docs/frigate/planning_setup.md new file mode 100644 index 000000000..cddd50265 --- /dev/null +++ b/docs/docs/frigate/planning_setup.md @@ -0,0 +1,74 @@ +--- +id: planning_setup +title: Planning a New Installation +--- + +Choosing the right hardware for your Frigate NVR setup is important for optimal performance and a smooth experience. This guide will walk you through the key considerations, focusing on the number of cameras and the hardware required for efficient object detection. + +## Key Considerations + +### Number of Cameras and Simultaneous Activity + +The most fundamental factor in your hardware decision is the number of cameras you plan to use. However, it's not just about the raw count; it's also about how many of those cameras are likely to see activity and require object detection simultaneously. + +When motion is detected in a camera's feed, regions of that frame are sent to your chosen [object detection hardware](/configuration/object_detectors). + +- **Low Simultaneous Activity (1-6 cameras with occasional motion)**: If you have a few cameras in areas with infrequent activity (e.g., a seldom-used backyard, a quiet interior), the demand on your object detection hardware will be lower. 
A single, entry-level AI accelerator will suffice. +- **Moderate Simultaneous Activity (6-12 cameras with some overlapping motion)**: For setups with more cameras, especially in areas like a busy street or a property with multiple access points, it's more likely that several cameras will capture activity at the same time. This increases the load on your object detection hardware, requiring more processing power. +- **High Simultaneous Activity (12+ cameras or highly active zones)**: Large installations or scenarios where many cameras frequently capture activity (e.g., busy street with overview, identification, dedicated LPR cameras, etc.) will necessitate robust object detection capabilities. You'll likely need multiple entry-level AI accelerators or a more powerful single unit such as a discrete GPU. +- **Commercial Installations (40+ cameras)**: Commercial installations or scenarios where a substantial number of cameras capture activity (e.g., a commercial property, an active public space) will necessitate robust object detection capabilities. You'll likely need a modern discrete GPU. + +### Video Decoding + +Modern CPUs with integrated GPUs (Intel Quick Sync, AMD VCN) or dedicated GPUs can significantly offload video decoding from the main CPU, freeing up resources. This is highly recommended, especially for multiple cameras. + +:::tip + +For commercial installations it is important to verify the number of supported concurrent streams on your GPU, many consumer GPUs max out at ~20 concurrent camera streams. + +::: + +## Hardware Considerations + +### Object Detection + +There are many different hardware options for object detection depending on priorities and available hardware. See [the recommended hardware page](./hardware.md#detectors) for more specifics on what hardware is recommended for object detection. + +### Storage + +Storage is an important consideration when planning a new installation. 
To get a more precise estimate of your storage requirements, you can use an IP camera storage calculator. Websites like [IPConfigure Storage Calculator](https://calculator.ipconfigure.com/) can help you determine the necessary disk space based on your camera settings. + + +#### SSDs (Solid State Drives) + +SSDs are an excellent choice for Frigate, offering high speed and responsiveness. The older concern that SSDs would quickly "wear out" from constant video recording is largely no longer valid for modern consumer and enterprise-grade SSDs. + +- Longevity: Modern SSDs are designed with advanced wear-leveling algorithms and significantly higher "Terabytes Written" (TBW) ratings than earlier models. For typical home NVR use, a good quality SSD will likely outlast the useful life of your NVR hardware itself. +- Performance: SSDs excel at handling the numerous small write operations that occur during continuous video recording and can significantly improve the responsiveness of the Frigate UI and clip retrieval. +- Silence and Efficiency: SSDs produce no noise and consume less power than traditional HDDs. + +#### HDDs (Hard Disk Drives) + +Traditional Hard Disk Drives (HDDs) remain a great and often more cost-effective option for long-term video storage, especially for larger setups where raw capacity is prioritized. + +- Cost-Effectiveness: HDDs offer the best cost per gigabyte, making them ideal for storing many days, weeks, or months of continuous footage. +- Capacity: HDDs are available in much larger capacities than most consumer SSDs, which is beneficial for extensive video archives. +- NVR-Rated Drives: If choosing an HDD, consider drives specifically designed for surveillance (NVR) use, such as Western Digital Purple or Seagate SkyHawk. These drives are engineered for 24/7 operation and continuous write workloads, offering improved reliability compared to standard desktop drives. 
+ +Determining Your Storage Needs +The amount of storage you need will depend on several factors: + +- Number of Cameras: More cameras naturally require more space. +- Resolution and Framerate: Higher resolution (e.g., 4K) and higher framerate (e.g., 30fps) streams consume significantly more storage. +- Recording Method: Continuous recording uses the most space. motion-only recording or object-triggered recording can save space, but may miss some footage. +- Retention Period: How many days, weeks, or months of footage do you want to keep? + +#### Network Storage (NFS/SMB) + +While supported, using network-attached storage (NAS) for recordings can introduce latency and network dependency considerations. For optimal performance and reliability, it is generally recommended to have local storage for your Frigate recordings. If using a NAS, ensure your network connection to it is robust and fast (Gigabit Ethernet at minimum) and that the NAS itself can handle the continuous write load. + +### RAM (Memory) + +- **Basic Minimum: 4GB RAM**: This is generally sufficient for a very basic Frigate setup with a few cameras and a dedicated object detection accelerator, without running any enrichments. Performance might be tight, especially with higher resolution streams or numerous detections. +- **Minimum for Enrichments: 8GB RAM**: If you plan to utilize Frigate's enrichment features (e.g., facial recognition, license plate recognition, or other AI models that run alongside standard object detection), 8GB of RAM should be considered the minimum. Enrichments require additional memory to load and process their respective models and data. +- **Recommended: 16GB RAM**: For most users, especially those with many cameras (8+) or who plan to heavily leverage enrichments, 16GB of RAM is highly recommended. This provides ample headroom for smooth operation, reduces the likelihood of swapping to disk (which can impact performance), and allows for future expansion. 
\ No newline at end of file diff --git a/docs/sidebars.ts b/docs/sidebars.ts index 7007bb0f0..20ae2b0ac 100644 --- a/docs/sidebars.ts +++ b/docs/sidebars.ts @@ -7,6 +7,7 @@ const sidebars: SidebarsConfig = { Frigate: [ 'frigate/index', 'frigate/hardware', + 'frigate/planning_setup', 'frigate/installation', 'frigate/updating', 'frigate/camera_setup', From b5067c07f859bdfd30a844b31c9a4e2fc455d321 Mon Sep 17 00:00:00 2001 From: boc-the-git <3479092+boc-the-git@users.noreply.github.com> Date: Fri, 1 Aug 2025 21:51:18 +1000 Subject: [PATCH 002/144] Remove deprecated 'version' attribute (#19347) --- docs/docs/frigate/installation.md | 1 - 1 file changed, 1 deletion(-) diff --git a/docs/docs/frigate/installation.md b/docs/docs/frigate/installation.md index 2c139d2bd..917913528 100644 --- a/docs/docs/frigate/installation.md +++ b/docs/docs/frigate/installation.md @@ -187,7 +187,6 @@ Next, you should configure [hardware object detection](/configuration/object_det Running in Docker with compose is the recommended install method. 
```yaml -version: "3.9" services: frigate: container_name: frigate From 369e6ba2c2ee3a3254b4d0e1a619150a46f42662 Mon Sep 17 00:00:00 2001 From: Hosted Weblate Date: Tue, 5 Aug 2025 17:23:59 +0200 Subject: [PATCH 003/144] Translated using Weblate (Lithuanian) Currently translated at 7.8% (9 of 114 strings) Translated using Weblate (Lithuanian) Currently translated at 2.2% (8 of 352 strings) Translated using Weblate (Lithuanian) Currently translated at 20.8% (10 of 48 strings) Translated using Weblate (Lithuanian) Currently translated at 8.7% (7 of 80 strings) Translated using Weblate (Lithuanian) Currently translated at 11.2% (7 of 62 strings) Translated using Weblate (Lithuanian) Currently translated at 5.2% (6 of 115 strings) Translated using Weblate (Lithuanian) Currently translated at 13.6% (9 of 66 strings) Translated using Weblate (Lithuanian) Currently translated at 18.0% (9 of 50 strings) Translated using Weblate (Lithuanian) Currently translated at 21.7% (10 of 46 strings) Translated using Weblate (Lithuanian) Currently translated at 8.4% (36 of 427 strings) Co-authored-by: Hosted Weblate Co-authored-by: Povilas Arlauskas Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/audio/lt/ Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/components-camera/lt/ Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/components-dialog/lt/ Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/components-filter/lt/ Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/views-explore/lt/ Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/views-facelibrary/lt/ Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/views-live/lt/ Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/views-search/lt/ Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/views-settings/lt/ Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/views-system/lt/ Translation: Frigate NVR/audio 
Translation: Frigate NVR/components-camera Translation: Frigate NVR/components-dialog Translation: Frigate NVR/components-filter Translation: Frigate NVR/views-explore Translation: Frigate NVR/views-facelibrary Translation: Frigate NVR/views-live Translation: Frigate NVR/views-search Translation: Frigate NVR/views-settings Translation: Frigate NVR/views-system --- web/public/locales/lt/audio.json | 9 ++++++++- web/public/locales/lt/components/camera.json | 17 ++++++++++++++++- web/public/locales/lt/components/dialog.json | 18 +++++++++++++++++- web/public/locales/lt/components/filter.json | 12 +++++++++++- web/public/locales/lt/views/explore.json | 11 ++++++++++- web/public/locales/lt/views/faceLibrary.json | 6 ++++++ web/public/locales/lt/views/live.json | 10 +++++++++- web/public/locales/lt/views/search.json | 16 +++++++++++++++- web/public/locales/lt/views/settings.json | 8 +++++++- web/public/locales/lt/views/system.json | 15 ++++++++++++++- 10 files changed, 113 insertions(+), 9 deletions(-) diff --git a/web/public/locales/lt/audio.json b/web/public/locales/lt/audio.json index cbdaff5e4..7f9bbc8a4 100644 --- a/web/public/locales/lt/audio.json +++ b/web/public/locales/lt/audio.json @@ -27,5 +27,12 @@ "vehicle": "Mašina", "animal": "Gyvūnas", "bark": "Lojimas", - "goat": "Ožka" + "goat": "Ožka", + "bellow": "Apačioje", + "whoop": "Rėkavimas", + "whispering": "Šnabždėjimas", + "laughter": "Juokas", + "snicker": "Kikenimas", + "crying": "Verkimas", + "singing": "Dainavimas" } diff --git a/web/public/locales/lt/components/camera.json b/web/public/locales/lt/components/camera.json index 92dd1acac..11639ade2 100644 --- a/web/public/locales/lt/components/camera.json +++ b/web/public/locales/lt/components/camera.json @@ -2,6 +2,21 @@ "group": { "label": "Kamerų Grupės", "add": "Sukurti Kamerų Grupę", - "edit": "Modifikuoti Kamerų Grupę" + "edit": "Modifikuoti Kamerų Grupę", + "delete": { + "label": "Ištrinti Kamerų Grupę", + "confirm": { + "title": "Patvirtinti 
ištrynimą", + "desc": "Ar tikrai norite ištrinti šią kamerų grupę {{name}}?" + } + }, + "name": { + "label": "Pavadinimas", + "placeholder": "Įveskite pavadinimą…", + "errorMessage": { + "mustLeastCharacters": "Kamerų grupės pavadinimas turi būti bent 2 simbolių.", + "exists": "Kamerų grupės pavadinimas jau egzistuoja." + } + } } } diff --git a/web/public/locales/lt/components/dialog.json b/web/public/locales/lt/components/dialog.json index 67ce6dd28..4feb8d583 100644 --- a/web/public/locales/lt/components/dialog.json +++ b/web/public/locales/lt/components/dialog.json @@ -3,7 +3,23 @@ "title": "Ar įsitikinę kad norite perkrauti Frigate?", "button": "Perkrauti", "restarting": { - "title": "Frigate Persikrauna" + "title": "Frigate Persikrauna", + "content": "Šis puslapis persikraus už {{countdown}} sekundžių.", + "button": "Priverstinai Perkrauti Dabar" + } + }, + "explore": { + "plus": { + "review": { + "question": { + "ask_a": "Ar šis objektas yra {{label}}?", + "ask_an": "Ar šis objektas yra {{label}}?", + "label": "Patvirtinti šią etiketę į Frigate Plus" + } + }, + "submitToPlus": { + "label": "Pateiktį į Frigate+" + } } } } diff --git a/web/public/locales/lt/components/filter.json b/web/public/locales/lt/components/filter.json index 9f536682e..fff4ed16b 100644 --- a/web/public/locales/lt/components/filter.json +++ b/web/public/locales/lt/components/filter.json @@ -3,7 +3,17 @@ "labels": { "label": "Etiketės", "all": { - "title": "Visos Etiketės" + "title": "Visos Etiketės", + "short": "Etiketės" + }, + "count_one": "{{count}} Etiketė", + "count_other": "{{count}} Etiketės" + }, + "zones": { + "label": "Zonos", + "all": { + "title": "Visos Zonos", + "short": "Zonos" } } } diff --git a/web/public/locales/lt/views/explore.json b/web/public/locales/lt/views/explore.json index 6c1a674a3..0681c40c7 100644 --- a/web/public/locales/lt/views/explore.json +++ b/web/public/locales/lt/views/explore.json @@ -1,5 +1,14 @@ { "documentTitle": "Tyrinėti - Frigate", 
"generativeAI": "Generatyvinis DI", - "exploreMore": "Apžvelgti daugiau {{label}} objektų" + "exploreMore": "Apžvelgti daugiau {{label}} objektų", + "exploreIsUnavailable": { + "embeddingsReindexing": { + "startingUp": "Paleidžiama…", + "estimatedTime": "Apytikris likęs laikas:" + } + }, + "details": { + "timestamp": "Laiko žyma" + } } diff --git a/web/public/locales/lt/views/faceLibrary.json b/web/public/locales/lt/views/faceLibrary.json index 661af3f69..d4dce21f3 100644 --- a/web/public/locales/lt/views/faceLibrary.json +++ b/web/public/locales/lt/views/faceLibrary.json @@ -3,5 +3,11 @@ "addFace": "Apžiūrėkite naujų kolekcijų pridėjimą prie Veidų Bibliotekos.", "placeholder": "Įveskite pavadinimą šiai kolekcijai", "invalidName": "Netinkamas vardas. Vardai gali turėti tik raides, numerius, tarpus, apostrofus, pabraukimus ir brukšnelius." + }, + "details": { + "person": "Žmogus", + "face": "Veido detelės", + "timestamp": "Laiko žyma", + "unknown": "Nežinoma" } } diff --git a/web/public/locales/lt/views/live.json b/web/public/locales/lt/views/live.json index aba6862ad..5779ff4c9 100644 --- a/web/public/locales/lt/views/live.json +++ b/web/public/locales/lt/views/live.json @@ -1,5 +1,13 @@ { "documentTitle": "Gyvai - Frigate", "documentTitle.withCamera": "{{camera}} - Tiesiogiai - Frigate", - "lowBandwidthMode": "Mažo-pralaidumo Rėžimas" + "lowBandwidthMode": "Mažo-pralaidumo Rėžimas", + "cameraAudio": { + "enable": "Įgalinti Kamerų Garsą", + "disable": "Išjungti Kamerų Garsą" + }, + "twoWayTalk": { + "enable": "Įgalinti Dvipusį Pokalbį", + "disable": "Išjungti Dvipusį Pokalbį" + } } diff --git a/web/public/locales/lt/views/search.json b/web/public/locales/lt/views/search.json index be1c5ef31..d970b3d2d 100644 --- a/web/public/locales/lt/views/search.json +++ b/web/public/locales/lt/views/search.json @@ -1,4 +1,18 @@ { "search": "Paieška", - "savedSearches": "Išsaugotos Paieškos" + "savedSearches": "Išsaugotos Paieškos", + "searchFor": "Ieškoti {{inputValue}}", + 
"button": { + "clear": "Išvalyti paiešką", + "save": "Išsaugoti paiešką", + "delete": "Ištrinti išsaugotą paiešką", + "filterInformation": "Filtruoti informaciją", + "filterActive": "Aktyvūs filtrai" + }, + "trackedObjectId": "Sekamo Objekto ID", + "filter": { + "label": { + "cameras": "Kameros" + } + } } diff --git a/web/public/locales/lt/views/settings.json b/web/public/locales/lt/views/settings.json index 1325b5155..15a9e53c7 100644 --- a/web/public/locales/lt/views/settings.json +++ b/web/public/locales/lt/views/settings.json @@ -1,6 +1,12 @@ { "documentTitle": { "default": "Nustatymai - Frigate", - "authentication": "Autentifikavimo Nustatymai - Frigate" + "authentication": "Autentifikavimo Nustatymai - Frigate", + "camera": "Kameros Nustatymai - Frigate", + "object": "Derinti - Frigate", + "general": "Bendrieji Nustatymai - Frigate", + "frigatePlus": "Frigate+ Nustatymai - Frigate", + "notifications": "Pranešimų Nustatymai - Frigate", + "motionTuner": "Judesio Derinimas - Frigate" } } diff --git a/web/public/locales/lt/views/system.json b/web/public/locales/lt/views/system.json index affc4e658..fb9784cf7 100644 --- a/web/public/locales/lt/views/system.json +++ b/web/public/locales/lt/views/system.json @@ -1,6 +1,19 @@ { "documentTitle": { "cameras": "Kamerų Statistika - Frigate", - "storage": "Saugyklos Statistika - Frigate" + "storage": "Saugyklos Statistika - Frigate", + "logs": { + "frigate": "Frigate Žurnalas - Frigate", + "go2rtc": "Go2RTC Žurnalas - Frigate", + "nginx": "Nginx Žurnalas - Frigate" + }, + "general": "Bendroji Statistika - Frigate" + }, + "title": "Sistema", + "metrics": "Sistemos metrikos", + "logs": { + "download": { + "label": "Parsisiųsti Žurnalą" + } } } From 99f9c1529dea885a5d927c3ff9f880d851be47ed Mon Sep 17 00:00:00 2001 From: Hosted Weblate Date: Tue, 5 Aug 2025 17:23:59 +0200 Subject: [PATCH 004/144] Translated using Weblate (Portuguese (Brazil)) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 
8bit Currently translated at 85.9% (98 of 114 strings) Translated using Weblate (Portuguese (Brazil)) Currently translated at 27.8% (98 of 352 strings) Translated using Weblate (Portuguese (Brazil)) Currently translated at 100.0% (62 of 62 strings) Translated using Weblate (Portuguese (Brazil)) Currently translated at 85.2% (98 of 115 strings) Translated using Weblate (Portuguese (Brazil)) Currently translated at 83.8% (99 of 118 strings) Translated using Weblate (Portuguese (Brazil)) Currently translated at 54.0% (99 of 183 strings) Translated using Weblate (Portuguese (Brazil)) Currently translated at 27.4% (117 of 427 strings) Update translation files Updated by "Squash Git commits" add-on in Weblate. Update translation files Updated by "Squash Git commits" add-on in Weblate. Translated using Weblate (Portuguese (Brazil)) Currently translated at 76.3% (87 of 114 strings) Translated using Weblate (Portuguese (Brazil)) Currently translated at 24.7% (87 of 352 strings) Translated using Weblate (Portuguese (Brazil)) Currently translated at 100.0% (80 of 80 strings) Translated using Weblate (Portuguese (Brazil)) Currently translated at 76.5% (88 of 115 strings) Translated using Weblate (Portuguese (Brazil)) Currently translated at 48.6% (89 of 183 strings) Translated using Weblate (Portuguese (Brazil)) Currently translated at 65.7% (75 of 114 strings) Translated using Weblate (Portuguese (Brazil)) Currently translated at 65.7% (75 of 114 strings) Translated using Weblate (Portuguese (Brazil)) Currently translated at 21.3% (75 of 352 strings) Translated using Weblate (Portuguese (Brazil)) Currently translated at 100.0% (48 of 48 strings) Translated using Weblate (Portuguese (Brazil)) Currently translated at 93.7% (75 of 80 strings) Translated using Weblate (Portuguese (Brazil)) Currently translated at 98.3% (61 of 62 strings) Translated using Weblate (Portuguese (Brazil)) Currently translated at 98.3% (61 of 62 strings) Translated using Weblate (Portuguese (Brazil)) 
Currently translated at 66.0% (76 of 115 strings) Translated using Weblate (Portuguese (Brazil)) Currently translated at 100.0% (66 of 66 strings) Translated using Weblate (Portuguese (Brazil)) Currently translated at 100.0% (66 of 66 strings) Translated using Weblate (Portuguese (Brazil)) Currently translated at 100.0% (50 of 50 strings) Translated using Weblate (Portuguese (Brazil)) Currently translated at 100.0% (46 of 46 strings) Translated using Weblate (Portuguese (Brazil)) Currently translated at 65.2% (77 of 118 strings) Translated using Weblate (Portuguese (Brazil)) Currently translated at 41.5% (76 of 183 strings) Translated using Weblate (Portuguese (Brazil)) Currently translated at 20.6% (88 of 427 strings) Co-authored-by: Hosted Weblate Co-authored-by: Marcelo Popper Costa Co-authored-by: Rogério Mendes Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/audio/pt_BR/ Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/common/ Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/common/pt_BR/ Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/components-camera/pt_BR/ Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/components-dialog/pt_BR/ Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/components-filter/pt_BR/ Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/objects/pt_BR/ Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/views-explore/pt_BR/ Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/views-facelibrary/pt_BR/ Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/views-live/pt_BR/ Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/views-search/pt_BR/ Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/views-settings/pt_BR/ Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/views-system/pt_BR/ Translation: Frigate NVR/audio Translation: Frigate NVR/common Translation: Frigate 
NVR/components-camera Translation: Frigate NVR/components-dialog Translation: Frigate NVR/components-filter Translation: Frigate NVR/objects Translation: Frigate NVR/views-explore Translation: Frigate NVR/views-facelibrary Translation: Frigate NVR/views-live Translation: Frigate NVR/views-search Translation: Frigate NVR/views-settings Translation: Frigate NVR/views-system --- web/public/locales/pt-BR/audio.json | 66 ++++++++++- web/public/locales/pt-BR/common.json | 84 +++++++++++++- .../locales/pt-BR/components/camera.json | 4 +- .../locales/pt-BR/components/dialog.json | 10 +- .../locales/pt-BR/components/filter.json | 48 +++++++- web/public/locales/pt-BR/objects.json | 60 +++++++++- web/public/locales/pt-BR/views/explore.json | 105 +++++++++++++++++- .../locales/pt-BR/views/faceLibrary.json | 28 ++++- web/public/locales/pt-BR/views/live.json | 62 ++++++++++- web/public/locales/pt-BR/views/search.json | 7 +- web/public/locales/pt-BR/views/settings.json | 71 +++++++++++- web/public/locales/pt-BR/views/system.json | 83 +++++++++++++- 12 files changed, 607 insertions(+), 21 deletions(-) diff --git a/web/public/locales/pt-BR/audio.json b/web/public/locales/pt-BR/audio.json index dba654e5c..5e7560320 100644 --- a/web/public/locales/pt-BR/audio.json +++ b/web/public/locales/pt-BR/audio.json @@ -51,5 +51,69 @@ "gargling": "Gargarejo", "stomach_rumble": "Ronco de Estômago", "burping": "Arroto", - "skateboard": "Skate" + "skateboard": "Skate", + "hiccup": "Soluço", + "fart": "Flatulência", + "hands": "Mãos", + "finger_snapping": "Estalar de Dedos", + "clapping": "Palmas", + "heartbeat": "Batida de Coração", + "heart_murmur": "Sopro Cardíaco", + "cheering": "Comemoração", + "applause": "Aplausos", + "chatter": "Conversa", + "crowd": "Multidão", + "children_playing": "Crianças Brincando", + "animal": "Animal", + "pets": "Animais de Estimação", + "bark": "Latido", + "yip": "Latido / Grito Agudo", + "howl": "Uivado", + "bow_wow": "Bow Wow", + "growling": "Rosnado", + 
"whimper_dog": "Choro de Cachorro", + "purr": "Ronronado", + "meow": "Miado", + "hiss": "Sibilo", + "caterwaul": "Lamúria", + "livestock": "Animais de Criação", + "clip_clop": "Galope", + "neigh": "Relincho", + "door": "Porta", + "cattle": "Gado", + "moo": "Mugido", + "cowbell": "Sino de Vaca", + "mouse": "Rato", + "pig": "Porco", + "oink": "Grunhido de Porco", + "keyboard": "Teclado", + "goat": "Cabra", + "bleat": "Balido", + "fowl": "Ave", + "chicken": "Galinha", + "sink": "Pia", + "cluck": "Cacarejo", + "cock_a_doodle_doo": "Cacarejado", + "blender": "Liquidificador", + "turkey": "Peru", + "gobble": "Deglutição", + "clock": "Relógio", + "duck": "Pato", + "quack": "Grasnado", + "scissors": "Tesouras", + "goose": "Ganso", + "honk": "Buzina", + "hair_dryer": "Secador de Cabelo", + "wild_animals": "Animais Selvagens", + "toothbrush": "Escova de Dentes", + "roaring_cats": "Felinos Rugindo", + "roar": "Rugido", + "vehicle": "Veículo", + "chirp": "Piado", + "squawk": "Guincho Animal", + "pigeon": "Pombo", + "dogs": "Cachorros", + "rats": "Ratos", + "coo": "Arrulhado de Pombo", + "crow": "Corvo" } diff --git a/web/public/locales/pt-BR/common.json b/web/public/locales/pt-BR/common.json index 9224bd220..9c763ccb7 100644 --- a/web/public/locales/pt-BR/common.json +++ b/web/public/locales/pt-BR/common.json @@ -57,7 +57,89 @@ "formattedTimestampHourMinute": { "12hour": "h:mm aaa", "24hour": "HH:mm" + }, + "formattedTimestampHourMinuteSecond": { + "12hour": "h:mm:ss aaa", + "24hour": "HH:mm:ss" + }, + "formattedTimestampMonthDayHourMinute": { + "12hour": "d MMM, h:mm aaa", + "24hour": "d MMM, HH:mm" + }, + "formattedTimestampMonthDayYear": { + "12hour": "d MMM, yyyy", + "24hour": "d MMM, yyyy" + }, + "formattedTimestampMonthDayYearHourMinute": { + "12hour": "d MMM yyyy, h:mm aaa", + "24hour": "d MMM yyyy, HH:mm" + }, + "formattedTimestampMonthDay": "d MMM", + "formattedTimestampFilename": { + "12hour": "dd-MM-yy-hh-mm-ss", + "24hour": "dd-MM-yy-HH-mm-ss" } }, - "selectItem": 
"Selecione {{item}}" + "selectItem": "Selecione {{item}}", + "unit": { + "speed": { + "mph": "mph", + "kph": "km/h" + }, + "length": { + "feet": "pés", + "meters": "metros" + } + }, + "label": { + "back": "Voltar" + }, + "button": { + "apply": "Aplicar", + "reset": "Resetar", + "done": "Concluído", + "enabled": "Habilitado", + "enable": "Habilitar", + "disabled": "Desativado", + "disable": "Desativar", + "save": "Salvar", + "saving": "Salvando…", + "cancel": "Cancelar", + "close": "Fechar", + "copy": "Copiar", + "back": "Voltar", + "history": "Histórico", + "fullscreen": "Tela Inteira", + "exitFullscreen": "Sair da Tela Inteira", + "pictureInPicture": "Miniatura Flutuante", + "twoWayTalk": "Áudio Bidirecional", + "cameraAudio": "Áudio da Câmera", + "on": "LIGADO", + "off": "DESLIGADO", + "edit": "Editar", + "copyCoordinates": "Copiar coordenadas", + "delete": "Deletar", + "yes": "Sim", + "no": "Não", + "download": "Baixar", + "info": "Informação", + "suspended": "Suspenso", + "unsuspended": "Não Suspenso", + "play": "Reproduzir", + "unselect": "Deselecionar", + "export": "Exportar", + "deleteNow": "Deletar Agora", + "next": "Próximo" + }, + "menu": { + "system": "Sistema", + "systemMetrics": "Métricas de sistema", + "configuration": "Configuração", + "language": { + "hi": "हिन्दी (Hindi)", + "fr": "Français (Francês)" + }, + "systemLogs": "Logs de sistema", + "settings": "Configurações" + } } diff --git a/web/public/locales/pt-BR/components/camera.json b/web/public/locales/pt-BR/components/camera.json index 305bff197..322e63522 100644 --- a/web/public/locales/pt-BR/components/camera.json +++ b/web/public/locales/pt-BR/components/camera.json @@ -79,6 +79,8 @@ "zones": "Zonas", "mask": "Máscara", "motion": "Movimento", - "regions": "Regiões" + "regions": "Regiões", + "boundingBox": "Caixa Delimitadora", + "timestamp": "Timestamp" } } diff --git a/web/public/locales/pt-BR/components/dialog.json b/web/public/locales/pt-BR/components/dialog.json index 
32249f87e..c3e450f27 100644 --- a/web/public/locales/pt-BR/components/dialog.json +++ b/web/public/locales/pt-BR/components/dialog.json @@ -100,11 +100,15 @@ "selected": "Tem certeza de que deseja excluir todos os vídeos gravados associados a este item de revisão?

Segure a tecla Shift para ignorar esta caixa de diálogo no futuro." }, "toast": { - "success": "As filmagens associadas aos itens de revisão selecionados foram excluídas com sucesso." - } + "success": "As filmagens associadas aos itens de revisão selecionados foram excluídas com sucesso.", + "error": "Falha ao deletar: {{error}}" + }, + "title": "Confirmar Exclusão" }, "button": { - "markAsReviewed": "Marcar como revisado" + "markAsReviewed": "Marcar como revisado", + "export": "Exportar", + "deleteNow": "Deletar Agora" } } } diff --git a/web/public/locales/pt-BR/components/filter.json b/web/public/locales/pt-BR/components/filter.json index 8bfd927b6..6e28b1c69 100644 --- a/web/public/locales/pt-BR/components/filter.json +++ b/web/public/locales/pt-BR/components/filter.json @@ -71,14 +71,56 @@ "title": "Configurações", "defaultView": { "title": "Visualização Padrão", - "desc": "Quando nenhum filtro é selecionado, exibir um sumário dos objetos mais recentes rastreados por categoria, ou exiba uma grade sem filtro." + "desc": "Quando nenhum filtro é selecionado, exibir um sumário dos objetos mais recentes rastreados por categoria, ou exiba uma grade sem filtro.", + "summary": "Sumário", + "unfilteredGrid": "Grade Sem Filtros" }, "gridColumns": { - "desc": "Selecione o número de colunas na visualização em grade." + "desc": "Selecione o número de colunas na visualização em grade.", + "title": "Colunas de Grade" }, "searchSource": { - "desc": "Escolha se deseja pesquisar nas miniaturas ou descrições dos seus objetos rastreados." 
+ "desc": "Escolha se deseja pesquisar nas miniaturas ou descrições dos seus objetos rastreados.", + "label": "Buscar Fonte", + "options": { + "thumbnailImage": "Imagem da Miniatura", + "description": "Descrição" + } + } + }, + "date": { + "selectDateBy": { + "label": "Selecione uma data para filtrar" } } + }, + "logSettings": { + "label": "Nível de filtro de log", + "filterBySeverity": "Filtrar logs por severidade", + "loading": { + "title": "Carregando", + "desc": "Quando o painel de log é rolado para baixo, novos logs são transmitidos automaticamente conforme são adicionados." + }, + "disableLogStreaming": "Desativar o log de tranmissão", + "allLogs": "Todos os logs" + }, + "trackedObjectDelete": { + "title": "Confirmar Exclusão", + "desc": "Deletar esses {{objectLength}} objetos rastreados remove as capturas de imagem, qualquer embeddings salvos, e quaisquer entradas do ciclo de vida associadas do objeto. Gravações desses objetos rastreados na visualização de Histórico NÃO irão ser deletadas.

Tem certeza que quer proceder?

Segure a tecla Shift para pular esse diálogo no futuro.", + "toast": { + "success": "Objetos rastreados deletados com sucesso.", + "error": "Falha ao deletar objeto rastreado: {{errorMessage}}" + } + }, + "zoneMask": { + "filterBy": "Filtrar por máscara de zona" + }, + "recognizedLicensePlates": { + "title": "Placas de Identificação Reconhecidas", + "loadFailed": "Falha ao carregar placas de identificação reconhecidas.", + "loading": "Carregando placas de identificação reconhecidas…", + "placeholder": "Digite para pesquisar por placas de identificação…", + "noLicensePlatesFound": "Nenhuma placa de identificação encontrada.", + "selectPlatesFromList": "Seleciona uma ou mais placas da lista." } } diff --git a/web/public/locales/pt-BR/objects.json b/web/public/locales/pt-BR/objects.json index 40c632c16..830c7efcf 100644 --- a/web/public/locales/pt-BR/objects.json +++ b/web/public/locales/pt-BR/objects.json @@ -39,5 +39,63 @@ "baseball_bat": "Taco de Basebol", "baseball_glove": "Luva de Basebol", "skateboard": "Skate", - "plate": "Placa" + "plate": "Placa", + "surfboard": "Prancha de Surfe", + "tennis_racket": "Raquete de Tênis", + "bottle": "Garrafa", + "wine_glass": "Garrafa de Vinho", + "cup": "Copo", + "fork": "Garfo", + "knife": "Faca", + "spoon": "Colher", + "bowl": "Tigela", + "banana": "Banana", + "apple": "Maçã", + "animal": "Animal", + "sandwich": "Sanduíche", + "orange": "Laranja", + "broccoli": "Brócolis", + "bark": "Latido", + "carrot": "Cenoura", + "hot_dog": "Cachorro-Quente", + "pizza": "Pizza", + "donut": "Donut", + "cake": "Bolo", + "chair": "Cadeira", + "couch": "Sofá", + "potted_plant": "Planta em Vaso", + "bed": "Cama", + "mirror": "Espelho", + "dining_table": "Mesa de Jantar", + "window": "Janela", + "desk": "Mesa", + "toilet": "Vaso Sanitário", + "door": "Porta", + "tv": "TV", + "laptop": "Laptop", + "mouse": "Rato", + "remote": "Controle Remoto", + "keyboard": "Teclado", + "goat": "Cabra", + "cell_phone": "Celular", + "microwave": "Microondas", 
+ "oven": "Forno", + "toaster": "Torradeira", + "sink": "Pia", + "refrigerator": "Geladeira", + "blender": "Liquidificador", + "book": "Livro", + "clock": "Relógio", + "vase": "Vaso", + "scissors": "Tesouras", + "teddy_bear": "Ursinho de Pelúcia", + "hair_dryer": "Secador de Cabelo", + "toothbrush": "Escova de Dentes", + "hair_brush": "Escova de Cabelo", + "vehicle": "Veículo", + "squirrel": "Esquilo", + "deer": "Veado", + "on_demand": "Sob Demanda", + "face": "Rosto", + "fox": "Raposa" } diff --git a/web/public/locales/pt-BR/views/explore.json b/web/public/locales/pt-BR/views/explore.json index 923eeca41..9d87f3579 100644 --- a/web/public/locales/pt-BR/views/explore.json +++ b/web/public/locales/pt-BR/views/explore.json @@ -31,7 +31,75 @@ } }, "details": { - "timestamp": "Carimbo de data e hora" + "timestamp": "Carimbo de data e hora", + "item": { + "title": "Rever Detalhe dos itens", + "desc": "Revisar os detalhes do item", + "button": { + "share": "Compartilhar esse item revisado", + "viewInExplore": "Ver em Explorar" + }, + "tips": { + "mismatch_one": "{{count}} objeto indisponível foi detectado e incluido nesse item de revisão. Esse objeto ou não se qualifica para um alerta ou detecção, ou já foi limpo/deletado.", + "mismatch_many": "{{count}} objetos indisponíveis foram detectados e incluídos nesse item de revisão. Esses objetos ou não se qualificam para um alerta ou detecção, ou já foi limpo/deletado.", + "mismatch_other": "", + "hasMissingObjects": "Ajustar a sua configuração se quiser que o Frigate salve objetos rastreados com as seguintes categorias: {{objects}}" + }, + "toast": { + "success": { + "regenerate": "Uma nova descrição foi solicitada do {{provider}}. Dependendo da velocidade do seu fornecedor, a nova descrição pode levar algum tempo para regenerar.", + "updatedSublabel": "Sub-categoria atualizada com sucesso.", + "updatedLPR": "Placa de identificação atualizada com sucesso." 
+ }, + "error": { + "regenerate": "Falha ao chamar {{provider}} para uma descrição nova: {{errorMessage}}", + "updatedSublabelFailed": "Falha ao atualizar sub-categoria: {{errorMessage}}", + "updatedLPRFailed": "Falha ao atualizar placa de identificação: {{errorMessage}}" + } + } + }, + "label": "Categoria", + "editSubLabel": { + "title": "Editar sub-categoria", + "desc": "Nomeie uma nova sub-categoria para esse(a) {{label}}", + "descNoLabel": "Nomeie uma nova sub-categoria para esse objeto rastreado" + }, + "editLPR": { + "title": "Editar placa de identificação", + "desc": "Insira um valor de placa de identificação para esse(a) {{label}}", + "descNoLabel": "Insira um novo valor de placa de identificação para esse objeto rastreado" + }, + "snapshotScore": { + "label": "Pontuação da Captura de Imagem" + }, + "topScore": { + "label": "Pontuação Mais Alta", + "info": "A pontuação mais alta é a pontuação mediana mais alta para o objeto rastreado, então pode ser diferente da pontuação mostrada na miniatura dos resultados de busca." + }, + "recognizedLicensePlate": "Placa de Identificação Reconhecida", + "estimatedSpeed": "Velocidade Estimada", + "objects": "Objetos", + "camera": "Câmera", + "zones": "Zonas", + "button": { + "findSimilar": "Encontrar Semelhante", + "regenerate": { + "title": "Regenerar", + "label": "Regenerar descrição de objetos rastreados" + } + }, + "description": { + "label": "Descrição", + "placeholder": "Descrição do objeto rastreado", + "aiTips": "O Frigate não solicitará a descrição do seu fornecedor de IA Generativa até que o ciclo de vida do objeto rastreado tenha finalizado." 
+ }, + "expandRegenerationMenu": "Expandir menu de regeneração", + "regenerateFromSnapshot": "Regenerar a partir de Captura de Imagem", + "regenerateFromThumbnails": "Regenerar a partir de Miniaturas", + "tips": { + "descriptionSaved": "Descrição salva com sucesso", + "saveDescriptionFailed": "Falha ao atualizar a descrição: {{errorMessage}}" + } }, "trackedObjectDetails": "Detalhes do Objeto Rastreado", "type": { @@ -62,14 +130,43 @@ "heard": "{{label}} escutado(a)", "header": { "zones": "Zonas", - "area": "Área" - } + "area": "Área", + "ratio": "Proporção" + }, + "external": "{{label}} detectado(a)" }, "annotationSettings": { "title": "Configurações de anotação", "showAllZones": { - "title": "Mostrar todas as zonas" + "title": "Mostrar todas as zonas", + "desc": "Sempre exibir zonas nos quadros em que objetos entraram em uma zona." + }, + "offset": { + "label": "Deslocamento da Anotação", + "desc": "Esses dados vêm do feed de detecção da sua câmera, porém estão sobrepondo imagens da gravação. É improvável que duas transmissões estejam perfeitamente sincronizadas. Como resultado, as caixas delimitadoras e a gravação não se alinharam perfeitamente. Porém, o campo annotation_offset pode ser utilizado para ajustar isso.", + "documentation": "Leia a documentação. ", + "millisecondsToOffset": "Milissegundos para separar detecções de anotações. Padrão: 0", + "tips": "DICA: Imagine que haja um clipe de evento com uma pessoa caminhando da esquerda para a direita. Se a caixa delimitadora da linha do tempo do evento está consistentemente à esquerda da pessoa, então o valor deve ser reduzido. Similarmente, se a pessoa está caminhando da esquerda para a direita e a caixa delimitadora está consistentemente à frente da pessoa, então o valor deve ser aumentado.", + "toast": { + "success": "O deslocamento de anotação para a câmera {{camera}} foi salvo no arquivo de configuração. Reinicie o Frigate para aplicar as alterações." 
+ } } + }, + "carousel": { + "previous": "Slide anterior", + "next": "Próximo slide" + } + }, + "itemMenu": { + "findSimilar": { + "aria": "Encontrar objetos rastreados similares" + }, + "submitToPlus": { + "label": "Enviar ao Frigate+" + }, + "downloadVideo": { + "label": "Baixar vídeo", + "aria": "Baixar vídeo" } } } diff --git a/web/public/locales/pt-BR/views/faceLibrary.json b/web/public/locales/pt-BR/views/faceLibrary.json index affcc44ce..1ca2c5366 100644 --- a/web/public/locales/pt-BR/views/faceLibrary.json +++ b/web/public/locales/pt-BR/views/faceLibrary.json @@ -72,5 +72,31 @@ "aria": "Selecionar treinar", "empty": "Não há tentativas recentes de reconhecimento facial" }, - "selectFace": "Selecionar Rosto" + "selectFace": "Selecionar Rosto", + "trainFaceAs": "Treinar Rosto como:", + "trainFace": "Treinar Rosto", + "toast": { + "success": { + "uploadedImage": "Imagens enviadas com sucesso.", + "addFaceLibrary": "{{name}} foi adicionado com sucesso à Biblioteca de Rostos!", + "deletedFace_one": "{{count}} rosto apagado com sucesso.", + "deletedFace_many": "{{count}} rostos apagados com sucesso.", + "deletedFace_other": "{{count}} rostos apagados com sucesso.", + "trainedFace": "Rosto treinado com sucesso.", + "updatedFaceScore": "Pontuação de rosto atualizada com sucesso.", + "renamedFace": "O rosto foi renomeado com sucesso para {{name}}", + "deletedName_one": "{{count}} rosto foi deletado com sucesso.", + "deletedName_many": "{{count}} rostos foram deletados com sucesso.", + "deletedName_other": "" + }, + "error": { + "uploadingImageFailed": "Falha ao enviar a imagem: {{errorMessage}}", + "addFaceLibraryFailed": "Falha ao definir o nome do rosto: {{errorMessage}}", + "deleteFaceFailed": "Falha em deletar: {{errorMessage}}", + "deleteNameFailed": "Falha ao deletar nome: {{errorMessage}}", + "renameFaceFailed": "Falha ao renomear rosto: {{errorMessage}}", + "trainFailed": "Falha ao treinar: {{errorMessage}}", + "updateFaceScoreFailed": "Falha ao atualizar 
pontuação de rosto: {{errorMessage}}" + } + } } diff --git a/web/public/locales/pt-BR/views/live.json b/web/public/locales/pt-BR/views/live.json index 8af517b93..97ca4675c 100644 --- a/web/public/locales/pt-BR/views/live.json +++ b/web/public/locales/pt-BR/views/live.json @@ -93,6 +93,66 @@ "failedToStart": "Falha ao iniciar a gravação manual sob demanda.", "recordDisabledTips": "Como a gravação está desabilitada ou restrita na configuração desta câmera, apenas um instantâneo será salvo.", "end": "Fim da gravação sob demanda", - "failedToEnd": "Falha ao finalizar a gravação manual sob demanda." + "failedToEnd": "Falha ao finalizar a gravação manual sob demanda.", + "debugView": "Visualização de Depuração", + "ended": "Gravação manual sob demanda finalizada." + }, + "streamingSettings": "Configurações de Transmissão", + "notifications": "Notificações", + "audio": "Áudio", + "suspend": { + "forTime": "Suspender por: " + }, + "stream": { + "title": "Transmissão", + "audio": { + "tips": { + "title": "O áudio deve sair da sua câmera e estar configurado no go2rtc para essa transmissão.", + "documentation": "Leia a documentação. " + }, + "available": "Áudio disponível para essa transmissão", + "unavailable": "O áudio não está disponível para essa transmissão" + }, + "twoWayTalk": { + "tips": "O seu dispositivo precisa suportar esse recurso e o WebRTC precisa estar configurado para áudio bidirecional.", + "tips.documentation": "Leia a documentação. ", + "available": "Áudio bidirecional está disponível para essa transmissão", + "unavailable": "Áudio bidirecional está indisponível para essa transmissão" + }, + "lowBandwidth": { + "tips": "A transmissão ao vivo está em modo de economia de dados devido a erros de buffering ou de transmissão.", + "resetStream": "Resetar transmissão" + }, + "playInBackground": { + "label": "Reproduzir em segundo plano", + "tips": "Habilitar essa opção para continuar a transmissão quando o reprodutor estiver oculto." 
+ } + }, + "cameraSettings": { + "title": "Configurações de {{camera}}", + "cameraEnabled": "Câmera Habilitada", + "objectDetection": "Detecção de Objeto", + "recording": "Gravação", + "snapshots": "Capturas de Imagem", + "audioDetection": "Detecção de Áudio", + "autotracking": "Auto Rastreamento" + }, + "history": { + "label": "Exibir gravação histórica" + }, + "effectiveRetainMode": { + "modes": { + "all": "Todos", + "motion": "Movimento", + "active_objects": "Objetos Ativos" + }, + "notAllTips": "A configuração de retenção da sua gravação do(a) {{source}} está definida para o modo: {{effectiveRetainMode}}, então essa gravação sob demanda irá manter somente os segmentos com o {{effectiveRetainModeName}}." + }, + "editLayout": { + "label": "Editar Layout", + "group": { + "label": "Editar Grupo de Câmera" + }, + "exitEdit": "Sair da Edição" } } diff --git a/web/public/locales/pt-BR/views/search.json b/web/public/locales/pt-BR/views/search.json index 7d3355c2a..095503568 100644 --- a/web/public/locales/pt-BR/views/search.json +++ b/web/public/locales/pt-BR/views/search.json @@ -57,11 +57,14 @@ }, "header": { "noFilters": "Filtros", - "activeFilters": "Filtros ativos" + "activeFilters": "Filtros ativos", + "currentFilterType": "Valores de Filtros" } }, "similaritySearch": { - "active": "Pesquisa por similaridade ativa" + "active": "Pesquisa por similaridade ativa", + "title": "Buscar por Similaridade", + "clear": "Limpar buscar por similaridade" }, "placeholder": { "search": "Pesquisar…" diff --git a/web/public/locales/pt-BR/views/settings.json b/web/public/locales/pt-BR/views/settings.json index 7ba7605d6..18b57f1e8 100644 --- a/web/public/locales/pt-BR/views/settings.json +++ b/web/public/locales/pt-BR/views/settings.json @@ -94,8 +94,77 @@ "desc": "A Busca Semântica no Frigate permite você encontrar objetos rastreados dentro dos seus itens revisados, usando ou a imagem em si, uma descrição de texto definida pelo usuário ou uma gerada automaticamente.", 
"readTheDocumentation": "Leia a Documentação", "reindexNow": { - "label": "Reindexar Agora" + "label": "Reindexar Agora", + "desc": "A reindexação irá regenerar os embeddings para todos os objetos rastreados. Esse processo roda em segundo plano e pode usar 100% da CPU e levar um tempo considerável dependendo do número de objetos rastreados que você possui.", + "confirmTitle": "Confirmar Reindexação", + "confirmDesc": "Tem certeza que quer reindexar todos os embeddings de objetos rastreados? Esse processo rodará em segundo plano porém utilizará 100% da CPU e levará uma quantidade de tempo considerável. Você pode acompanhar o progresso na página Explorar.", + "confirmButton": "Reindexar", + "success": "A reindexação iniciou com sucesso.", + "alreadyInProgress": "A reindexação já está em progresso.", + "error": "Falha ao iniciar a reindexação: {{errorMessage}}" + }, + "modelSize": { + "label": "Tamanho do Modelo", + "desc": "O tamanho do modelo usado para embeddings de pesquisa semântica.", + "small": { + "title": "pequeno", + "desc": "Usar pequeno emprega a versão quantizada do modelo que utiliza menos RAM e roda mais rápido na CPU, com diferenças insignificantes na qualidade dos embeddings." + }, + "large": { + "title": "grande", + "desc": "Usar grande emprega o modelo Jina completo e roda na GPU automaticamente caso aplicável." + } + } + }, + "faceRecognition": { + "title": "Reconhecimento Facial", + "desc": "O reconhecimento facial permite que pessoas sejam associadas a nomes e quando seus rostos forem reconhecidos, o Frigate associará o nome da pessoa como uma sub-categoria. Essa informação é inclusa na UI, filtros e notificações.", + "readTheDocumentation": "Leia a Documentação", + "modelSize": { + "label": "Tamanho do Modelo", + "desc": "O tamanho do modelo usado para reconhecimento facial.", + "small": { + "title": "pequeno", + "desc": "Usar pequeno emprega o modelo de embedding de rosto FaceNet, que roda de maneira eficiente na maioria das CPUs." 
+ }, + "large": { + "title": "grande", + "desc": "Usar grande emprega um modelo de embedding de rosto ArcFace e rodará automaticamente na GPU se aplicável." + } + } + }, + "licensePlateRecognition": { + "title": "Reconhecimento de Placa de Identificação", + "desc": "O Frigate pode reconhecer placas de identificação em veículos e automaticamente adicionar os caracteres detectados ao campo recognized_license_plates ou um nome conhecido como uma sub-categoria a objetos que são do tipo carro. Um uso típico é ler a placa de carros entrando em uma garagem ou carros passando pela rua.", + "readTheDocumentation": "Leia a Documentação" + }, + "restart_required": "Necessário reiniciar (configurações de enriquecimento foram alteradas)", + "toast": { + "success": "As regras de enriquecimento foram salvas. Reinicie o Frigate para aplicar as alterações.", + "error": "Falha ao salvar alterações de configurações: {{errorMessage}}" + } + }, + "camera": { + "title": "Configurações de Câmera", + "streams": { + "title": "Transmissões", + "desc": "Temporariamente desativar a câmera até o Frigate reiniciar. Desativar a câmera completamente impede o processamento da transmissão dessa câmera pelo Frigate. Detecções, gravações e depuração estarão indisponíveis.
Nota: Isso não desativa as retransmissões do go2rtc." + }, + "review": { + "title": "Revisar", + "desc": "Temporariamente habilitar/desabilitar alertas e detecções para essa câmera até o Frigate reiniciar. Quando desabilitado, nenhum novo item de revisão será gerado. ", + "alerts": "Alertas ", + "detections": "Detecções " + }, + "reviewClassification": { + "title": "Revisar Classificação", + "desc": "O Frigate categoriza itens de revisão como Alertas e Detecções. Por padrão, todas as pessoas e carros são considerados alertas. Você pode refinar a categorização dos seus itens revisados configurando as zonas requeridas para eles.", + "readTheDocumentation": "Leia a Documentação", + "noDefinedZones": "Nenhuma zona definida para essa câmera.", + "selectAlertsZones": "Selecionar as zonas para Alertas", + "selectDetectionsZones": "Selecionar as zonas para Detecções", + "objectAlertsTips": "Todos os {{alertsLabels}} objetos em {{cameraName}} serão exibidos como Alertas.", + "zoneObjectAlertsTips": "Todos os {{alertsLabels}} objetos detectados em {{zone}} em {{cameraName}} serão exibidos como Alertas." 
} } } diff --git a/web/public/locales/pt-BR/views/system.json b/web/public/locales/pt-BR/views/system.json index 1be320de0..f4c3c1f43 100644 --- a/web/public/locales/pt-BR/views/system.json +++ b/web/public/locales/pt-BR/views/system.json @@ -61,7 +61,8 @@ "title": "Saída SMI da Nvidia", "name": "Nome: {{name}}", "driver": "Driver: {{driver}}", - "cudaComputerCapability": "Capacidade de Computação CUDA: {{cuda_compute}}" + "cudaComputerCapability": "Capacidade de Computação CUDA: {{cuda_compute}}", + "vbios": "Informação de VBios: {{vbios}}" }, "closeInfo": { "label": "Fechar informações da GPU" }, @@ -77,7 +78,85 @@ "npuMemory": "Memória da NPU" }, "otherProcesses": { - "title": "Outros processos" + "title": "Outros processos", + "processCpuUsage": "Uso de Processamento da CPU", + "processMemoryUsage": "Uso de Memória de Processos" + } + }, + "storage": { + "title": "Armazenamento", + "overview": "Visão Geral", + "recordings": { + "title": "Gravações", + "earliestRecording": "Gravação mais antiga disponível:", + "tips": "Esse valor representa o armazenamento total usado pelas gravações no banco de dados do Frigate. O Frigate não rastreia o uso de armazenamento para todos os arquivos do seu disco." + }, + "cameraStorage": { + "title": "Armazenamento da Câmera", + "camera": "Câmera", + "unusedStorageInformation": "Informação de Armazenamento Não Utilizado", + "storageUsed": "Armazenamento", + "percentageOfTotalUsed": "Porcentagem do Total", + "bandwidth": "Largura de Banda", + "unused": { + "title": "Não Utilizado", + "tips": "Esse valor pode não representar com precisão o espaço livre disponível para o Frigate se você possui outros arquivos armazenados no seu drive além das gravações do Frigate. O Frigate não rastreia a utilização do armazenamento além de suas próprias gravações." 
+ } + } + }, + "cameras": { + "title": "Câmeras", + "overview": "Visão Geral", + "info": { + "aspectRatio": "proporção", + "cameraProbeInfo": "{{camera}} Informação de Probe da Câmera", + "streamDataFromFFPROBE": "Os dados da transmissão são obtidos com o ffprobe.", + "fetching": "Buscando Dados da Câmera", + "stream": "Transmissão {{idx}}", + "video": "Vídeo:", + "codec": "Codec:", + "resolution": "Resolução:", + "fps": "FPS:", + "unknown": "Desconhecido", + "audio": "Áudio:", + "error": "Erro: {{error}}", + "tips": { + "title": "Informação de Probe de Câmera" + } + }, + "framesAndDetections": "Quadros / Detecções", + "label": { + "camera": "câmera", + "detect": "detectar", + "skipped": "ignoradas", + "ffmpeg": "FFmpeg", + "capture": "captura", + "overallFramesPerSecond": "quadros por segundo em geral", + "overallDetectionsPerSecond": "detecções por segundo em geral", + "overallSkippedDetectionsPerSecond": "detecções puladas por segundo em geral", + "cameraFfmpeg": "{{camName}} FFmpeg", + "cameraCapture": "{{camName}} captura", + "cameraDetect": "{{camName}} detectar", + "cameraFramesPerSecond": "{{camName}} quadros por segundo", + "cameraDetectionsPerSecond": "{{camName}} detecções por segundo", + "cameraSkippedDetectionsPerSecond": "{{camName}} detecções puladas por segundo" + }, + "toast": { + "success": { + "copyToClipboard": "Dados do probe copiados para a área de transferência." 
+ }, + "error": { + "unableToProbeCamera": "Não foi possível fazer o probe da câmera: {{errorMessage}}" + } + } + }, + "lastRefreshed": "Atualizado pela última vez: ", + "stats": { + "detectIsVerySlow": "{{detect}} está lento ({{speed}} ms)", + "ffmpegHighCpuUsage": "{{camera}} possui alta utilização de CPU para FFmpeg ({{ffmpegAvg}}%)", + "detectHighCpuUsage": "{{camera}} possui alta utilização de CPU para detecção ({{detectAvg}}%)" + }, + "enrichments": { + "title": "Enriquecimentos" } } From 878f401ad242ec66b26b13d95c63208ceb66d683 Mon Sep 17 00:00:00 2001 From: Hosted Weblate Date: Tue, 5 Aug 2025 17:24:00 +0200 Subject: [PATCH 005/144] Translated using Weblate (Indonesian) Currently translated at 18.7% (9 of 48 strings) Translated using Weblate (Indonesian) Currently translated at 8.6% (10 of 115 strings) Translated using Weblate (Indonesian) Currently translated at 41.6% (10 of 24 strings) Translated using Weblate (Indonesian) Currently translated at 18.0% (9 of 50 strings) Co-authored-by: Fikry Budi Hasbillah Co-authored-by: Hosted Weblate Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/components-dialog/id/ Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/views-events/id/ Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/views-explore/id/ Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/views-search/id/ Translation: Frigate NVR/components-dialog Translation: Frigate NVR/views-events Translation: Frigate NVR/views-explore Translation: Frigate NVR/views-search --- web/public/locales/id/components/dialog.json | 3 ++- web/public/locales/id/views/events.json | 3 ++- web/public/locales/id/views/explore.json | 5 ++++- web/public/locales/id/views/search.json | 3 ++- 4 files changed, 10 insertions(+), 4 deletions(-) diff --git a/web/public/locales/id/components/dialog.json b/web/public/locales/id/components/dialog.json index 1c1c73a5a..5d5f20fb8 100644 --- a/web/public/locales/id/components/dialog.json +++ 
b/web/public/locales/id/components/dialog.json @@ -16,7 +16,8 @@ }, "review": { "question": { - "label": "Konfirmasi label ini untuk Frigate Plus" + "label": "Konfirmasi label ini untuk Frigate Plus", + "ask_a": "Apakah objek ini adalah sebuah{{label}}?" } } } diff --git a/web/public/locales/id/views/events.json b/web/public/locales/id/views/events.json index 94379d015..f320bae8f 100644 --- a/web/public/locales/id/views/events.json +++ b/web/public/locales/id/views/events.json @@ -11,5 +11,6 @@ "alert": "Tidak ada peringatan untuk ditinjau", "motion": "Data gerakan tidak ditemukan" }, - "timeline.aria": "Pilih timeline" + "timeline.aria": "Pilih timeline", + "timeline": "Linimasa" } diff --git a/web/public/locales/id/views/explore.json b/web/public/locales/id/views/explore.json index 833c9d3d9..de062e132 100644 --- a/web/public/locales/id/views/explore.json +++ b/web/public/locales/id/views/explore.json @@ -7,7 +7,10 @@ "context": "Jelajahi dapat digunakan setelah embedding objek yang dilacak selesai di-reindex.", "startingUp": "Sedang memulai…", "estimatedTime": "Perkiraan waktu tersisa:", - "finishingShortly": "Selesai sesaat lagi" + "finishingShortly": "Selesai sesaat lagi", + "step": { + "thumbnailsEmbedded": "Keluku dilampirkan " + } } }, "details": { diff --git a/web/public/locales/id/views/search.json b/web/public/locales/id/views/search.json index f89280cd4..c4c598990 100644 --- a/web/public/locales/id/views/search.json +++ b/web/public/locales/id/views/search.json @@ -8,5 +8,6 @@ "delete": "Hapus pencarian yang disimpan", "filterInformation": "Saring Informasi", "filterActive": "Filter aktif" - } + }, + "trackedObjectId": "Tracked Object ID" } From ca3afa8ac4c92f229a620a93ad64b5623501a8fa Mon Sep 17 00:00:00 2001 From: Hosted Weblate Date: Tue, 5 Aug 2025 17:24:00 +0200 Subject: [PATCH 006/144] Translated using Weblate (Swedish) Currently translated at 28.9% (33 of 114 strings) Translated using Weblate (Swedish) Currently translated at 21.0% (24 of 114 
strings) Translated using Weblate (Swedish) Currently translated at 14.9% (17 of 114 strings) Translated using Weblate (Swedish) Currently translated at 57.5% (38 of 66 strings) Co-authored-by: Hosted Weblate Co-authored-by: Lars Gustavsson Co-authored-by: Oscar Haraldsson Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/components-filter/sv/ Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/views-system/sv/ Translation: Frigate NVR/components-filter Translation: Frigate NVR/views-system --- web/public/locales/sv/components/filter.json | 3 +- web/public/locales/sv/views/system.json | 43 ++++++++++++++++++-- 2 files changed, 41 insertions(+), 5 deletions(-) diff --git a/web/public/locales/sv/components/filter.json b/web/public/locales/sv/components/filter.json index 645448287..c29110cc7 100644 --- a/web/public/locales/sv/components/filter.json +++ b/web/public/locales/sv/components/filter.json @@ -6,7 +6,8 @@ }, "label": "Etiketter", "count": "{{count}} Etiketter", - "count_one": "{{count}} Etikett" + "count_one": "{{count}} Etikett", + "count_other": "{{count}} Etiketter" }, "filter": "Filter", "zones": { diff --git a/web/public/locales/sv/views/system.json b/web/public/locales/sv/views/system.json index 4f46b0cbf..d10bf2e1d 100644 --- a/web/public/locales/sv/views/system.json +++ b/web/public/locales/sv/views/system.json @@ -4,7 +4,8 @@ "general": "Allmän statistik - Frigate", "cameras": "Kamerastatistik - Frigate", "logs": { - "frigate": "Frigate loggar - Frigate" + "frigate": "Frigate loggar - Frigate", + "go2rtc": "Go2RTC Loggar - Frigate" } }, "logs": { @@ -19,18 +20,52 @@ "type": { "label": "Typ", "timestamp": "Tidsstämpel", - "message": "Meddelande" + "message": "Meddelande", + "tag": "Tagg" }, "tips": "Loggarna strömmas från Server", "toast": { "error": { - "fetchingLogsFailed": "Fel vid hämtning av loggar: {{errorMessage}}" + "fetchingLogsFailed": "Fel vid hämtning av loggar: {{errorMessage}}", + "whileStreamingLogs": "Fel vid 
uppspelning av loggar: {{errorMessage}}" } } }, "title": "System", "metrics": "System detaljer", "general": { - "title": "Generellt" + "title": "Generellt", + "detector": { + "title": "Detektorer" + }, + "hardwareInfo": { + "title": "Hårdvaruinformation", + "gpuUsage": "GPU-användning", + "gpuMemory": "GPU-minne" + }, + "otherProcesses": { + "title": "Övriga processer" + } + }, + "storage": { + "cameraStorage": { + "storageUsed": "Lagring", + "percentageOfTotalUsed": "Procentandel av totalt", + "bandwidth": "Bandbredd", + "unused": { + "title": "Oanvänt", + "tips": "Det här värdet kanske inte korrekt representerar det lediga utrymmet tillgängligt för Frigate om du har andra filer lagrade på din hårddisk utöver Frigates inspelningar. Frigate spårar inte lagringsanvändning utanför sina egna inspelningar." + } + } + }, + "cameras": { + "title": "Kameror", + "overview": "Översikt", + "info": { + "aspectRatio": "bildförhållande" + }, + "label": { + "detect": "detektera" + } } } From 5fc030c3f6a753860ddd54c0e82e7077d844a34c Mon Sep 17 00:00:00 2001 From: Hosted Weblate Date: Tue, 5 Aug 2025 17:24:00 +0200 Subject: [PATCH 007/144] Translated using Weblate (Finnish) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently translated at 83.3% (40 of 48 strings) Translated using Weblate (Finnish) Currently translated at 53.2% (33 of 62 strings) Translated using Weblate (Finnish) Currently translated at 46.0% (53 of 115 strings) Translated using Weblate (Finnish) Currently translated at 50.0% (33 of 66 strings) Translated using Weblate (Finnish) Currently translated at 64.0% (32 of 50 strings) Translated using Weblate (Finnish) Currently translated at 13.5% (58 of 427 strings) Translated using Weblate (Finnish) Currently translated at 77.0% (37 of 48 strings) Translated using Weblate (Finnish) Currently translated at 48.3% (30 of 62 strings) Translated using Weblate (Finnish) Currently translated at 45.2% (52 of 115 strings) 
Translated using Weblate (Finnish) Currently translated at 46.9% (31 of 66 strings) Translated using Weblate (Finnish) Currently translated at 13.3% (57 of 427 strings) Co-authored-by: Hosted Weblate Co-authored-by: Julius Hyvönen Co-authored-by: Risto Toivanen Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/audio/fi/ Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/components-dialog/fi/ Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/components-filter/fi/ Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/views-explore/fi/ Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/views-facelibrary/fi/ Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/views-search/fi/ Translation: Frigate NVR/audio Translation: Frigate NVR/components-dialog Translation: Frigate NVR/components-filter Translation: Frigate NVR/views-explore Translation: Frigate NVR/views-facelibrary Translation: Frigate NVR/views-search --- web/public/locales/fi/audio.json | 5 ++++- web/public/locales/fi/components/dialog.json | 5 ++++- web/public/locales/fi/components/filter.json | 9 +++++++-- web/public/locales/fi/views/explore.json | 9 ++++++--- web/public/locales/fi/views/faceLibrary.json | 17 +++++++++++++---- web/public/locales/fi/views/search.json | 20 +++++++++++++++++++- 6 files changed, 53 insertions(+), 12 deletions(-) diff --git a/web/public/locales/fi/audio.json b/web/public/locales/fi/audio.json index c0da6a307..cb54c30be 100644 --- a/web/public/locales/fi/audio.json +++ b/web/public/locales/fi/audio.json @@ -54,5 +54,8 @@ "pant": "Huohottaa", "snort": "Haukkua", "cough": "Yskä", - "sneeze": "Niistää" + "sneeze": "Niistää", + "throat_clearing": "Kurkun selvittäminen", + "sniff": "Poimi", + "run": "Käynnistä" } diff --git a/web/public/locales/fi/components/dialog.json b/web/public/locales/fi/components/dialog.json index 3ed4834e2..4b59b0d8c 100644 --- a/web/public/locales/fi/components/dialog.json +++ 
b/web/public/locales/fi/components/dialog.json @@ -65,6 +65,9 @@ } }, "streaming": { - "label": "Kuvavirta" + "label": "Kuvavirta", + "restreaming": { + "disabled": "Uudelleentoisto ei ole käytettävissä tällä kameralla." + } } } diff --git a/web/public/locales/fi/components/filter.json b/web/public/locales/fi/components/filter.json index 8f440ce41..17bc1ceb2 100644 --- a/web/public/locales/fi/components/filter.json +++ b/web/public/locales/fi/components/filter.json @@ -38,7 +38,8 @@ "label": "Piirteet", "hasVideoClip": "Videoleike löytyy", "submittedToFrigatePlus": { - "label": "Lähetetty Frigate+:aan" + "label": "Lähetetty Frigate+:aan", + "tips": "Sinun on ensin suodatettava seuratut kohteet, joilla on tilannekuva.

Kohteita, joilla ei ole tilannekuvaa, ei voida lähettää Frigate+:aan." }, "hasSnapshot": "Tilannekuva löytyy" }, @@ -49,6 +50,10 @@ "scoreAsc": "Kohteen pisteet (Nouseva)", "scoreDesc": "Kohteen pisteet (Laskeva)", "speedAsc": "Arvioitu nopeus (Nouseva)", - "speedDesc": "Arvioitu nopeus (Laskeva)" + "speedDesc": "Arvioitu nopeus (Laskeva)", + "relevance": "Olennaisuus" + }, + "cameras": { + "label": "Kameran suodattimet" } } diff --git a/web/public/locales/fi/views/explore.json b/web/public/locales/fi/views/explore.json index 2bd28354a..c6950c941 100644 --- a/web/public/locales/fi/views/explore.json +++ b/web/public/locales/fi/views/explore.json @@ -27,10 +27,12 @@ "context": "Frigate lataa semanttista hakua varten vaadittavat upotusmallit. Tämä saattaa viedä useamman minuutin, riippuen yhteytesi nopeudesta.", "setup": { "visionModel": "Vision-malli", - "textModel": "Tekstimalli" + "textModel": "Tekstimalli", + "textTokenizer": "Tekstin osioija" }, "tips": { - "documentation": "Lue dokumentaatio" + "documentation": "Lue dokumentaatio", + "context": "Saatat haluta uudelleenindeksoida seurattavien kohteiden upotukset, kun mallit on ladattu." }, "error": "Tapahtui virhe. Tarkista Frigaten lokit." } @@ -74,7 +76,8 @@ "noImageFound": "Tältä aikaleimalta ei löytynyt kuvia.", "createObjectMask": "Luo kohdemaski", "scrollViewTips": "Vieritä katsoaksesi merkittäviä hetkiä kohteen elinkaarelta.", - "autoTrackingTips": "Kohteen rajojen sijainti on epätarkka automaattisesti seuraaville kameroille." 
+ "autoTrackingTips": "Kohteen rajojen sijainti on epätarkka automaattisesti seuraaville kameroille.", + "adjustAnnotationSettings": "Säädä merkintäasetuksia" }, "trackedObjectDetails": "Seurattavien kohteiden tiedot", "type": { diff --git a/web/public/locales/fi/views/faceLibrary.json b/web/public/locales/fi/views/faceLibrary.json index e791acbd2..94f4895ca 100644 --- a/web/public/locales/fi/views/faceLibrary.json +++ b/web/public/locales/fi/views/faceLibrary.json @@ -14,7 +14,8 @@ "person": "Henkilö", "timestamp": "Aikaleima", "subLabelScore": "Alinimikkeen pisteet", - "face": "Kasvojen yksityiskohdat" + "face": "Kasvojen yksityiskohdat", + "scoreInfo": "Alatunnisteen pistemäärä on kaikkien tunnistettujen kasvojen varmuustasojen painotettu keskiarvo, joten se voi poiketa tilannekuvassa näkyvästä pistemäärästä." }, "documentTitle": "Kasvokirjasto - Frigate", "deleteFaceAttempts": { @@ -31,22 +32,30 @@ "selectItem": "Valitse {{item}}", "train": { "empty": "Ei viimeaikaisia kasvojentunnistusyrityksiä", - "title": "Koulutus" + "title": "Koulutus", + "aria": "Valitse kouluta" }, "collections": "Kokoelmat", "steps": { "faceName": "Anna nimi kasvoille", "uploadFace": "Lähetä kasvokuva", - "nextSteps": "Seuraavat vaiheet" + "nextSteps": "Seuraavat vaiheet", + "description": { + "uploadFace": "Lataa kuva henkilöstä {{name}}, jossa hänen kasvonsa näkyvät suoraan edestä päin. Kuvaa ei tarvitse rajata pelkkiin kasvoihin." + } }, "createFaceLibrary": { "title": "Luo kokoelma", "desc": "Luo uusi kokoelma", - "new": "Luo uusi kasvo" + "new": "Luo uusi kasvo", + "nextSteps": "Hyvän perustan luomiseksi huomioitavaa:
  • Käytä koulutus-välilehteä valitaksesi opetukseen kuvia kustakin tunnistetusta henkilöstä
  • Panosta mahdollisimman suoraan otettuihin kuviin; vältä kouluttamista kulmassa kuvatuilla kuvilla.
  • " }, "selectFace": "Valitse kasvo", "deleteFaceLibrary": { "title": "Poista nimi", "desc": "Haluatko varmasti poistaa kokoelman {{name}}? Tämä poistaa pysyvästi kaikki liitetyt kasvot." + }, + "renameFace": { + "title": "Uudelleennimeä kasvot" } } diff --git a/web/public/locales/fi/views/search.json b/web/public/locales/fi/views/search.json index 881a50c04..fab605088 100644 --- a/web/public/locales/fi/views/search.json +++ b/web/public/locales/fi/views/search.json @@ -37,8 +37,26 @@ "beforeDateBeLaterAfter": "'Ennen' ajan täytyy olla myöhemmin kun 'jälkeen' aika.", "afterDatebeEarlierBefore": "'Jälkeen' ajan täytyy olla aiemmin kun 'ennen' aika.", "minScoreMustBeLessOrEqualMaxScore": "Arvon 'min_score' täytyy olla pienempi tai yhtäsuuri kuin 'max_score'.", - "maxScoreMustBeGreaterOrEqualMinScore": "Arvon 'max_score' täytyy olla suurempi tai yhtäsuuri kuin 'min_score'." + "maxScoreMustBeGreaterOrEqualMinScore": "Arvon 'max_score' täytyy olla suurempi tai yhtäsuuri kuin 'min_score'.", + "minSpeedMustBeLessOrEqualMaxSpeed": "'Minimi nopeus' tulee olla pienempi tai yhtäsuuri kuin 'maksimi nopeus'.", + "maxSpeedMustBeGreaterOrEqualMinSpeed": "'Maksimi nopeus' tulee olla suurempi tai yhtä suuri kuin 'minimi nopeus'." 
} + }, + "tips": { + "desc": { + "exampleLabel": "Esimerkki:" + }, + "title": "Tekstisuodattimien käyttö" + }, + "header": { + "currentFilterType": "Suodata arvoja", + "noFilters": "Suodattimet", + "activeFilters": "Käytössä olevat suodattimet" } + }, + "similaritySearch": { + "title": "Samankaltaisten kohteiden haku", + "active": "Samankaltaisuushaku aktiivinen", + "clear": "Poista samankaltaisuushaku" } } From 334b6670e1909bf108f265d90ea03637957a0a76 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Wed, 6 Aug 2025 07:02:40 -0600 Subject: [PATCH 008/144] Add note for Gemini base url (#19399) --- docs/docs/configuration/genai.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/docs/docs/configuration/genai.md b/docs/docs/configuration/genai.md index 90e463df4..832398df9 100644 --- a/docs/docs/configuration/genai.md +++ b/docs/docs/configuration/genai.md @@ -105,6 +105,12 @@ genai: model: gemini-1.5-flash ``` +:::note + +To use a different Gemini-compatible API endpoint, set the `GEMINI_BASE_URL` environment variable to your provider's API URL. + +::: + ## OpenAI OpenAI does not have a free tier for their API. With the release of gpt-4o, pricing has been reduced and each generation should cost fractions of a cent if you choose to go this route. From b6b3178e3de4ac8c0ef59bf02aaaf2cbc81dc452 Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Wed, 6 Aug 2025 22:09:43 -0500 Subject: [PATCH 009/144] Fixes (#19406) * Fix api filter hook cameras, labels, sub labels, plates, and zones could be parsed as numeric values rather than strings, which would break the explore filter. 
This change adds an optional param to the useApiFilterArgs hook to always parse keys as string[] * fix notifications register button from being incorrectly disabled --- web/src/hooks/use-api-filter.ts | 43 +++++++++++-------- web/src/pages/Explore.tsx | 8 +++- .../settings/NotificationsSettingsView.tsx | 4 +- 3 files changed, 35 insertions(+), 20 deletions(-) diff --git a/web/src/hooks/use-api-filter.ts b/web/src/hooks/use-api-filter.ts index bffbff7e8..f9baead6e 100644 --- a/web/src/hooks/use-api-filter.ts +++ b/web/src/hooks/use-api-filter.ts @@ -47,9 +47,9 @@ export default function useApiFilter< return [filter, setFilter, searchParams]; } -export function useApiFilterArgs< - F extends FilterType, ->(): useApiFilterReturn { +export function useApiFilterArgs( + arrayKeys: string[], +): useApiFilterReturn { const [rawParams, setRawParams] = useSearchParams(); const setFilter = useCallback( @@ -64,30 +64,37 @@ export function useApiFilterArgs< const filter: { [key: string]: unknown } = {}; - rawParams.forEach((value, key) => { - const isValidNumber = /^-?\d+(\.\d+)?(?!.)/.test(value); - const isValidEventID = /^\d+\.\d+-[a-zA-Z0-9]+$/.test(value); + // always treat these keys as string[], not as a number or event id + const arrayKeySet = new Set(arrayKeys); - if ( - value != "true" && - value != "false" && - !isValidNumber && - !isValidEventID - ) { + rawParams.forEach((value, key) => { + if (arrayKeySet.has(key)) { filter[key] = value.includes(",") ? value.split(",") : [value]; } else { - if (value != undefined) { - try { - filter[key] = JSON.parse(value); - } catch { - filter[key] = `${value}`; + const isValidNumber = /^-?\d+(\.\d+)?(?!.)/.test(value); + const isValidEventID = /^\d+\.\d+-[a-zA-Z0-9]+$/.test(value); + + if ( + value != "true" && + value != "false" && + !isValidNumber && + !isValidEventID + ) { + filter[key] = value.includes(",") ? 
value.split(",") : [value]; + } else { + if (value != undefined) { + try { + filter[key] = JSON.parse(value); + } catch { + filter[key] = `${value}`; + } } } } }); return filter as F; - }, [rawParams]); + }, [rawParams, arrayKeys]); const searchParams = useMemo(() => { if (filter == undefined || Object.keys(filter).length == 0) { diff --git a/web/src/pages/Explore.tsx b/web/src/pages/Explore.tsx index 62d55fee9..9cb97ae2a 100644 --- a/web/src/pages/Explore.tsx +++ b/web/src/pages/Explore.tsx @@ -58,7 +58,13 @@ export default function Explore() { const [search, setSearch] = useState(""); const [searchFilter, setSearchFilter, searchSearchParams] = - useApiFilterArgs(); + useApiFilterArgs([ + "cameras", + "labels", + "sub_labels", + "recognized_license_plate", + "zones", + ]); const searchTerm = useMemo( () => searchSearchParams?.["query"] || "", diff --git a/web/src/views/settings/NotificationsSettingsView.tsx b/web/src/views/settings/NotificationsSettingsView.tsx index 410ba9742..36213fc0e 100644 --- a/web/src/views/settings/NotificationsSettingsView.tsx +++ b/web/src/views/settings/NotificationsSettingsView.tsx @@ -523,7 +523,9 @@ export default function NotificationView({ aria-label={t("notification.registerDevice")} disabled={ (!config?.notifications.enabled && - notificationCameras.length === 0) || + notificationCameras.length === 0 && + !form.watch("allEnabled") && + form.watch("cameras").length === 0) || publicKey == undefined } onClick={() => { From b15c799d8c232891ccf25d1ad33ee55cf651af47 Mon Sep 17 00:00:00 2001 From: Hosted Weblate Date: Thu, 7 Aug 2025 02:03:34 +0200 Subject: [PATCH 010/144] Translated using Weblate (Portuguese (Brazil)) Currently translated at 100.0% (114 of 114 strings) Translated using Weblate (Portuguese (Brazil)) Currently translated at 100.0% (114 of 114 strings) Translated using Weblate (Portuguese (Brazil)) Currently translated at 100.0% (114 of 114 strings) Translated using Weblate (Portuguese (Brazil)) Currently translated at 
42.0% (148 of 352 strings) Translated using Weblate (Portuguese (Brazil)) Currently translated at 100.0% (115 of 115 strings) Translated using Weblate (Portuguese (Brazil)) Currently translated at 100.0% (118 of 118 strings) Translated using Weblate (Portuguese (Brazil)) Currently translated at 81.4% (149 of 183 strings) Translated using Weblate (Portuguese (Brazil)) Currently translated at 38.6% (165 of 427 strings) Co-authored-by: Deilson Peres Co-authored-by: Hosted Weblate Co-authored-by: Marcelo Popper Costa Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/audio/pt_BR/ Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/common/pt_BR/ Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/objects/pt_BR/ Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/views-explore/pt_BR/ Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/views-settings/pt_BR/ Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/views-system/pt_BR/ Translation: Frigate NVR/audio Translation: Frigate NVR/common Translation: Frigate NVR/objects Translation: Frigate NVR/views-explore Translation: Frigate NVR/views-settings Translation: Frigate NVR/views-system --- web/public/locales/pt-BR/audio.json | 50 +++++++- web/public/locales/pt-BR/common.json | 66 ++++++++++- web/public/locales/pt-BR/objects.json | 21 +++- web/public/locales/pt-BR/views/explore.json | 41 ++++++- web/public/locales/pt-BR/views/settings.json | 114 ++++++++++++++++++- web/public/locales/pt-BR/views/system.json | 24 +++- 6 files changed, 306 insertions(+), 10 deletions(-) diff --git a/web/public/locales/pt-BR/audio.json b/web/public/locales/pt-BR/audio.json index 5e7560320..b8db8b374 100644 --- a/web/public/locales/pt-BR/audio.json +++ b/web/public/locales/pt-BR/audio.json @@ -115,5 +115,53 @@ "dogs": "Cachorros", "rats": "Ratos", "coo": "Arrulhado de Pombo", - "crow": "Corvo" + "crow": "Corvo", + "caw": "Grasnado de Corvo", + "owl": "Coruja", + "hoot": 
"Chirriado de Coruja", + "flapping_wings": "Bater de Asas", + "patter": "Passos Leves", + "insect": "Inseto", + "cricket": "Grilo", + "mosquito": "Mosquito", + "fly": "Mosca", + "buzz": "Zumbido", + "frog": "Sapo", + "croak": "Coaxado", + "snake": "Cobra", + "rattle": "Guizo", + "whale_vocalization": "Vocalização de Baleia", + "music": "Música", + "musical_instrument": "Instrumento Musical", + "plucked_string_instrument": "Instrumento de Cordas Dedilhadas", + "guitar": "Violão", + "electric_guitar": "Guitarra", + "bass_guitar": "Baixo", + "acoustic_guitar": "Violão Acústico", + "steel_guitar": "Guitarra Havaiana", + "tapping": "Tapping", + "strum": "Dedilhado", + "banjo": "Banjo", + "sitar": "Sitar", + "mandolin": "Bandolim", + "zither": "Cítara", + "ukulele": "Ukulele", + "piano": "Piano", + "electric_piano": "Piano Elétrico", + "organ": "Órgão", + "electronic_organ": "Órgão Eletrônico", + "hammond_organ": "Órgão Hammond", + "synthesizer": "Sintetizador", + "sampler": "Sampler", + "harpsichord": "Cravo (Instrumento Musical)", + "percussion": "Percussão", + "drum_kit": "Kit de Baterias", + "drum_machine": "Bateria Eletrônica", + "drum": "Tambor", + "snare_drum": "Caixa Clara", + "rimshot": "Rimshot", + "drum_roll": "Tambores Rufando", + "bass_drum": "Bumbo", + "timpani": "Tímpanos (Instrumento Musical)", + "tabla": "Tabla" } diff --git a/web/public/locales/pt-BR/common.json b/web/public/locales/pt-BR/common.json index 9c763ccb7..f9230f0cc 100644 --- a/web/public/locales/pt-BR/common.json +++ b/web/public/locales/pt-BR/common.json @@ -137,9 +137,71 @@ "configuration": "Configuração", "language": { "hi": "हिन्दी (Hindi)", - "fr": "Français (Francês)" + "fr": "Français (Francês)", + "en": "English (Inglês)", + "es": "Español (Espanhol)", + "zhCN": "简体中文 (Chinês Simplificado)", + "ar": "العربية (Arábico)", + "pt": "Português (Português)", + "ru": "Русский (Russo)", + "de": "Deustch (Alemão)", + "ja": "日本語 (Japonês)", + "tr": "Türkçe (Turco)", + "it": "Italiano 
(Italiano)", + "nl": "Nederlands (Holandês)", + "sv": "Svenska (Sueco)", + "cs": "Čeština (Checo)", + "nb": "Norsk Bokmål (Bokmål Norueguês)", + "ko": "한국어 (Coreano)", + "vi": "Tiếng Việt (Vietnamita)", + "fa": "فارسی (Persa)", + "pl": "Polski (Polonês)", + "uk": "Українська (Ucraniano)", + "he": "עברית (Hebraico)", + "el": "Ελληνικά (Grego)", + "ro": "Română (Romeno)", + "hu": "Magyar (Húngaro)", + "fi": "Suomi (Finlandês)", + "da": "Dansk (Dinamarquês)", + "sk": "Slovenčina (Eslovaco)", + "yue": "粵語 (Cantonês)", + "th": "ไทย (Tailandês)", + "ca": "Català (Catalão)", + "withSystem": { + "label": "Usar as configurações de sistema para o idioma" + } }, "systemLogs": "Logs de sistema", - "settings": "Configurações" + "settings": "Configurações", + "configurationEditor": "Editor de Configuração", + "languages": "Idiomas", + "appearance": "Aparência", + "darkMode": { + "label": "Modo Escuro", + "light": "Claro", + "dark": "Escuro", + "withSystem": { + "label": "Use as configurações do sistema para modo claro ou escuro" + } + }, + "withSystem": "Sistema", + "theme": { + "label": "Tema", + "blue": "Azul", + "green": "Verde", + "nord": "Nord", + "red": "Vermelho", + "highcontrast": "Alto Contraste", + "default": "Padrão" + }, + "help": "Ajuda", + "documentation": { + "title": "Documentação", + "label": "Documentação do Frigate" + }, + "restart": "Reiniciar o Frigate", + "live": { + "title": "Ao Vivo" + } } } diff --git a/web/public/locales/pt-BR/objects.json b/web/public/locales/pt-BR/objects.json index 830c7efcf..48283d17b 100644 --- a/web/public/locales/pt-BR/objects.json +++ b/web/public/locales/pt-BR/objects.json @@ -97,5 +97,24 @@ "deer": "Veado", "on_demand": "Sob Demanda", "face": "Rosto", - "fox": "Raposa" + "fox": "Raposa", + "rabbit": "Coelho", + "raccoon": "Guaxinim", + "robot_lawnmower": "Cortador de Grama Robô", + "waste_bin": "Lixeira", + "license_plate": "Placa de Identificação", + "package": "Pacote", + "bbq_grill": "Grelha de Churrasco", + "amazon": 
"Amazon", + "usps": "USPS", + "ups": "UPS", + "fedex": "FedEx", + "dhl": "DHL", + "an_post": "An Post", + "purolator": "Purolator", + "postnl": "PostNL", + "nzpost": "NZPost", + "postnord": "PostNord", + "gls": "GLS", + "dpd": "DPD" } diff --git a/web/public/locales/pt-BR/views/explore.json b/web/public/locales/pt-BR/views/explore.json index 9d87f3579..ee5098f5a 100644 --- a/web/public/locales/pt-BR/views/explore.json +++ b/web/public/locales/pt-BR/views/explore.json @@ -159,14 +159,51 @@ }, "itemMenu": { "findSimilar": { - "aria": "Encontrar objetos rastreados similares" + "aria": "Encontrar objetos rastreados similares", + "label": "Encontrar similar" }, "submitToPlus": { - "label": "Enviar ao Frigate+" + "label": "Enviar ao Frigate+", + "aria": "Enviar ao Frigate Plus" }, "downloadVideo": { "label": "Baixar vídeo", "aria": "Baixar vídeo" + }, + "downloadSnapshot": { + "label": "Baixar captura de imagem", + "aria": "Baixar captura de imagem" + }, + "viewObjectLifecycle": { + "label": "Ver ciclo de vida do objeto", + "aria": "Exibir o ciclo de vida do objeto" + }, + "viewInHistory": { + "label": "Ver no Histórico", + "aria": "Ver no Histórico" + }, + "deleteTrackedObject": { + "label": "Deletar esse objeto rastreado" + } + }, + "dialog": { + "confirmDelete": { + "title": "Confirmar Exclusão", + "desc": "Deletar esse objeto rastreado remove a captura de imagem, qualquer embedding salvo, e quaisquer entradas de ciclo de vida de objeto associadas. Gravações desse objeto rastreado na visualização de Histórico NÃO serão deletadas.

    Tem certeza que quer prosseguir?" + } + }, + "noTrackedObjects": "Nenhum Objeto Rastreado Encontrado", + "fetchingTrackedObjectsFailed": "Erro ao buscar por objetos rastreados: {{errorMessage}}", + "trackedObjectsCount_one": "{{count}} objeto rastreado ", + "trackedObjectsCount_many": "{{count}} objetos rastreados ", + "trackedObjectsCount_other": "", + "searchResult": { + "tooltip": "Correspondência com {{type}} de {{confidence}}%", + "deleteTrackedObject": { + "toast": { + "success": "Objeto rastreado deletado com sucesso.", + "error": "Falha ao detectar objeto rastreado {{errorMessage}}" + } } } } diff --git a/web/public/locales/pt-BR/views/settings.json b/web/public/locales/pt-BR/views/settings.json index 18b57f1e8..b0b8b0a37 100644 --- a/web/public/locales/pt-BR/views/settings.json +++ b/web/public/locales/pt-BR/views/settings.json @@ -164,7 +164,119 @@ "selectAlertsZones": "Selecionar as zonas para Alertas", "selectDetectionsZones": "Selecionar as zonas para Detecções", "objectAlertsTips": "Todos os {{alertsLabels}} objetos em {{cameraName}} serão exibidos como Alertas.", - "zoneObjectAlertsTips": "Todos os {{alertsLabels}} objetos detectados em {{zone}} em {{cameraName}} serão exibidos como Alertas." 
+ "zoneObjectAlertsTips": "Todos os {{alertsLabels}} objetos detectados em {{zone}} em {{cameraName}} serão exibidos como Alertas.", + "objectDetectionsTips": "Todos os objetos {{detectionsLabels}} não categorizados em {{cameraName}} serão exibidos como Detecções independente de qual zona eles estiverem.", + "zoneObjectDetectionsTips": { + "text": "Todos os objetos de {{detectionsLabels}} não categorizados em {{zone}} em {{cameraName}} serão exibidos como Detecções.", + "notSelectDetections": "Todos os objetos {{detectionsLabels}} detectados em {{zone}} em {{cameraName}} não categorizados como Alertas serão exibidos como Detecções independente da zona em que estiverem.", + "regardlessOfZoneObjectDetectionsTips": "Todos os objetos {{detectionsLabels}} não categorizados em {{cameraName}} serão exibidos como Detecções independente de quais zonas estiverem." + }, + "unsavedChanges": "Configurações de Classificação de Revisões Não Salvas para {{camera}}", + "limitDetections": "Limitar detecções a zonas específicas", + "toast": { + "success": "A configuração de Revisão de Classificação foi salva. Reinicie o Frigate para aplicar as mudanças." + } + } + }, + "masksAndZones": { + "filter": { + "all": "Todas as Máscaras e Zonas" + }, + "restart_required": "Reinicialização requerida (máscaras/zonas foram alteradas)", + "toast": { + "success": { + "copyCoordinates": "Coordenadas copiadas para {{polyName}} para a área de transferência." + }, + "error": { + "copyCoordinatesFailed": "Não foi possível copiar as coordenadas para a área de transferência." 
+ } + }, + "motionMaskLabel": "Máscara de Movimento {{number}}", + "objectMaskLabel": "Máscara de Objeto {{number}} ({{label}})", + "form": { + "zoneName": { + "error": { + "mustBeAtLeastTwoCharacters": "O nome da zona deve conter ao menos 2 caracteres.", + "mustNotBeSameWithCamera": "O nome da zona não pode ser igual ao nome da câmera.", + "alreadyExists": "Uma zona com esse noma já existe para essa câmera.", + "mustNotContainPeriod": "O nome da zona não pode conter ponto final.", + "hasIllegalCharacter": "O nome da zona contém caracteres ilegais." + } + }, + "distance": { + "error": { + "text": "A distância deve sair maior ou igual a 0.1.", + "mustBeFilled": "Todos os campos de distância devem ser preenchidos para utilizar a estimativa de velocidade." + } + }, + "inertia": { + "error": { + "mustBeAboveZero": "A inércia deve ser maior que 0." + } + }, + "loiteringTime": { + "error": { + "mustBeGreaterOrEqualZero": "O tempo de permanência deve ser maior ou igual a zero." + } + }, + "speed": { + "error": { + "mustBeGreaterOrEqualTo": "O limiar de velocidade deve ser maior ou igual a 0.1." + } + }, + "polygonDrawing": { + "removeLastPoint": "Remover o ultimo ponto", + "reset": { + "label": "Limpar todos os pontos" + }, + "snapPoints": { + "true": "Pontos de encaixe", + "false": "Não encaixar os ponts" + }, + "delete": { + "title": "Confirmar Deletar", + "desc": "Tem certeza que quer deletar o {{type}} {{name}}?", + "success": "{{name}} foi deletado." + }, + "error": { + "mustBeFinished": "O desenho do polígono deve ser finalizado antes de salvar." 
+ } + } + }, + "zones": { + "label": "Zonas", + "documentTitle": "Editar Zona - Frigate", + "desc": { + "title": "Zonas permitem que você defina uma área específica do quadro para que você possa determinar se um objeto está ou não em uma área em particular.", + "documentation": "Documentação" + }, + "add": "Adicionar Zona", + "edit": "Editar Zona", + "point_one": "{{count}} ponto", + "point_many": "{{count}} pontos", + "point_other": "", + "clickDrawPolygon": "Clique para desenhar um polígono na imagem.", + "name": { + "title": "Nome", + "inputPlaceHolder": "Digite um nome…", + "tips": "O nome deve ter no mínimo 2 caracteres e não pode ter o nome de uma câmera ou outra zona." + }, + "inertia": { + "title": "Inércia", + "desc": "Especifica por quantos quadros um objeto deve permanecer em uma zona para que seja considerado na zona. Padrão: 3" + }, + "loiteringTime": { + "title": "Tempo de Permanência", + "desc": "Define o tempo mínimo em segundos que o objeto deve estar na zona para ser ativado. Padrão: 0" + }, + "objects": { + "title": "Objetos", + "desc": "Lista de objetos que se aplicam a essa zona." 
+ }, + "allObjects": "Todos os Objetos", + "speedEstimation": { + "title": "Estimativa de Velocidade" + } } } } diff --git a/web/public/locales/pt-BR/views/system.json b/web/public/locales/pt-BR/views/system.json index f4c3c1f43..ce0727459 100644 --- a/web/public/locales/pt-BR/views/system.json +++ b/web/public/locales/pt-BR/views/system.json @@ -152,11 +152,29 @@ }, "lastRefreshed": "Atualizado pela última vez: ", "stats": { - "detectIsVerySlow": "{{detect}} está lento ({{speed}} ms)", + "detectIsVerySlow": "{{detect}} está muito lento ({{speed}} ms)", "ffmpegHighCpuUsage": "{{camera}} possui alta utilização de CPU para FFmpeg ({{ffmpegAvg}}%)", - "detectHighCpuUsage": "{{camera}} possui alta utilização de CPU para detecção ({{detectAvg}}%)" + "detectHighCpuUsage": "{{camera}} possui alta utilização de CPU para detecção ({{detectAvg}}%)", + "healthy": "O sistema está saudável", + "cameraIsOffline": "{{camera}} está offline", + "reindexingEmbeddings": "Reindexando embeddings ({{processed}}% completado)", + "detectIsSlow": "{{detect}} está lento ({{speed}} ms)" }, "enrichments": { - "title": "Enriquecimentos" + "title": "Enriquecimentos", + "infPerSecond": "Inferências por Segundo", + "embeddings": { + "face_recognition": "Reconhecimento Facial", + "plate_recognition": "Reconhecimento de Placa", + "plate_recognition_speed": "Velocidade de Reconhecimento de Placas", + "text_embedding_speed": "Velocidade de Incorporação de Textos", + "yolov9_plate_detection_speed": "Velocidade de Reconhecimento de Placas do YOLOv9", + "yolov9_plate_detection": "Detecção de Placas do YOLOv9", + "image_embedding": "Incorporação de Imagem", + "text_embedding": "Incorporação de Texto", + "image_embedding_speed": "Velocidade de Incorporação de Imagem", + "face_embedding_speed": "Velocidade de Incorporação de Rosto", + "face_recognition_speed": "Velocidade de Reconhecimento de Rostos" + } } } From 30cf2748150f19feadb9c5981644f1de2b1ed212 Mon Sep 17 00:00:00 2001 From: Hosted Weblate Date: 
Thu, 7 Aug 2025 02:03:35 +0200 Subject: [PATCH 011/144] Translated using Weblate (Vietnamese) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently translated at 100.0% (114 of 114 strings) Translated using Weblate (Vietnamese) Currently translated at 100.0% (183 of 183 strings) Co-authored-by: Hosted Weblate Co-authored-by: Lâm Lê Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/common/vi/ Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/views-system/vi/ Translation: Frigate NVR/common Translation: Frigate NVR/views-system --- web/public/locales/vi/common.json | 2 +- web/public/locales/vi/views/system.json | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/web/public/locales/vi/common.json b/web/public/locales/vi/common.json index 87b9603bc..f6926a4d3 100644 --- a/web/public/locales/vi/common.json +++ b/web/public/locales/vi/common.json @@ -106,7 +106,7 @@ "nb": "Norsk Bokmål (Tiếng Na Uy)", "ko": "한국어 (Tiếng Hàn)", "pl": "Polski (Tiếng Ba Lan)", - "vi": "Tiếng Việt", + "vi": "Tiếng Việt (Tiếng Việt)", "fa": "فارسی (Tiếng Ba Tư)", "uk": "Українська (Tiếng Ukraina)", "he": "עברית (Tiếng Do Thái)", diff --git a/web/public/locales/vi/views/system.json b/web/public/locales/vi/views/system.json index 257cb0b0b..31da0a086 100644 --- a/web/public/locales/vi/views/system.json +++ b/web/public/locales/vi/views/system.json @@ -65,7 +65,7 @@ "camera": "Camera", "unusedStorageInformation": "Thông tin Lưu trữ Chưa sử dụng", "storageUsed": "Lưu trữ", - "percentageOfTotalUsed": "Phần trăm dung lượng sử dụng", + "percentageOfTotalUsed": "Tổng phần trăm", "bandwidth": "Băng thông", "unused": { "title": "Chưa sử dụng", From 2e1b81b380d3a471f3516ba0fab407286525cd6f Mon Sep 17 00:00:00 2001 From: Hosted Weblate Date: Thu, 7 Aug 2025 02:03:36 +0200 Subject: [PATCH 012/144] Translated using Weblate (Finnish) Currently translated at 31.5% (36 of 114 strings) Translated using Weblate (Finnish) 
Currently translated at 56.4% (35 of 62 strings) Translated using Weblate (Finnish) Currently translated at 51.5% (34 of 66 strings) Translated using Weblate (Finnish) Currently translated at 68.0% (34 of 50 strings) Translated using Weblate (Finnish) Currently translated at 13.8% (59 of 427 strings) Co-authored-by: Hosted Weblate Co-authored-by: Toni Liski Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/audio/fi/ Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/components-dialog/fi/ Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/components-filter/fi/ Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/views-facelibrary/fi/ Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/views-system/fi/ Translation: Frigate NVR/audio Translation: Frigate NVR/components-dialog Translation: Frigate NVR/components-filter Translation: Frigate NVR/views-facelibrary Translation: Frigate NVR/views-system --- web/public/locales/fi/audio.json | 3 ++- web/public/locales/fi/components/dialog.json | 6 +++++- web/public/locales/fi/components/filter.json | 5 ++++- web/public/locales/fi/views/faceLibrary.json | 6 +++++- web/public/locales/fi/views/system.json | 3 ++- 5 files changed, 18 insertions(+), 5 deletions(-) diff --git a/web/public/locales/fi/audio.json b/web/public/locales/fi/audio.json index cb54c30be..1623e89bd 100644 --- a/web/public/locales/fi/audio.json +++ b/web/public/locales/fi/audio.json @@ -57,5 +57,6 @@ "sneeze": "Niistää", "throat_clearing": "Kurkun selvittäminen", "sniff": "Poimi", - "run": "Käynnistä" + "run": "Käynnistä", + "shuffle": "Sekoitus" } diff --git a/web/public/locales/fi/components/dialog.json b/web/public/locales/fi/components/dialog.json index 4b59b0d8c..9a1ca575d 100644 --- a/web/public/locales/fi/components/dialog.json +++ b/web/public/locales/fi/components/dialog.json @@ -67,7 +67,11 @@ "streaming": { "label": "Kuvavirta", "restreaming": { - "disabled": "Uudelleentoisto ei ole 
käytettävissä tällä kameralla." + "disabled": "Uudelleentoisto ei ole käytettävissä tällä kameralla.", + "desc": { + "title": "Määritä go2rtc saadaksesi lisäreaaliaikanäkymän vaihtoehtoja ja ääntä tälle kameralle.", + "readTheDocumentation": "Lue dokumentaatio" + } } } } diff --git a/web/public/locales/fi/components/filter.json b/web/public/locales/fi/components/filter.json index 17bc1ceb2..5a21e5424 100644 --- a/web/public/locales/fi/components/filter.json +++ b/web/public/locales/fi/components/filter.json @@ -54,6 +54,9 @@ "relevance": "Olennaisuus" }, "cameras": { - "label": "Kameran suodattimet" + "label": "Kameran suodattimet", + "all": { + "title": "Kaikki kamerat" + } } } diff --git a/web/public/locales/fi/views/faceLibrary.json b/web/public/locales/fi/views/faceLibrary.json index 94f4895ca..041c7324f 100644 --- a/web/public/locales/fi/views/faceLibrary.json +++ b/web/public/locales/fi/views/faceLibrary.json @@ -56,6 +56,10 @@ "desc": "Haluatko varmasti poistaa kokoelman {{name}}? Tämä poistaa pysyvästi kaikki liitetyt kasvot." 
}, "renameFace": { - "title": "Uudelleennimeä kasvot" + "title": "Uudelleennimeä kasvot", + "desc": "Anna uusi nimi tälle {{name}}" + }, + "button": { + "deleteFaceAttempts": "Poista kasvot" } } diff --git a/web/public/locales/fi/views/system.json b/web/public/locales/fi/views/system.json index d8a32030b..5000e45c6 100644 --- a/web/public/locales/fi/views/system.json +++ b/web/public/locales/fi/views/system.json @@ -44,7 +44,8 @@ "gpuDecoder": "GPU-dekooderi", "gpuInfo": { "vainfoOutput": { - "title": "Vainfon tulostus" + "title": "Vainfon tulostus", + "returnCode": "Paluuarvo: {{code}}" }, "toast": { "success": "Kopioi GPU:n tiedot leikepöydälle" From f7184c8ed58b88634d441db3507e42a20d789b44 Mon Sep 17 00:00:00 2001 From: Hosted Weblate Date: Thu, 7 Aug 2025 05:09:51 +0200 Subject: [PATCH 013/144] Translated using Weblate (Vietnamese) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently translated at 100.0% (183 of 183 strings) Translated using Weblate (Vietnamese) Currently translated at 100.0% (114 of 114 strings) Translated using Weblate (Vietnamese) Currently translated at 100.0% (183 of 183 strings) Co-authored-by: Hosted Weblate Co-authored-by: Lâm Lê Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/common/vi/ Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/views-system/vi/ Translation: Frigate NVR/common Translation: Frigate NVR/views-system --- web/public/locales/vi/common.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web/public/locales/vi/common.json b/web/public/locales/vi/common.json index f6926a4d3..af34c5ee3 100644 --- a/web/public/locales/vi/common.json +++ b/web/public/locales/vi/common.json @@ -121,7 +121,7 @@ }, "yue": "粵語 (Tiếng Quảng Đông)", "ca": "Català (Tiếng Catalan)", - "th": "ไทย (Tiếng Thái Lan)" + "th": "ไทย (Tiếng Thái)" }, "system": "Hệ thống", "systemMetrics": "Thông số hệ thống", From 5e5beb98375449da4102d72d1dfd9db62cc947ef Mon Sep 
17 00:00:00 2001 From: Nicolas Mowen Date: Thu, 7 Aug 2025 11:08:33 -0600 Subject: [PATCH 014/144] Fixes (#19414) * Don't assume video is 16 / 9 * Don't apply docker constraints for rockchp toolkit --- docker/rockchip/Dockerfile | 2 +- web/src/hooks/use-video-dimensions.ts | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/docker/rockchip/Dockerfile b/docker/rockchip/Dockerfile index efcd38110..5375d19e5 100644 --- a/docker/rockchip/Dockerfile +++ b/docker/rockchip/Dockerfile @@ -11,7 +11,7 @@ COPY docker/main/requirements-wheels.txt /requirements-wheels.txt COPY docker/rockchip/requirements-wheels-rk.txt /requirements-wheels-rk.txt RUN sed -i "/https:\/\//d" /requirements-wheels.txt RUN sed -i "/onnxruntime/d" /requirements-wheels.txt -RUN pip3 wheel --wheel-dir=/rk-wheels -c /requirements-wheels.txt -r /requirements-wheels-rk.txt +RUN pip3 wheel --wheel-dir=/rk-wheels -r /requirements-wheels-rk.txt RUN rm -rf /rk-wheels/opencv_python-* FROM deps AS rk-frigate diff --git a/web/src/hooks/use-video-dimensions.ts b/web/src/hooks/use-video-dimensions.ts index 448dd5078..1fad71dc8 100644 --- a/web/src/hooks/use-video-dimensions.ts +++ b/web/src/hooks/use-video-dimensions.ts @@ -17,7 +17,7 @@ export function useVideoDimensions( }); const videoAspectRatio = useMemo(() => { - return videoResolution.width / videoResolution.height || 16 / 9; + return videoResolution.width / videoResolution.height; }, [videoResolution]); const containerAspectRatio = useMemo(() => { @@ -25,7 +25,7 @@ export function useVideoDimensions( }, [containerWidth, containerHeight]); const videoDimensions = useMemo(() => { - if (!containerWidth || !containerHeight) + if (!containerWidth || !containerHeight || !videoAspectRatio) return { width: "100%", height: "100%" }; if (containerAspectRatio > videoAspectRatio) { const height = containerHeight; From 4869f46ab6fcdc101c4507ad9f40e299e5b649b2 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Thu, 7 Aug 2025 15:34:25 -0600 Subject: 
[PATCH 015/144] Fixes (#19420) * Remove torch install * notification fixes the pubkey was not being returned if notifications was not enabled at the global level * Put back * single condition check for fetching and disabling button --------- Co-authored-by: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> --- docker/rockchip/Dockerfile | 3 +- frigate/api/notification.py | 7 +- .../settings/NotificationsSettingsView.tsx | 116 ++++++++++-------- 3 files changed, 72 insertions(+), 54 deletions(-) diff --git a/docker/rockchip/Dockerfile b/docker/rockchip/Dockerfile index 5375d19e5..668250439 100644 --- a/docker/rockchip/Dockerfile +++ b/docker/rockchip/Dockerfile @@ -11,8 +11,9 @@ COPY docker/main/requirements-wheels.txt /requirements-wheels.txt COPY docker/rockchip/requirements-wheels-rk.txt /requirements-wheels-rk.txt RUN sed -i "/https:\/\//d" /requirements-wheels.txt RUN sed -i "/onnxruntime/d" /requirements-wheels.txt -RUN pip3 wheel --wheel-dir=/rk-wheels -r /requirements-wheels-rk.txt +RUN pip3 wheel --wheel-dir=/rk-wheels -c /requirements-wheels.txt -r /requirements-wheels-rk.txt RUN rm -rf /rk-wheels/opencv_python-* +RUN rm -rf /rk-wheels/torch-* FROM deps AS rk-frigate ARG TARGETARCH diff --git a/frigate/api/notification.py b/frigate/api/notification.py index 7858ec1a7..96ba96fdc 100644 --- a/frigate/api/notification.py +++ b/frigate/api/notification.py @@ -21,7 +21,12 @@ router = APIRouter(tags=[Tags.notifications]) @router.get("/notifications/pubkey") def get_vapid_pub_key(request: Request): - if not request.app.frigate_config.notifications.enabled: + config = request.app.frigate_config + notifications_enabled = config.notifications.enabled + camera_notifications_enabled = [ + c for c in config.cameras.values() if c.enabled and c.notifications.enabled + ] + if not (notifications_enabled or camera_notifications_enabled): return JSONResponse( content=({"success": False, "message": "Notifications are not enabled."}), status_code=400, diff --git 
a/web/src/views/settings/NotificationsSettingsView.tsx b/web/src/views/settings/NotificationsSettingsView.tsx index 36213fc0e..82d09107a 100644 --- a/web/src/views/settings/NotificationsSettingsView.tsx +++ b/web/src/views/settings/NotificationsSettingsView.tsx @@ -118,50 +118,6 @@ export default function NotificationView({ // eslint-disable-next-line react-hooks/exhaustive-deps }, [changedValue]); - // notification key handling - - const { data: publicKey } = useSWR( - config?.notifications?.enabled ? "notifications/pubkey" : null, - { revalidateOnFocus: false }, - ); - - const subscribeToNotifications = useCallback( - (registration: ServiceWorkerRegistration) => { - if (registration) { - addMessage( - "notification_settings", - t("notification.unsavedRegistrations"), - undefined, - "registration", - ); - - registration.pushManager - .subscribe({ - userVisibleOnly: true, - applicationServerKey: publicKey, - }) - .then((pushSubscription) => { - axios - .post("notifications/register", { - sub: pushSubscription, - }) - .catch(() => { - toast.error(t("notification.toast.error.registerFailed"), { - position: "top-center", - }); - pushSubscription.unsubscribe(); - registration.unregister(); - setRegistration(null); - }); - toast.success(t("notification.toast.success.registered"), { - position: "top-center", - }); - }); - } - }, - [publicKey, addMessage, t], - ); - // notification state const [registration, setRegistration] = @@ -206,7 +162,69 @@ export default function NotificationView({ }, }); - const watchCameras = form.watch("cameras"); + const watchAllEnabled = form.watch("allEnabled"); + const watchCameras = useMemo(() => form.watch("cameras") || [], [form]); + + const anyCameraNotificationsEnabled = useMemo( + () => + config && + Object.values(config.cameras).some( + (c) => + c.enabled_in_config && + c.notifications && + c.notifications.enabled_in_config, + ), + [config], + ); + + const shouldFetchPubKey = Boolean( + config && + (config.notifications?.enabled || 
anyCameraNotificationsEnabled) && + (watchAllEnabled || + (Array.isArray(watchCameras) && watchCameras.length > 0)), + ); + + const { data: publicKey } = useSWR( + shouldFetchPubKey ? "notifications/pubkey" : null, + { revalidateOnFocus: false }, + ); + + const subscribeToNotifications = useCallback( + (registration: ServiceWorkerRegistration) => { + if (registration) { + addMessage( + "notification_settings", + t("notification.unsavedRegistrations"), + undefined, + "registration", + ); + + registration.pushManager + .subscribe({ + userVisibleOnly: true, + applicationServerKey: publicKey, + }) + .then((pushSubscription) => { + axios + .post("notifications/register", { + sub: pushSubscription, + }) + .catch(() => { + toast.error(t("notification.toast.error.registerFailed"), { + position: "top-center", + }); + pushSubscription.unsubscribe(); + registration.unregister(); + setRegistration(null); + }); + toast.success(t("notification.toast.success.registered"), { + position: "top-center", + }); + }); + } + }, + [publicKey, addMessage, t], + ); useEffect(() => { if (watchCameras.length > 0) { @@ -521,13 +539,7 @@ export default function NotificationView({ )} + {config?.cameras[search?.camera].audio_transcription.enabled && + search?.label == "speech" && + search?.end_time && ( + + )} diff --git a/web/src/pages/Explore.tsx b/web/src/pages/Explore.tsx index 09e576551..b6a4a43e5 100644 --- a/web/src/pages/Explore.tsx +++ b/web/src/pages/Explore.tsx @@ -257,15 +257,13 @@ export default function Explore() { // mutation and revalidation - const trackedObjectUpdate = useTrackedObjectUpdate(); + const { payload: wsUpdate } = useTrackedObjectUpdate(); useEffect(() => { - if (trackedObjectUpdate) { + if (wsUpdate && wsUpdate.type == "description") { mutate(); } - // mutate / revalidate when event description updates come in - // eslint-disable-next-line react-hooks/exhaustive-deps - }, [trackedObjectUpdate]); + }, [wsUpdate, mutate]); // embeddings reindex progress diff --git 
a/web/src/types/frigateConfig.ts b/web/src/types/frigateConfig.ts index 355d4cb72..cf2bf1476 100644 --- a/web/src/types/frigateConfig.ts +++ b/web/src/types/frigateConfig.ts @@ -41,6 +41,11 @@ export interface CameraConfig { min_volume: number; num_threads: number; }; + audio_transcription: { + enabled: boolean; + enabled_in_config: boolean; + live_enabled: boolean; + }; best_image_timeout: number; birdseye: { enabled: boolean; @@ -296,6 +301,10 @@ export interface FrigateConfig { num_threads: number; }; + audio_transcription: { + enabled: boolean; + }; + birdseye: BirdseyeConfig; cameras: { diff --git a/web/src/types/ws.ts b/web/src/types/ws.ts index 3badd961d..d1e810494 100644 --- a/web/src/types/ws.ts +++ b/web/src/types/ws.ts @@ -58,6 +58,7 @@ export interface FrigateCameraState { snapshots: boolean; record: boolean; audio: boolean; + audio_transcription: boolean; notifications: boolean; notifications_suspended: number; autotracking: boolean; @@ -84,3 +85,21 @@ export type EmbeddingsReindexProgressType = { }; export type ToggleableSetting = "ON" | "OFF"; + +export type TrackedObjectUpdateType = + | "description" + | "lpr" + | "transcription" + | "face"; + +export type TrackedObjectUpdateReturnType = { + type: TrackedObjectUpdateType; + id: string; + camera: string; + description?: string; + name?: string; + plate?: string; + score?: number; + timestamp?: number; + text?: string; +} | null; diff --git a/web/src/views/explore/ExploreView.tsx b/web/src/views/explore/ExploreView.tsx index f680b6566..ca13f2986 100644 --- a/web/src/views/explore/ExploreView.tsx +++ b/web/src/views/explore/ExploreView.tsx @@ -75,13 +75,13 @@ export default function ExploreView({ }, {}); }, [events]); - const trackedObjectUpdate = useTrackedObjectUpdate(); + const { payload: wsUpdate } = useTrackedObjectUpdate(); useEffect(() => { - mutate(); - // mutate / revalidate when event description updates come in - // eslint-disable-next-line react-hooks/exhaustive-deps - }, 
[trackedObjectUpdate]); + if (wsUpdate && wsUpdate.type == "description") { + mutate(); + } + }, [wsUpdate, mutate]); // update search detail when results change diff --git a/web/src/views/live/LiveCameraView.tsx b/web/src/views/live/LiveCameraView.tsx index b972e1a39..9e9e0e974 100644 --- a/web/src/views/live/LiveCameraView.tsx +++ b/web/src/views/live/LiveCameraView.tsx @@ -1,5 +1,7 @@ import { + useAudioLiveTranscription, useAudioState, + useAudioTranscriptionState, useAutotrackingState, useDetectState, useEnabledState, @@ -90,6 +92,8 @@ import { LuX, } from "react-icons/lu"; import { + MdClosedCaption, + MdClosedCaptionDisabled, MdNoPhotography, MdOutlineRestartAlt, MdPersonOff, @@ -197,6 +201,29 @@ export default function LiveCameraView({ const { payload: enabledState } = useEnabledState(camera.name); const cameraEnabled = enabledState === "ON"; + // for audio transcriptions + + const { payload: audioTranscriptionState, send: sendTranscription } = + useAudioTranscriptionState(camera.name); + const { payload: transcription } = useAudioLiveTranscription(camera.name); + const transcriptionRef = useRef(null); + + useEffect(() => { + if (transcription) { + if (transcriptionRef.current) { + transcriptionRef.current.scrollTop = + transcriptionRef.current.scrollHeight; + } + } + }, [transcription]); + + useEffect(() => { + return () => { + // disable transcriptions when unmounting + if (audioTranscriptionState == "ON") sendTranscription("OFF"); + }; + }, [audioTranscriptionState, sendTranscription]); + // click overlay for ptzs const [clickOverlay, setClickOverlay] = useState(false); @@ -567,6 +594,9 @@ export default function LiveCameraView({ autotrackingEnabled={ camera.onvif.autotracking.enabled_in_config } + transcriptionEnabled={ + camera.audio_transcription.enabled_in_config + } fullscreen={fullscreen} streamName={streamName ?? 
""} setStreamName={setStreamName} @@ -626,6 +656,16 @@ export default function LiveCameraView({ /> + {camera?.audio?.enabled_in_config && + audioTranscriptionState == "ON" && + transcription != null && ( +
    + {transcription} +
    + )} {camera.onvif.host != "" && ( @@ -984,6 +1024,7 @@ type FrigateCameraFeaturesProps = { recordingEnabled: boolean; audioDetectEnabled: boolean; autotrackingEnabled: boolean; + transcriptionEnabled: boolean; fullscreen: boolean; streamName: string; setStreamName?: (value: string | undefined) => void; @@ -1003,6 +1044,7 @@ function FrigateCameraFeatures({ recordingEnabled, audioDetectEnabled, autotrackingEnabled, + transcriptionEnabled, fullscreen, streamName, setStreamName, @@ -1035,6 +1077,8 @@ function FrigateCameraFeatures({ const { payload: audioState, send: sendAudio } = useAudioState(camera.name); const { payload: autotrackingState, send: sendAutotracking } = useAutotrackingState(camera.name); + const { payload: transcriptionState, send: sendTranscription } = + useAudioTranscriptionState(camera.name); // roles @@ -1198,6 +1242,27 @@ function FrigateCameraFeatures({ disabled={!cameraEnabled} /> )} + {audioDetectEnabled && transcriptionEnabled && ( + + sendTranscription(transcriptionState == "ON" ? "OFF" : "ON") + } + disabled={!cameraEnabled || audioState == "OFF"} + /> + )} {autotrackingEnabled && ( )} + {audioDetectEnabled && transcriptionEnabled && ( + + sendTranscription(transcriptionState == "ON" ? 
"OFF" : "ON") + } + /> + )} {autotrackingEnabled && ( Date: Thu, 29 May 2025 17:51:32 -0600 Subject: [PATCH 042/144] Implement API to train classification models (#18475) --- docker/main/Dockerfile | 3 + docker/main/requirements-wheels.txt | 3 + frigate/api/classification.py | 35 +++++- frigate/config/classification.py | 3 +- frigate/config/config.py | 4 + .../real_time/custom_classification.py | 78 ++++++++++--- frigate/embeddings/maintainer.py | 4 +- frigate/util/classification.py | 108 ++++++++++++++++++ 8 files changed, 219 insertions(+), 19 deletions(-) create mode 100644 frigate/util/classification.py diff --git a/docker/main/Dockerfile b/docker/main/Dockerfile index 1cf752ed5..90e174d10 100644 --- a/docker/main/Dockerfile +++ b/docker/main/Dockerfile @@ -227,6 +227,9 @@ ENV OPENCV_FFMPEG_LOGLEVEL=8 # Set HailoRT to disable logging ENV HAILORT_LOGGER_PATH=NONE +# TensorFlow error only +ENV TF_CPP_MIN_LOG_LEVEL=3 + ENV PATH="/usr/local/go2rtc/bin:/usr/local/tempio/bin:/usr/local/nginx/sbin:${PATH}" # Install dependencies diff --git a/docker/main/requirements-wheels.txt b/docker/main/requirements-wheels.txt index 59cc1ab9c..624983eb4 100644 --- a/docker/main/requirements-wheels.txt +++ b/docker/main/requirements-wheels.txt @@ -11,6 +11,9 @@ joserfc == 1.0.* pathvalidate == 3.2.* markupsafe == 3.0.* python-multipart == 0.0.12 +# Classification Model Training +tensorflow == 2.19.* ; platform_machine == 'aarch64' +tensorflow-cpu == 2.19.* ; platform_machine == 'x86_64' # General mypy == 1.6.1 onvif-zeep-async == 3.1.* diff --git a/frigate/api/classification.py b/frigate/api/classification.py index 19afd3a9a..98b716c67 100644 --- a/frigate/api/classification.py +++ b/frigate/api/classification.py @@ -7,7 +7,7 @@ import shutil from typing import Any import cv2 -from fastapi import APIRouter, Depends, Request, UploadFile +from fastapi import APIRouter, BackgroundTasks, Depends, Request, UploadFile from fastapi.responses import JSONResponse from pathvalidate import 
sanitize_filename from peewee import DoesNotExist @@ -19,10 +19,12 @@ from frigate.api.defs.request.classification_body import ( RenameFaceBody, ) from frigate.api.defs.tags import Tags +from frigate.config import FrigateConfig from frigate.config.camera import DetectConfig -from frigate.const import FACE_DIR +from frigate.const import FACE_DIR, MODEL_CACHE_DIR from frigate.embeddings import EmbeddingsContext from frigate.models import Event +from frigate.util.classification import train_classification_model from frigate.util.path import get_event_snapshot logger = logging.getLogger(__name__) @@ -442,3 +444,32 @@ def transcribe_audio(request: Request, body: AudioTranscriptionBody): }, status_code=500, ) + + +# custom classification training + + +@router.post("/classification/{name}/train") +async def train_configured_model( + request: Request, name: str, background_tasks: BackgroundTasks +): + config: FrigateConfig = request.app.frigate_config + + if name not in config.classification.custom: + return JSONResponse( + content=( + { + "success": False, + "message": f"{name} is not a known classification model.", + } + ), + status_code=404, + ) + + background_tasks.add_task( + train_classification_model, os.path.join(MODEL_CACHE_DIR, name) + ) + return JSONResponse( + content={"success": True, "message": "Started classification model training."}, + status_code=200, + ) diff --git a/frigate/config/classification.py b/frigate/config/classification.py index cd20a63ad..40a1183cd 100644 --- a/frigate/config/classification.py +++ b/frigate/config/classification.py @@ -85,8 +85,7 @@ class CustomClassificationObjectConfig(FrigateBaseModel): class CustomClassificationConfig(FrigateBaseModel): enabled: bool = Field(default=True, title="Enable running the model.") - model_path: str = Field(title="Path to custom classification tflite model.") - labelmap_path: str = Field(title="Path to custom classification model labelmap.") + name: str | None = Field(default=None, title="Name of 
classification model.") object_config: CustomClassificationObjectConfig | None = Field(default=None) state_config: CustomClassificationStateConfig | None = Field(default=None) diff --git a/frigate/config/config.py b/frigate/config/config.py index 5bca436b6..d912a574d 100644 --- a/frigate/config/config.py +++ b/frigate/config/config.py @@ -706,6 +706,10 @@ class FrigateConfig(FrigateBaseModel): verify_objects_track(camera_config, labelmap_objects) verify_lpr_and_face(self, camera_config) + # set names on classification configs + for name, config in self.classification.custom.items(): + config.name = name + self.objects.parse_all_objects(self.cameras) self.model.create_colormap(sorted(self.objects.all_objects)) self.model.check_and_load_plus_model(self.plus_api) diff --git a/frigate/data_processing/real_time/custom_classification.py b/frigate/data_processing/real_time/custom_classification.py index cd99508c9..f94c2b28c 100644 --- a/frigate/data_processing/real_time/custom_classification.py +++ b/frigate/data_processing/real_time/custom_classification.py @@ -2,6 +2,7 @@ import datetime import logging +import os from typing import Any import cv2 @@ -14,6 +15,7 @@ from frigate.comms.event_metadata_updater import ( from frigate.comms.inter_process import InterProcessRequestor from frigate.config import FrigateConfig from frigate.config.classification import CustomClassificationConfig +from frigate.const import CLIPS_DIR, MODEL_CACHE_DIR from frigate.util.builtin import load_labels from frigate.util.object import box_overlaps, calculate_region @@ -33,14 +35,14 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi): self, config: FrigateConfig, model_config: CustomClassificationConfig, - name: str, requestor: InterProcessRequestor, metrics: DataProcessorMetrics, ): super().__init__(config, metrics) self.model_config = model_config - self.name = name self.requestor = requestor + self.model_dir = os.path.join(MODEL_CACHE_DIR, self.model_config.name) + 
self.train_dir = os.path.join(CLIPS_DIR, self.model_config.name) self.interpreter: Interpreter = None self.tensor_input_details: dict[str, Any] = None self.tensor_output_details: dict[str, Any] = None @@ -50,13 +52,16 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi): def __build_detector(self) -> None: self.interpreter = Interpreter( - model_path=self.model_config.model_path, + model_path=os.path.join(self.model_dir, "model.tflite"), num_threads=2, ) self.interpreter.allocate_tensors() self.tensor_input_details = self.interpreter.get_input_details() self.tensor_output_details = self.interpreter.get_output_details() - self.labelmap = load_labels(self.model_config.labelmap_path, prefill=0) + self.labelmap = load_labels( + os.path.join(self.model_dir, "labelmap.txt"), + prefill=0, + ) def process_frame(self, frame_data: dict[str, Any], frame: np.ndarray): camera = frame_data.get("camera") @@ -105,15 +110,15 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi): ) rgb = cv2.cvtColor(frame, cv2.COLOR_YUV2RGB_I420) - input = rgb[ + frame = rgb[ y:y2, x:x2, ] - if input.shape != (224, 224): - input = cv2.resize(input, (224, 224)) + if frame.shape != (224, 224): + frame = cv2.resize(frame, (224, 224)) - input = np.expand_dims(input, axis=0) + input = np.expand_dims(frame, axis=0) self.interpreter.set_tensor(self.tensor_input_details[0]["index"], input) self.interpreter.invoke() res: np.ndarray = self.interpreter.get_tensor( @@ -123,9 +128,18 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi): best_id = np.argmax(probs) score = round(probs[best_id], 2) + write_classification_attempt( + self.train_dir, + cv2.cvtColor(frame, cv2.COLOR_RGB2BGR), + now, + self.labelmap[best_id], + score, + ) + if score >= camera_config.threshold: self.requestor.send_data( - f"{camera}/classification/{self.name}", self.labelmap[best_id] + f"{camera}/classification/{self.model_config.name}", + self.labelmap[best_id], ) def handle_request(self, topic, 
request_data): @@ -145,6 +159,8 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi): ): super().__init__(config, metrics) self.model_config = model_config + self.model_dir = os.path.join(MODEL_CACHE_DIR, self.model_config.name) + self.train_dir = os.path.join(self.model_dir, "train") self.interpreter: Interpreter = None self.sub_label_publisher = sub_label_publisher self.tensor_input_details: dict[str, Any] = None @@ -155,18 +171,22 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi): def __build_detector(self) -> None: self.interpreter = Interpreter( - model_path=self.model_config.model_path, + model_path=os.path.join(self.model_dir, "model.tflite"), num_threads=2, ) self.interpreter.allocate_tensors() self.tensor_input_details = self.interpreter.get_input_details() self.tensor_output_details = self.interpreter.get_output_details() - self.labelmap = load_labels(self.model_config.labelmap_path, prefill=0) + self.labelmap = load_labels( + os.path.join(self.model_dir, "labelmap.txt"), + prefill=0, + ) def process_frame(self, obj_data, frame): if obj_data["label"] not in self.model_config.object_config.objects: return + now = datetime.datetime.now().timestamp() x, y, x2, y2 = calculate_region( frame.shape, obj_data["box"][0], @@ -194,11 +214,17 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi): )[0] probs = res / res.sum(axis=0) best_id = np.argmax(probs) - score = round(probs[best_id], 2) - previous_score = self.detected_objects.get(obj_data["id"], 0.0) + write_classification_attempt( + self.train_dir, + cv2.cvtColor(frame, cv2.COLOR_RGB2BGR), + now, + self.labelmap[best_id], + score, + ) + if score <= previous_score: logger.debug(f"Score {score} is worse than previous score {previous_score}") return @@ -215,3 +241,29 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi): def expire_object(self, object_id, camera): if object_id in self.detected_objects: self.detected_objects.pop(object_id) + + +@staticmethod 
+def write_classification_attempt( + folder: str, + frame: np.ndarray, + timestamp: float, + label: str, + score: float, +) -> None: + if "-" in label: + label = label.replace("-", "_") + + file = os.path.join(folder, f"{timestamp}-{label}-{score}.webp") + os.makedirs(folder, exist_ok=True) + cv2.imwrite(file, frame) + + files = sorted( + filter(lambda f: (f.endswith(".webp")), os.listdir(folder)), + key=lambda f: os.path.getctime(os.path.join(folder, f)), + reverse=True, + ) + + # delete oldest face image if maximum is reached + if len(files) > 100: + os.unlink(os.path.join(folder, files[-1])) diff --git a/frigate/embeddings/maintainer.py b/frigate/embeddings/maintainer.py index 25601f014..9a2378221 100644 --- a/frigate/embeddings/maintainer.py +++ b/frigate/embeddings/maintainer.py @@ -150,10 +150,10 @@ class EmbeddingMaintainer(threading.Thread): ) ) - for name, model_config in self.config.classification.custom.items(): + for model_config in self.config.classification.custom.values(): self.realtime_processors.append( CustomStateClassificationProcessor( - self.config, model_config, name, self.requestor, self.metrics + self.config, model_config, self.requestor, self.metrics ) if model_config.state_config != None else CustomObjectClassificationProcessor( diff --git a/frigate/util/classification.py b/frigate/util/classification.py new file mode 100644 index 000000000..4ee5e1d54 --- /dev/null +++ b/frigate/util/classification.py @@ -0,0 +1,108 @@ +"""Util for classification models.""" + +import os + +import cv2 +import numpy as np +import tensorflow as tf +from tensorflow.keras import layers, models, optimizers +from tensorflow.keras.applications import MobileNetV2 +from tensorflow.keras.preprocessing.image import ImageDataGenerator + +BATCH_SIZE = 16 +EPOCHS = 50 +LEARNING_RATE = 0.001 + + +@staticmethod +def generate_representative_dataset_factory(dataset_dir: str): + def generate_representative_dataset(): + image_paths = [] + for root, dirs, files in 
os.walk(dataset_dir): + for file in files: + if file.lower().endswith((".jpg", ".jpeg", ".png")): + image_paths.append(os.path.join(root, file)) + + for path in image_paths[:300]: + img = cv2.imread(path) + img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) + img = cv2.resize(img, (224, 224)) + img_array = np.array(img, dtype=np.float32) / 255.0 + img_array = img_array[None, ...] + yield [img_array] + + return generate_representative_dataset + + +@staticmethod +def train_classification_model(model_dir: str) -> bool: + """Train a classification model.""" + dataset_dir = os.path.join(model_dir, "dataset") + num_classes = len( + [ + d + for d in os.listdir(dataset_dir) + if os.path.isdir(os.path.join(dataset_dir, d)) + ] + ) + + # Start with imagenet base model with 35% of channels in each layer + base_model = MobileNetV2( + input_shape=(224, 224, 3), + include_top=False, + weights="imagenet", + alpha=0.35, + ) + base_model.trainable = False # Freeze pre-trained layers + + model = models.Sequential( + [ + base_model, + layers.GlobalAveragePooling2D(), + layers.Dense(128, activation="relu"), + layers.Dropout(0.3), + layers.Dense(num_classes, activation="softmax"), + ] + ) + + model.compile( + optimizer=optimizers.Adam(learning_rate=LEARNING_RATE), + loss="categorical_crossentropy", + metrics=["accuracy"], + ) + + # create training set + datagen = ImageDataGenerator(rescale=1.0 / 255, validation_split=0.2) + train_gen = datagen.flow_from_directory( + dataset_dir, + target_size=(224, 224), + batch_size=BATCH_SIZE, + class_mode="categorical", + subset="training", + ) + + # write labelmap + class_indices = train_gen.class_indices + index_to_class = {v: k for k, v in class_indices.items()} + sorted_classes = [index_to_class[i] for i in range(len(index_to_class))] + with open(os.path.join(model_dir, "labelmap.txt"), "w") as f: + for class_name in sorted_classes: + f.write(f"{class_name}\n") + + # train the model + model.fit(train_gen, epochs=EPOCHS, verbose=0) + + # convert model to 
tflite + converter = tf.lite.TFLiteConverter.from_keras_model(model) + converter.optimizations = [tf.lite.Optimize.DEFAULT] + converter.representative_dataset = generate_representative_dataset_factory( + dataset_dir + ) + converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8] + converter.inference_input_type = tf.uint8 + converter.inference_output_type = tf.uint8 + tflite_model = converter.convert() + + # write model + with open(os.path.join(model_dir, "model.tflite"), "wb") as f: + f.write(tflite_model) From 0b9997015a94eb67b51b9e8f890f692490796d9d Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Fri, 30 May 2025 15:30:21 -0600 Subject: [PATCH 043/144] Intel updates (#18493) * Update openvino and onnxruntime * Install icd and level-zero-gpu deps from intel directly * Install * Add dep * Fix package install --- docker/main/install_deps.sh | 26 ++++++++++++++++++++++++-- docker/main/requirements-wheels.txt | 6 +++--- docker/tensorrt/requirements-amd64.txt | 2 +- 3 files changed, 28 insertions(+), 6 deletions(-) diff --git a/docker/main/install_deps.sh b/docker/main/install_deps.sh index 9684199f8..aed11dff4 100755 --- a/docker/main/install_deps.sh +++ b/docker/main/install_deps.sh @@ -71,11 +71,33 @@ if [[ "${TARGETARCH}" == "amd64" ]]; then echo "deb [arch=amd64 signed-by=/usr/share/keyrings/intel-graphics.gpg] https://repositories.intel.com/gpu/ubuntu jammy client" | tee /etc/apt/sources.list.d/intel-gpu-jammy.list apt-get -qq update apt-get -qq install --no-install-recommends --no-install-suggests -y \ - intel-opencl-icd=24.35.30872.31-996~22.04 intel-level-zero-gpu=1.3.29735.27-914~22.04 intel-media-va-driver-non-free=24.3.3-996~22.04 \ - libmfx1=23.2.2-880~22.04 libmfxgen1=24.2.4-914~22.04 libvpl2=1:2.13.0.0-996~22.04 + intel-media-va-driver-non-free libmfx1 libmfxgen1 libvpl2 + + apt-get -qq install -y ocl-icd-libopencl1 rm -f /usr/share/keyrings/intel-graphics.gpg rm -f /etc/apt/sources.list.d/intel-gpu-jammy.list + + # install legacy 
and standard intel icd and level-zero-gpu + # see https://github.com/intel/compute-runtime/blob/master/LEGACY_PLATFORMS.md for more info + # needed core package + wget https://github.com/intel/compute-runtime/releases/download/24.52.32224.5/libigdgmm12_22.5.5_amd64.deb + dpkg -i libigdgmm12_22.5.5_amd64.deb + rm libigdgmm12_22.5.5_amd64.deb + + # legacy packages + wget https://github.com/intel/compute-runtime/releases/download/24.35.30872.22/intel-opencl-icd-legacy1_24.35.30872.22_amd64.deb + wget https://github.com/intel/compute-runtime/releases/download/24.35.30872.22/intel-level-zero-gpu-legacy1_1.3.30872.22_amd64.deb + wget https://github.com/intel/intel-graphics-compiler/releases/download/igc-1.0.17537.20/intel-igc-opencl_1.0.17537.20_amd64.deb + wget https://github.com/intel/intel-graphics-compiler/releases/download/igc-1.0.17537.20/intel-igc-core_1.0.17537.20_amd64.deb + # standard packages + wget https://github.com/intel/compute-runtime/releases/download/24.52.32224.5/intel-opencl-icd_24.52.32224.5_amd64.deb + wget https://github.com/intel/compute-runtime/releases/download/24.52.32224.5/intel-level-zero-gpu_1.6.32224.5_amd64.deb + wget https://github.com/intel/intel-graphics-compiler/releases/download/v2.5.6/intel-igc-opencl-2_2.5.6+18417_amd64.deb + wget https://github.com/intel/intel-graphics-compiler/releases/download/v2.5.6/intel-igc-core-2_2.5.6+18417_amd64.deb + + dpkg -i *.deb + rm *.deb fi if [[ "${TARGETARCH}" == "arm64" ]]; then diff --git a/docker/main/requirements-wheels.txt b/docker/main/requirements-wheels.txt index 624983eb4..eabb75bef 100644 --- a/docker/main/requirements-wheels.txt +++ b/docker/main/requirements-wheels.txt @@ -41,9 +41,9 @@ opencv-python-headless == 4.11.0.* opencv-contrib-python == 4.11.0.* scipy == 1.14.* # OpenVino & ONNX -openvino == 2024.4.* -onnxruntime-openvino == 1.20.* ; platform_machine == 'x86_64' -onnxruntime == 1.20.* ; platform_machine == 'aarch64' +openvino == 2025.1.* +onnxruntime-openvino == 1.22.* ; 
platform_machine == 'x86_64' +onnxruntime == 1.22.* ; platform_machine == 'aarch64' # Embeddings transformers == 4.45.* # Generative AI diff --git a/docker/tensorrt/requirements-amd64.txt b/docker/tensorrt/requirements-amd64.txt index be4aaa066..63c68b583 100644 --- a/docker/tensorrt/requirements-amd64.txt +++ b/docker/tensorrt/requirements-amd64.txt @@ -14,5 +14,5 @@ nvidia_cusparse_cu12==12.5.1.*; platform_machine == 'x86_64' nvidia_nccl_cu12==2.23.4; platform_machine == 'x86_64' nvidia_nvjitlink_cu12==12.5.82; platform_machine == 'x86_64' onnx==1.16.*; platform_machine == 'x86_64' -onnxruntime-gpu==1.20.*; platform_machine == 'x86_64' +onnxruntime-gpu==1.22.*; platform_machine == 'x86_64' protobuf==3.20.3; platform_machine == 'x86_64' From 3f8ec723366c009b15ff729c31f606d3aba56e8b Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Fri, 30 May 2025 17:01:39 -0600 Subject: [PATCH 044/144] Tiered recordings (#18492) * Implement tiered recording * Add migration for record config * Update docs * Update reference docs * Fix preview query * Fix incorrect accesses * Fix * Fix * Fix * Fix --- docs/docs/configuration/record.md | 52 +++++------------------- docs/docs/configuration/reference.md | 16 ++++---- frigate/config/camera/record.py | 28 ++++++++----- frigate/config/config.py | 30 +------------- frigate/record/cleanup.py | 42 +++++++++++++------ frigate/record/maintainer.py | 25 +++++++----- frigate/util/config.py | 60 +++++++++++++++++++++++++++- 7 files changed, 143 insertions(+), 110 deletions(-) diff --git a/docs/docs/configuration/record.md b/docs/docs/configuration/record.md index 52c0f0c88..2745ef27d 100644 --- a/docs/docs/configuration/record.md +++ b/docs/docs/configuration/record.md @@ -13,14 +13,15 @@ H265 recordings can be viewed in Chrome 108+, Edge and Safari only. 
All other br ### Most conservative: Ensure all video is saved -For users deploying Frigate in environments where it is important to have contiguous video stored even if there was no detectable motion, the following config will store all video for 3 days. After 3 days, only video containing motion and overlapping with alerts or detections will be retained until 30 days have passed. +For users deploying Frigate in environments where it is important to have contiguous video stored even if there was no detectable motion, the following config will store all video for 3 days. After 3 days, only video containing motion will be saved for 7 days. After 7 days, only video containing motion and overlapping with alerts or detections will be retained until 30 days have passed. ```yaml record: enabled: True - retain: + continuous: days: 3 - mode: all + motion: + days: 7 alerts: retain: days: 30 @@ -38,9 +39,8 @@ In order to reduce storage requirements, you can adjust your config to only reta ```yaml record: enabled: True - retain: + motion: days: 3 - mode: motion alerts: retain: days: 30 @@ -58,7 +58,7 @@ If you only want to retain video that occurs during a tracked object, this confi ```yaml record: enabled: True - retain: + continuous: days: 0 alerts: retain: @@ -80,15 +80,17 @@ Retention configs support decimals meaning they can be configured to retain `0.5 ::: -### Continuous Recording +### Continuous and Motion Recording -The number of days to retain continuous recordings can be set via the following config where X is a number, by default continuous recording is disabled. +The number of days to retain continuous and motion recordings can be set via the following config where X is a number, by default continuous recording is disabled. 
```yaml record: enabled: True - retain: + continuous: days: 1 # <- number of days to keep continuous recordings + motion: + days: 2 # <- number of days to keep motion recordings ``` Continuous recording supports different retention modes [which are described below](#what-do-the-different-retain-modes-mean) @@ -112,38 +114,6 @@ This configuration will retain recording segments that overlap with alerts and d **WARNING**: Recordings still must be enabled in the config. If a camera has recordings disabled in the config, enabling via the methods listed above will have no effect. -## What do the different retain modes mean? - -Frigate saves from the stream with the `record` role in 10 second segments. These options determine which recording segments are kept for continuous recording (but can also affect tracked objects). - -Let's say you have Frigate configured so that your doorbell camera would retain the last **2** days of continuous recording. - -- With the `all` option all 48 hours of those two days would be kept and viewable. -- With the `motion` option the only parts of those 48 hours would be segments that Frigate detected motion. This is the middle ground option that won't keep all 48 hours, but will likely keep all segments of interest along with the potential for some extra segments. -- With the `active_objects` option the only segments that would be kept are those where there was a true positive object that was not considered stationary. - -The same options are available with alerts and detections, except it will only save the recordings when it overlaps with a review item of that type. 
- -A configuration example of the above retain modes where all `motion` segments are stored for 7 days and `active objects` are stored for 14 days would be as follows: - -```yaml -record: - enabled: True - retain: - days: 7 - mode: motion - alerts: - retain: - days: 14 - mode: active_objects - detections: - retain: - days: 14 - mode: active_objects -``` - -The above configuration example can be added globally or on a per camera basis. - ## Can I have "continuous" recordings, but only at certain times? Using Frigate UI, Home Assistant, or MQTT, cameras can be automated to only record in certain situations or at certain times. diff --git a/docs/docs/configuration/reference.md b/docs/docs/configuration/reference.md index ab6374452..4be10000d 100644 --- a/docs/docs/configuration/reference.md +++ b/docs/docs/configuration/reference.md @@ -440,18 +440,18 @@ record: expire_interval: 60 # Optional: Two-way sync recordings database with disk on startup and once a day (default: shown below). sync_recordings: False - # Optional: Retention settings for recording - retain: + # Optional: Continuous retention settings + continuous: + # Optional: Number of days to retain recordings regardless of tracked objects or motion (default: shown below) + # NOTE: This should be set to 0 and retention should be defined in alerts and detections section below + # if you only want to retain recordings of alerts and detections. + days: 0 + # Optional: Motion retention settings + motion: # Optional: Number of days to retain recordings regardless of tracked objects (default: shown below) # NOTE: This should be set to 0 and retention should be defined in alerts and detections section below # if you only want to retain recordings of alerts and detections. days: 0 - # Optional: Mode for retention. 
Available options are: all, motion, and active_objects - # all - save all recording segments regardless of activity - # motion - save all recordings segments with any detected motion - # active_objects - save all recording segments with active/moving objects - # NOTE: this mode only applies when the days setting above is greater than 0 - mode: all # Optional: Recording Export Settings export: # Optional: Timelapse Output Args (default: shown below). diff --git a/frigate/config/camera/record.py b/frigate/config/camera/record.py index 52d11e2a5..09a7a84d5 100644 --- a/frigate/config/camera/record.py +++ b/frigate/config/camera/record.py @@ -22,27 +22,31 @@ __all__ = [ DEFAULT_TIME_LAPSE_FFMPEG_ARGS = "-vf setpts=0.04*PTS -r 30" +class RecordRetainConfig(FrigateBaseModel): + days: float = Field(default=0, ge=0, title="Default retention period.") + + class RetainModeEnum(str, Enum): all = "all" motion = "motion" active_objects = "active_objects" -class RecordRetainConfig(FrigateBaseModel): - days: float = Field(default=0, title="Default retention period.") - mode: RetainModeEnum = Field(default=RetainModeEnum.all, title="Retain mode.") - - class ReviewRetainConfig(FrigateBaseModel): - days: float = Field(default=10, title="Default retention period.") + days: float = Field(default=10, ge=0, title="Default retention period.") mode: RetainModeEnum = Field(default=RetainModeEnum.motion, title="Retain mode.") class EventsConfig(FrigateBaseModel): pre_capture: int = Field( - default=5, title="Seconds to retain before event starts.", le=MAX_PRE_CAPTURE + default=5, + title="Seconds to retain before event starts.", + le=MAX_PRE_CAPTURE, + ge=0, + ) + post_capture: int = Field( + default=5, ge=0, title="Seconds to retain after event ends." ) - post_capture: int = Field(default=5, title="Seconds to retain after event ends.") retain: ReviewRetainConfig = Field( default_factory=ReviewRetainConfig, title="Event retention settings." 
) @@ -77,8 +81,12 @@ class RecordConfig(FrigateBaseModel): default=60, title="Number of minutes to wait between cleanup runs.", ) - retain: RecordRetainConfig = Field( - default_factory=RecordRetainConfig, title="Record retention settings." + continuous: RecordRetainConfig = Field( + default_factory=RecordRetainConfig, + title="Continuous recording retention settings.", + ) + motion: RecordRetainConfig = Field( + default_factory=RecordRetainConfig, title="Motion recording retention settings." ) detections: EventsConfig = Field( default_factory=EventsConfig, title="Detection specific retention settings." diff --git a/frigate/config/config.py b/frigate/config/config.py index d912a574d..49e57f3cf 100644 --- a/frigate/config/config.py +++ b/frigate/config/config.py @@ -48,7 +48,7 @@ from .camera.genai import GenAIConfig from .camera.motion import MotionConfig from .camera.notification import NotificationConfig from .camera.objects import FilterConfig, ObjectConfig -from .camera.record import RecordConfig, RetainModeEnum +from .camera.record import RecordConfig from .camera.review import ReviewConfig from .camera.snapshots import SnapshotsConfig from .camera.timestamp import TimestampStyleConfig @@ -204,33 +204,6 @@ def verify_valid_live_stream_names( ) -def verify_recording_retention(camera_config: CameraConfig) -> None: - """Verify that recording retention modes are ranked correctly.""" - rank_map = { - RetainModeEnum.all: 0, - RetainModeEnum.motion: 1, - RetainModeEnum.active_objects: 2, - } - - if ( - camera_config.record.retain.days != 0 - and rank_map[camera_config.record.retain.mode] - > rank_map[camera_config.record.alerts.retain.mode] - ): - logger.warning( - f"{camera_config.name}: Recording retention is configured for {camera_config.record.retain.mode} and alert retention is configured for {camera_config.record.alerts.retain.mode}. The more restrictive retention policy will be applied." 
- ) - - if ( - camera_config.record.retain.days != 0 - and rank_map[camera_config.record.retain.mode] - > rank_map[camera_config.record.detections.retain.mode] - ): - logger.warning( - f"{camera_config.name}: Recording retention is configured for {camera_config.record.retain.mode} and detection retention is configured for {camera_config.record.detections.retain.mode}. The more restrictive retention policy will be applied." - ) - - def verify_recording_segments_setup_with_reasonable_time( camera_config: CameraConfig, ) -> None: @@ -697,7 +670,6 @@ class FrigateConfig(FrigateBaseModel): verify_config_roles(camera_config) verify_valid_live_stream_names(self, camera_config) - verify_recording_retention(camera_config) verify_recording_segments_setup_with_reasonable_time(camera_config) verify_zone_objects_are_tracked(camera_config) verify_required_zones_exist(camera_config) diff --git a/frigate/record/cleanup.py b/frigate/record/cleanup.py index 1de08a899..9d1e28306 100644 --- a/frigate/record/cleanup.py +++ b/frigate/record/cleanup.py @@ -100,7 +100,11 @@ class RecordingCleanup(threading.Thread): ).execute() def expire_existing_camera_recordings( - self, expire_date: float, config: CameraConfig, reviews: ReviewSegment + self, + continuous_expire_date: float, + motion_expire_date: float, + config: CameraConfig, + reviews: ReviewSegment, ) -> None: """Delete recordings for existing camera based on retention config.""" # Get the timestamp for cutoff of retained days @@ -116,8 +120,14 @@ class RecordingCleanup(threading.Thread): Recordings.motion, ) .where( - Recordings.camera == config.name, - Recordings.end_time < expire_date, + (Recordings.camera == config.name) + & ( + ( + (Recordings.end_time < continuous_expire_date) + & (Recordings.motion == 0) + ) + | (Recordings.end_time < motion_expire_date) + ) ) .order_by(Recordings.start_time) .namedtuples() @@ -188,7 +198,7 @@ class RecordingCleanup(threading.Thread): Recordings.id << deleted_recordings_list[i : i + 
max_deletes] ).execute() - previews: Previews = ( + previews: list[Previews] = ( Previews.select( Previews.id, Previews.start_time, @@ -196,8 +206,9 @@ class RecordingCleanup(threading.Thread): Previews.path, ) .where( - Previews.camera == config.name, - Previews.end_time < expire_date, + (Previews.camera == config.name) + & (Previews.end_time < continuous_expire_date) + & (Previews.end_time < motion_expire_date) ) .order_by(Previews.start_time) .namedtuples() @@ -253,7 +264,9 @@ class RecordingCleanup(threading.Thread): logger.debug("Start deleted cameras.") # Handle deleted cameras - expire_days = self.config.record.retain.days + expire_days = max( + self.config.record.continuous.days, self.config.record.motion.days + ) expire_before = ( datetime.datetime.now() - datetime.timedelta(days=expire_days) ).timestamp() @@ -291,9 +304,12 @@ class RecordingCleanup(threading.Thread): now = datetime.datetime.now() self.expire_review_segments(config, now) - - expire_days = config.record.retain.days - expire_date = (now - datetime.timedelta(days=expire_days)).timestamp() + continuous_expire_date = ( + now - datetime.timedelta(days=config.record.continuous.days) + ).timestamp() + motion_expire_date = ( + now - datetime.timedelta(days=config.record.motion.days) + ).timestamp() # Get all the reviews to check against reviews: ReviewSegment = ( @@ -306,13 +322,15 @@ class RecordingCleanup(threading.Thread): ReviewSegment.camera == camera, # need to ensure segments for all reviews starting # before the expire date are included - ReviewSegment.start_time < expire_date, + ReviewSegment.start_time < motion_expire_date, ) .order_by(ReviewSegment.start_time) .namedtuples() ) - self.expire_existing_camera_recordings(expire_date, config, reviews) + self.expire_existing_camera_recordings( + continuous_expire_date, motion_expire_date, config, reviews + ) logger.debug(f"End camera: {camera}.") logger.debug("End all cameras.") diff --git a/frigate/record/maintainer.py 
b/frigate/record/maintainer.py index 7f13451d6..ace9a5d24 100644 --- a/frigate/record/maintainer.py +++ b/frigate/record/maintainer.py @@ -285,12 +285,16 @@ class RecordingMaintainer(threading.Thread): Path(cache_path).unlink(missing_ok=True) return - # if cached file's start_time is earlier than the retain days for the camera - # meaning continuous recording is not enabled - if start_time <= ( - datetime.datetime.now().astimezone(datetime.timezone.utc) - - datetime.timedelta(days=self.config.cameras[camera].record.retain.days) - ): + record_config = self.config.cameras[camera].record + highest = None + + if record_config.continuous.days > 0: + highest = "continuous" + elif record_config.motion.days > 0: + highest = "motion" + + # continuous / motion recording is not enabled + if highest is None: # if the cached segment overlaps with the review items: overlaps = False for review in reviews: @@ -344,8 +348,7 @@ class RecordingMaintainer(threading.Thread): ).astimezone(datetime.timezone.utc) if end_time < retain_cutoff: self.drop_segment(cache_path) - # else retain days includes this segment - # meaning continuous recording is enabled + # continuous / motion is enabled else: # assume that empty means the relevant recording info has not been received yet camera_info = self.object_recordings_info[camera] @@ -360,7 +363,11 @@ class RecordingMaintainer(threading.Thread): ).astimezone(datetime.timezone.utc) >= end_time ): - record_mode = self.config.cameras[camera].record.retain.mode + record_mode = ( + RetainModeEnum.all + if highest == "continuous" + else RetainModeEnum.motion + ) return await self.move_segment( camera, start_time, end_time, duration, cache_path, record_mode ) diff --git a/frigate/util/config.py b/frigate/util/config.py index 70492adbc..98267b9ea 100644 --- a/frigate/util/config.py +++ b/frigate/util/config.py @@ -13,7 +13,7 @@ from frigate.util.services import get_video_properties logger = logging.getLogger(__name__) -CURRENT_CONFIG_VERSION = "0.16-0" 
+CURRENT_CONFIG_VERSION = "0.17-0" DEFAULT_CONFIG_FILE = os.path.join(CONFIG_DIR, "config.yml") @@ -91,6 +91,13 @@ def migrate_frigate_config(config_file: str): yaml.dump(new_config, f) previous_version = "0.16-0" + if previous_version < "0.17-0": + logger.info(f"Migrating frigate config from {previous_version} to 0.17-0...") + new_config = migrate_017_0(config) + with open(config_file, "w") as f: + yaml.dump(new_config, f) + previous_version = "0.17-0" + logger.info("Finished frigate config migration...") @@ -340,6 +347,57 @@ def migrate_016_0(config: dict[str, dict[str, Any]]) -> dict[str, dict[str, Any] return new_config +def migrate_017_0(config: dict[str, dict[str, Any]]) -> dict[str, dict[str, Any]]: + """Handle migrating frigate config to 0.16-0""" + new_config = config.copy() + + # migrate global to new recording configuration + global_record_retain = config.get("record", {}).get("retain") + + if global_record_retain: + continuous = {"days": 0} + motion = {"days": 0} + days = global_record_retain.get("days") + mode = global_record_retain.get("mode", "all") + + if days: + if mode == "all": + continuous["days"] = days + else: + motion["days"] = days + + new_config["record"]["continuous"] = continuous + new_config["record"]["motion"] = motion + + del new_config["record"]["retain"] + + for name, camera in config.get("cameras", {}).items(): + camera_config: dict[str, dict[str, Any]] = camera.copy() + camera_record_retain = camera_config.get("record", {}).get("retain") + + if camera_record_retain: + continuous = {"days": 0} + motion = {"days": 0} + days = camera_record_retain.get("days") + mode = camera_record_retain.get("mode", "all") + + if days: + if mode == "all": + continuous["days"] = days + else: + motion["days"] = days + + camera_config["record"]["continuous"] = continuous + camera_config["record"]["motion"] = motion + + del camera_config["record"]["retain"] + + new_config["cameras"][name] = camera_config + + new_config["version"] = "0.17-0" + return 
new_config + + def get_relative_coordinates( mask: Optional[Union[str, list]], frame_shape: tuple[int, int] ) -> Union[str, list]: From b77e6f5ebcae74cb16dc99160b5e72d60e4955ae Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Sun, 1 Jun 2025 15:21:12 -0500 Subject: [PATCH 045/144] Upgrade PaddleOCR models to v4 (rec) and v5 (det) (#18505) The PP_OCRv5 text detection models have greatly improved over v3. The v5 recognition model makes improvements to challenging handwriting and uncommon characters, which are not necessary for LPR, so using v4 seemed like a better choice to continue to keep inference time as low as possible. Also included is the full dictionary for Chinese character support. --- .../common/license_plate/mixin.py | 226 +++++++++--------- frigate/embeddings/onnx/lpr_embedding.py | 11 +- 2 files changed, 126 insertions(+), 111 deletions(-) diff --git a/frigate/data_processing/common/license_plate/mixin.py b/frigate/data_processing/common/license_plate/mixin.py index 2c68ce374..2d63c1c69 100644 --- a/frigate/data_processing/common/license_plate/mixin.py +++ b/frigate/data_processing/common/license_plate/mixin.py @@ -22,7 +22,7 @@ from frigate.comms.event_metadata_updater import ( EventMetadataPublisher, EventMetadataTypeEnum, ) -from frigate.const import CLIPS_DIR +from frigate.const import CLIPS_DIR, MODEL_CACHE_DIR from frigate.embeddings.onnx.lpr_embedding import LPR_EMBEDDING_SIZE from frigate.types import TrackedObjectUpdateTypesEnum from frigate.util.builtin import EventsPerSecond, InferenceSpeed @@ -43,7 +43,11 @@ class LicensePlateProcessingMixin: self.plates_det_second = EventsPerSecond() self.plates_det_second.start() self.event_metadata_publisher = EventMetadataPublisher() - self.ctc_decoder = CTCDecoder() + self.ctc_decoder = CTCDecoder( + character_dict_path=os.path.join( + MODEL_CACHE_DIR, "paddleocr-onnx", "ppocr_keys_v1.txt" + ) + ) self.batch_size = 6 # Detection specific parameters @@ 
-1595,113 +1599,121 @@ class CTCDecoder: for each decoded character sequence. """ - def __init__(self): + def __init__(self, character_dict_path=None): """ - Initialize the CTCDecoder with a list of characters and a character map. + Initializes the CTCDecoder. + :param character_dict_path: Path to the character dictionary file. + If None, a default (English-focused) list is used. + For Chinese models, this should point to the correct + character dictionary file provided with the model. + """ + self.characters = [] + if character_dict_path and os.path.exists(character_dict_path): + with open(character_dict_path, "r", encoding="utf-8") as f: + self.characters = ["blank"] + [ + line.strip() for line in f if line.strip() + ] + else: + self.characters = [ + "blank", + "0", + "1", + "2", + "3", + "4", + "5", + "6", + "7", + "8", + "9", + ":", + ";", + "<", + "=", + ">", + "?", + "@", + "A", + "B", + "C", + "D", + "E", + "F", + "G", + "H", + "I", + "J", + "K", + "L", + "M", + "N", + "O", + "P", + "Q", + "R", + "S", + "T", + "U", + "V", + "W", + "X", + "Y", + "Z", + "[", + "\\", + "]", + "^", + "_", + "`", + "a", + "b", + "c", + "d", + "e", + "f", + "g", + "h", + "i", + "j", + "k", + "l", + "m", + "n", + "o", + "p", + "q", + "r", + "s", + "t", + "u", + "v", + "w", + "x", + "y", + "z", + "{", + "|", + "}", + "~", + "!", + '"', + "#", + "$", + "%", + "&", + "'", + "(", + ")", + "*", + "+", + ",", + "-", + ".", + "/", + " ", + " ", + ] - The character set includes digits, letters, special characters, and a "blank" token - (used by the CTC model for decoding purposes). A character map is created to map - indices to characters. 
- """ - self.characters = [ - "blank", - "0", - "1", - "2", - "3", - "4", - "5", - "6", - "7", - "8", - "9", - ":", - ";", - "<", - "=", - ">", - "?", - "@", - "A", - "B", - "C", - "D", - "E", - "F", - "G", - "H", - "I", - "J", - "K", - "L", - "M", - "N", - "O", - "P", - "Q", - "R", - "S", - "T", - "U", - "V", - "W", - "X", - "Y", - "Z", - "[", - "\\", - "]", - "^", - "_", - "`", - "a", - "b", - "c", - "d", - "e", - "f", - "g", - "h", - "i", - "j", - "k", - "l", - "m", - "n", - "o", - "p", - "q", - "r", - "s", - "t", - "u", - "v", - "w", - "x", - "y", - "z", - "{", - "|", - "}", - "~", - "!", - '"', - "#", - "$", - "%", - "&", - "'", - "(", - ")", - "*", - "+", - ",", - "-", - ".", - "/", - " ", - " ", - ] self.char_map = {i: char for i, char in enumerate(self.characters)} def __call__( diff --git a/frigate/embeddings/onnx/lpr_embedding.py b/frigate/embeddings/onnx/lpr_embedding.py index ac981da8d..1b5b9acd0 100644 --- a/frigate/embeddings/onnx/lpr_embedding.py +++ b/frigate/embeddings/onnx/lpr_embedding.py @@ -32,13 +32,15 @@ class PaddleOCRDetection(BaseEmbedding): device: str = "AUTO", ): model_file = ( - "detection-large.onnx" if model_size == "large" else "detection-small.onnx" + "detection_v5-large.onnx" + if model_size == "large" + else "detection_v5-small.onnx" ) super().__init__( model_name="paddleocr-onnx", model_file=model_file, download_urls={ - model_file: f"https://github.com/hawkeye217/paddleocr-onnx/raw/refs/heads/master/models/{model_file}" + model_file: f"https://github.com/hawkeye217/paddleocr-onnx/raw/refs/heads/master/models/v5/{model_file}" }, ) self.requestor = requestor @@ -156,9 +158,10 @@ class PaddleOCRRecognition(BaseEmbedding): ): super().__init__( model_name="paddleocr-onnx", - model_file="recognition.onnx", + model_file="recognition_v4.onnx", download_urls={ - "recognition.onnx": "https://github.com/hawkeye217/paddleocr-onnx/raw/refs/heads/master/models/recognition.onnx" + "recognition_v4.onnx": 
"https://github.com/hawkeye217/paddleocr-onnx/raw/refs/heads/master/models/v4/recognition_v4.onnx", + "ppocr_keys_v1.txt": "https://github.com/hawkeye217/paddleocr-onnx/raw/refs/heads/master/models/v4/ppocr_keys_v1.txt", }, ) self.requestor = requestor From ac7fb29b326fea16dcdc031f41c9d460a9f38757 Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Tue, 3 Jun 2025 06:53:48 -0500 Subject: [PATCH 046/144] Audio transcription tweaks (#18540) * use model runner * unload whisper model when live transcription is complete --- .../common/audio_transcription/model.py | 81 +++++++ .../real_time/audio_transcription.py | 205 +++++++++--------- .../real_time/whisper_online.py | 5 +- frigate/data_processing/types.py | 7 + frigate/events/audio.py | 42 ++-- 5 files changed, 220 insertions(+), 120 deletions(-) create mode 100644 frigate/data_processing/common/audio_transcription/model.py diff --git a/frigate/data_processing/common/audio_transcription/model.py b/frigate/data_processing/common/audio_transcription/model.py new file mode 100644 index 000000000..0fe5ddb5c --- /dev/null +++ b/frigate/data_processing/common/audio_transcription/model.py @@ -0,0 +1,81 @@ +"""Set up audio transcription models based on model size.""" + +import logging +import os + +import sherpa_onnx +from faster_whisper.utils import download_model + +from frigate.comms.inter_process import InterProcessRequestor +from frigate.const import MODEL_CACHE_DIR +from frigate.data_processing.types import AudioTranscriptionModel +from frigate.util.downloader import ModelDownloader + +logger = logging.getLogger(__name__) + + +class AudioTranscriptionModelRunner: + def __init__( + self, + device: str = "CPU", + model_size: str = "small", + ): + self.model: AudioTranscriptionModel = None + self.requestor = InterProcessRequestor() + + if model_size == "large": + # use the Whisper download function instead of our own + logger.debug("Downloading Whisper audio transcription 
model") + download_model( + size_or_id="small" if device == "cuda" else "tiny", + local_files_only=False, + cache_dir=os.path.join(MODEL_CACHE_DIR, "whisper"), + ) + logger.debug("Whisper audio transcription model downloaded") + + else: + # small model as default + download_path = os.path.join(MODEL_CACHE_DIR, "sherpa-onnx") + HF_ENDPOINT = os.environ.get("HF_ENDPOINT", "https://huggingface.co") + self.model_files = { + "encoder.onnx": f"{HF_ENDPOINT}/csukuangfj/sherpa-onnx-streaming-zipformer-en-2023-06-26/resolve/main/encoder-epoch-99-avg-1-chunk-16-left-128.onnx", + "decoder.onnx": f"{HF_ENDPOINT}/csukuangfj/sherpa-onnx-streaming-zipformer-en-2023-06-26/resolve/main/decoder-epoch-99-avg-1-chunk-16-left-128.onnx", + "joiner.onnx": f"{HF_ENDPOINT}/csukuangfj/sherpa-onnx-streaming-zipformer-en-2023-06-26/resolve/main/joiner-epoch-99-avg-1-chunk-16-left-128.onnx", + "tokens.txt": f"{HF_ENDPOINT}/csukuangfj/sherpa-onnx-streaming-zipformer-en-2023-06-26/resolve/main/tokens.txt", + } + + if not all( + os.path.exists(os.path.join(download_path, n)) + for n in self.model_files.keys() + ): + self.downloader = ModelDownloader( + model_name="sherpa-onnx", + download_path=download_path, + file_names=self.model_files.keys(), + download_func=self.__download_models, + ) + self.downloader.ensure_model_files() + self.downloader.wait_for_download() + + self.model = sherpa_onnx.OnlineRecognizer.from_transducer( + tokens=os.path.join(MODEL_CACHE_DIR, "sherpa-onnx/tokens.txt"), + encoder=os.path.join(MODEL_CACHE_DIR, "sherpa-onnx/encoder.onnx"), + decoder=os.path.join(MODEL_CACHE_DIR, "sherpa-onnx/decoder.onnx"), + joiner=os.path.join(MODEL_CACHE_DIR, "sherpa-onnx/joiner.onnx"), + num_threads=2, + sample_rate=16000, + feature_dim=80, + enable_endpoint_detection=True, + rule1_min_trailing_silence=2.4, + rule2_min_trailing_silence=1.2, + rule3_min_utterance_length=300, + decoding_method="greedy_search", + provider="cpu", + ) + + def __download_models(self, path: str) -> None: + try: + 
file_name = os.path.basename(path) + ModelDownloader.download_from_url(self.model_files[file_name], path) + except Exception as e: + logger.error(f"Failed to download {path}: {e}") diff --git a/frigate/data_processing/real_time/audio_transcription.py b/frigate/data_processing/real_time/audio_transcription.py index 7ed644498..2e6d599eb 100644 --- a/frigate/data_processing/real_time/audio_transcription.py +++ b/frigate/data_processing/real_time/audio_transcription.py @@ -7,16 +7,20 @@ import threading from typing import Optional import numpy as np -import sherpa_onnx from frigate.comms.inter_process import InterProcessRequestor from frigate.config import CameraConfig, FrigateConfig from frigate.const import MODEL_CACHE_DIR -from frigate.util.downloader import ModelDownloader +from frigate.data_processing.common.audio_transcription.model import ( + AudioTranscriptionModelRunner, +) +from frigate.data_processing.real_time.whisper_online import ( + FasterWhisperASR, + OnlineASRProcessor, +) from ..types import DataProcessorMetrics from .api import RealTimeProcessorApi -from .whisper_online import FasterWhisperASR, OnlineASRProcessor logger = logging.getLogger(__name__) @@ -27,6 +31,7 @@ class AudioTranscriptionRealTimeProcessor(RealTimeProcessorApi): config: FrigateConfig, camera_config: CameraConfig, requestor: InterProcessRequestor, + model_runner: AudioTranscriptionModelRunner, metrics: DataProcessorMetrics, stop_event: threading.Event, ): @@ -34,95 +39,55 @@ class AudioTranscriptionRealTimeProcessor(RealTimeProcessorApi): self.config = config self.camera_config = camera_config self.requestor = requestor - self.recognizer = None self.stream = None + self.whisper_model = None + self.model_runner = model_runner self.transcription_segments = [] self.audio_queue = queue.Queue() self.stop_event = stop_event - if self.config.audio_transcription.model_size == "large": - self.asr = FasterWhisperASR( - modelsize="tiny", - device="cuda" - if 
self.config.audio_transcription.device == "GPU" - else "cpu", - lan=config.audio_transcription.language, - model_dir=os.path.join(MODEL_CACHE_DIR, "whisper"), - ) - self.asr.use_vad() # Enable Silero VAD for low-RMS audio - - else: - # small model as default - download_path = os.path.join(MODEL_CACHE_DIR, "sherpa-onnx") - HF_ENDPOINT = os.environ.get("HF_ENDPOINT", "https://huggingface.co") - self.model_files = { - "encoder.onnx": f"{HF_ENDPOINT}/csukuangfj/sherpa-onnx-streaming-zipformer-en-2023-06-26/resolve/main/encoder-epoch-99-avg-1-chunk-16-left-128.onnx", - "decoder.onnx": f"{HF_ENDPOINT}/csukuangfj/sherpa-onnx-streaming-zipformer-en-2023-06-26/resolve/main/decoder-epoch-99-avg-1-chunk-16-left-128.onnx", - "joiner.onnx": f"{HF_ENDPOINT}/csukuangfj/sherpa-onnx-streaming-zipformer-en-2023-06-26/resolve/main/joiner-epoch-99-avg-1-chunk-16-left-128.onnx", - "tokens.txt": f"{HF_ENDPOINT}/csukuangfj/sherpa-onnx-streaming-zipformer-en-2023-06-26/resolve/main/tokens.txt", - } - - if not all( - os.path.exists(os.path.join(download_path, n)) - for n in self.model_files.keys() - ): - self.downloader = ModelDownloader( - model_name="sherpa-onnx", - download_path=download_path, - file_names=self.model_files.keys(), - download_func=self.__download_models, - complete_func=self.__build_recognizer, - ) - self.downloader.ensure_model_files() - - self.__build_recognizer() - - def __download_models(self, path: str) -> None: - try: - file_name = os.path.basename(path) - ModelDownloader.download_from_url(self.model_files[file_name], path) - except Exception as e: - logger.error(f"Failed to download {path}: {e}") - def __build_recognizer(self) -> None: try: if self.config.audio_transcription.model_size == "large": - self.online = OnlineASRProcessor( - asr=self.asr, + # Whisper models need to be per-process and can only run one stream at a time + # TODO: try parallel: https://github.com/SYSTRAN/faster-whisper/issues/100 + logger.debug(f"Loading Whisper model for 
{self.camera_config.name}") + self.whisper_model = FasterWhisperASR( + modelsize="tiny", + device="cuda" + if self.config.audio_transcription.device == "GPU" + else "cpu", + lan=self.config.audio_transcription.language, + model_dir=os.path.join(MODEL_CACHE_DIR, "whisper"), + ) + self.whisper_model.use_vad() + self.stream = OnlineASRProcessor( + asr=self.whisper_model, ) else: - self.recognizer = sherpa_onnx.OnlineRecognizer.from_transducer( - tokens=os.path.join(MODEL_CACHE_DIR, "sherpa-onnx/tokens.txt"), - encoder=os.path.join(MODEL_CACHE_DIR, "sherpa-onnx/encoder.onnx"), - decoder=os.path.join(MODEL_CACHE_DIR, "sherpa-onnx/decoder.onnx"), - joiner=os.path.join(MODEL_CACHE_DIR, "sherpa-onnx/joiner.onnx"), - num_threads=2, - sample_rate=16000, - feature_dim=80, - enable_endpoint_detection=True, - rule1_min_trailing_silence=2.4, - rule2_min_trailing_silence=1.2, - rule3_min_utterance_length=300, - decoding_method="greedy_search", - provider="cpu", - ) - self.stream = self.recognizer.create_stream() - logger.debug("Audio transcription (live) initialized") + logger.debug(f"Loading sherpa stream for {self.camera_config.name}") + self.stream = self.model_runner.model.create_stream() + logger.debug( + f"Audio transcription (live) initialized for {self.camera_config.name}" + ) except Exception as e: logger.error( f"Failed to initialize live streaming audio transcription: {e}" ) - self.recognizer = None def __process_audio_stream( self, audio_data: np.ndarray ) -> Optional[tuple[str, bool]]: - if (not self.recognizer or not self.stream) and not self.online: - logger.debug( - "Audio transcription (streaming) recognizer or stream not initialized" - ) + if ( + self.model_runner.model is None + and self.config.audio_transcription.model_size == "small" + ): + logger.debug("Audio transcription (live) model not initialized") return None + if not self.stream: + self.__build_recognizer() + try: if audio_data.dtype != np.float32: audio_data = audio_data.astype(np.float32) @@ -135,10 
+100,14 @@ class AudioTranscriptionRealTimeProcessor(RealTimeProcessorApi): if self.config.audio_transcription.model_size == "large": # large model - self.online.insert_audio_chunk(audio_data) - output = self.online.process_iter() + self.stream.insert_audio_chunk(audio_data) + output = self.stream.process_iter() text = output[2].strip() - is_endpoint = text.endswith((".", "!", "?")) + is_endpoint = ( + text.endswith((".", "!", "?")) + and sum(len(str(lines)) for lines in self.transcription_segments) + > 300 + ) if text: self.transcription_segments.append(text) @@ -150,11 +119,11 @@ class AudioTranscriptionRealTimeProcessor(RealTimeProcessorApi): # small model self.stream.accept_waveform(16000, audio_data) - while self.recognizer.is_ready(self.stream): - self.recognizer.decode_stream(self.stream) + while self.model_runner.model.is_ready(self.stream): + self.model_runner.model.decode_stream(self.stream) - text = self.recognizer.get_result(self.stream).strip() - is_endpoint = self.recognizer.is_endpoint(self.stream) + text = self.model_runner.model.get_result(self.stream).strip() + is_endpoint = self.model_runner.model.is_endpoint(self.stream) logger.debug(f"Transcription result: '{text}'") @@ -166,7 +135,7 @@ class AudioTranscriptionRealTimeProcessor(RealTimeProcessorApi): if is_endpoint and self.config.audio_transcription.model_size == "small": # reset sherpa if we've reached an endpoint - self.recognizer.reset(self.stream) + self.model_runner.model.reset(self.stream) return text, is_endpoint except Exception as e: @@ -190,10 +159,17 @@ class AudioTranscriptionRealTimeProcessor(RealTimeProcessorApi): logger.debug( f"Starting audio transcription thread for {self.camera_config.name}" ) + + # start with an empty transcription + self.requestor.send_data( + f"{self.camera_config.name}/audio/transcription", + "", + ) + while not self.stop_event.is_set(): try: # Get audio data from queue with a timeout to check stop_event - obj_data, audio = 
self.audio_queue.get(timeout=0.1) + _, audio = self.audio_queue.get(timeout=0.1) result = self.__process_audio_stream(audio) if not result: @@ -209,7 +185,7 @@ class AudioTranscriptionRealTimeProcessor(RealTimeProcessorApi): self.audio_queue.task_done() if is_endpoint: - self.reset(obj_data["camera"]) + self.reset() except queue.Empty: continue @@ -221,23 +197,7 @@ class AudioTranscriptionRealTimeProcessor(RealTimeProcessorApi): f"Stopping audio transcription thread for {self.camera_config.name}" ) - def reset(self, camera: str) -> None: - if self.config.audio_transcription.model_size == "large": - # get final output from whisper - output = self.online.finish() - self.transcription_segments = [] - - self.requestor.send_data( - f"{self.camera_config.name}/audio/transcription", - (output[2].strip() + " "), - ) - - # reset whisper - self.online.init() - else: - # reset sherpa - self.recognizer.reset(self.stream) - + def clear_audio_queue(self) -> None: # Clear the audio queue while not self.audio_queue.empty(): try: @@ -246,8 +206,54 @@ class AudioTranscriptionRealTimeProcessor(RealTimeProcessorApi): except queue.Empty: break + def reset(self) -> None: + if self.config.audio_transcription.model_size == "large": + # get final output from whisper + output = self.stream.finish() + self.transcription_segments = [] + + self.requestor.send_data( + f"{self.camera_config.name}/audio/transcription", + (output[2].strip() + " "), + ) + + # reset whisper + self.stream.init() + self.transcription_segments = [] + else: + # reset sherpa + self.model_runner.model.reset(self.stream) + logger.debug("Stream reset") + def check_unload_model(self) -> None: + # regularly called in the loop in audio maintainer + if ( + self.config.audio_transcription.model_size == "large" + and self.whisper_model is not None + ): + logger.debug(f"Unloading Whisper model for {self.camera_config.name}") + self.clear_audio_queue() + self.transcription_segments = [] + self.stream = None + self.whisper_model = 
None + + self.requestor.send_data( + f"{self.camera_config.name}/audio/transcription", + "", + ) + if ( + self.config.audio_transcription.model_size == "small" + and self.stream is not None + ): + logger.debug(f"Clearing sherpa stream for {self.camera_config.name}") + self.stream = None + + self.requestor.send_data( + f"{self.camera_config.name}/audio/transcription", + "", + ) + def stop(self) -> None: """Stop the transcription thread and clean up.""" self.stop_event.set() @@ -266,7 +272,6 @@ class AudioTranscriptionRealTimeProcessor(RealTimeProcessorApi): self, topic: str, request_data: dict[str, any] ) -> dict[str, any] | None: if topic == "clear_audio_recognizer": - self.recognizer = None self.stream = None self.__build_recognizer() return {"message": "Audio recognizer cleared and rebuilt", "success": True} diff --git a/frigate/data_processing/real_time/whisper_online.py b/frigate/data_processing/real_time/whisper_online.py index 96c1ce0cf..9b81d7fbe 100644 --- a/frigate/data_processing/real_time/whisper_online.py +++ b/frigate/data_processing/real_time/whisper_online.py @@ -139,8 +139,11 @@ class FasterWhisperASR(ASRBase): return model def transcribe(self, audio, init_prompt=""): + from faster_whisper import BatchedInferencePipeline + # tested: beam_size=5 is faster and better than 1 (on one 200 second document from En ESIC, min chunk 0.01) - segments, info = self.model.transcribe( + batched_model = BatchedInferencePipeline(model=self.model) + segments, info = batched_model.transcribe( audio, language=self.original_language, initial_prompt=init_prompt, diff --git a/frigate/data_processing/types.py b/frigate/data_processing/types.py index a19a856bf..5d083b32e 100644 --- a/frigate/data_processing/types.py +++ b/frigate/data_processing/types.py @@ -4,6 +4,10 @@ import multiprocessing as mp from enum import Enum from multiprocessing.sharedctypes import Synchronized +import sherpa_onnx + +from frigate.data_processing.real_time.whisper_online import FasterWhisperASR 
+ class DataProcessorMetrics: image_embeddings_speed: Synchronized @@ -41,3 +45,6 @@ class PostProcessDataEnum(str, Enum): recording = "recording" review = "review" tracked_object = "tracked_object" + + +AudioTranscriptionModel = FasterWhisperASR | sherpa_onnx.OnlineRecognizer | None diff --git a/frigate/events/audio.py b/frigate/events/audio.py index dc6ee7128..aeeaf3b4f 100644 --- a/frigate/events/audio.py +++ b/frigate/events/audio.py @@ -30,6 +30,9 @@ from frigate.const import ( AUDIO_MIN_CONFIDENCE, AUDIO_SAMPLE_RATE, ) +from frigate.data_processing.common.audio_transcription.model import ( + AudioTranscriptionModelRunner, +) from frigate.data_processing.real_time.audio_transcription import ( AudioTranscriptionRealTimeProcessor, ) @@ -87,6 +90,10 @@ class AudioProcessor(util.Process): self.camera_metrics = camera_metrics self.cameras = cameras self.config = config + self.transcription_model_runner = AudioTranscriptionModelRunner( + self.config.audio_transcription.device, + self.config.audio_transcription.model_size, + ) def run(self) -> None: audio_threads: list[AudioEventMaintainer] = [] @@ -101,6 +108,7 @@ class AudioProcessor(util.Process): camera, self.config, self.camera_metrics, + self.transcription_model_runner, self.stop_event, ) audio_threads.append(audio_thread) @@ -130,6 +138,7 @@ class AudioEventMaintainer(threading.Thread): camera: CameraConfig, config: FrigateConfig, camera_metrics: dict[str, CameraMetrics], + audio_transcription_model_runner: AudioTranscriptionModelRunner, stop_event: threading.Event, ) -> None: super().__init__(name=f"{camera.name}_audio_event_processor") @@ -146,6 +155,7 @@ class AudioEventMaintainer(threading.Thread): self.ffmpeg_cmd = get_ffmpeg_command(self.camera_config.ffmpeg) self.logpipe = LogPipe(f"ffmpeg.{self.camera_config.name}.audio") self.audio_listener = None + self.audio_transcription_model_runner = audio_transcription_model_runner self.transcription_processor = None self.transcription_thread = None @@ -168,6 
+178,7 @@ class AudioEventMaintainer(threading.Thread): config=self.config, camera_config=self.camera_config, requestor=self.requestor, + model_runner=self.audio_transcription_model_runner, metrics=self.camera_metrics[self.camera_config.name], stop_event=self.stop_event, ) @@ -223,18 +234,18 @@ class AudioEventMaintainer(threading.Thread): ) # run audio transcription - if self.transcription_processor is not None and ( - self.camera_config.audio_transcription.live_enabled - ): - self.transcribing = True - # process audio until we've reached the endpoint - self.transcription_processor.process_audio( - { - "id": f"{self.camera_config.name}_audio", - "camera": self.camera_config.name, - }, - audio, - ) + if self.transcription_processor is not None: + if self.camera_config.audio_transcription.live_enabled: + # process audio until we've reached the endpoint + self.transcription_processor.process_audio( + { + "id": f"{self.camera_config.name}_audio", + "camera": self.camera_config.name, + }, + audio, + ) + else: + self.transcription_processor.check_unload_model() self.expire_detections() @@ -309,13 +320,6 @@ class AudioEventMaintainer(threading.Thread): ) self.detections[detection["label"]] = None - # clear real-time transcription - if self.transcription_processor is not None: - self.transcription_processor.reset(self.camera_config.name) - self.requestor.send_data( - f"{self.camera_config.name}/audio/transcription", "" - ) - def expire_all_detections(self) -> None: """Immediately end all current detections""" now = datetime.datetime.now().timestamp() From 1c75ff59f1730cfe4ad9499467019eed8ee87d55 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Wed, 4 Jun 2025 17:09:55 -0600 Subject: [PATCH 047/144] Classification Model UI (#18571) * Setup basic training structure * Build out route * Handle model configs * Add image fetch APIs * Implement model training screen with dataset selection * Implement viewing of training images * Adjust directories * Implement viewing of images 
* Add support for deleting images * Implement full deletion * Implement classification model training * Improve naming * More renaming * Improve layout * Reduce logging * Cleanup --- frigate/api/classification.py | 173 ++++- .../real_time/custom_classification.py | 2 +- frigate/util/classification.py | 10 +- .../locales/en/views/classificationModel.json | 49 ++ web/src/App.tsx | 2 + .../overlay/ClassificationSelectionDialog.tsx | 155 ++++ web/src/hooks/use-navigation.ts | 11 +- web/src/pages/ClassificationModel.tsx | 18 + web/src/types/frigateConfig.ts | 20 + .../classification/ModelSelectionView.tsx | 63 ++ .../classification/ModelTrainingView.tsx | 661 ++++++++++++++++++ 11 files changed, 1156 insertions(+), 8 deletions(-) create mode 100644 web/public/locales/en/views/classificationModel.json create mode 100644 web/src/components/overlay/ClassificationSelectionDialog.tsx create mode 100644 web/src/pages/ClassificationModel.tsx create mode 100644 web/src/views/classification/ModelSelectionView.tsx create mode 100644 web/src/views/classification/ModelTrainingView.tsx diff --git a/frigate/api/classification.py b/frigate/api/classification.py index 98b716c67..da5d11d88 100644 --- a/frigate/api/classification.py +++ b/frigate/api/classification.py @@ -21,7 +21,7 @@ from frigate.api.defs.request.classification_body import ( from frigate.api.defs.tags import Tags from frigate.config import FrigateConfig from frigate.config.camera import DetectConfig -from frigate.const import FACE_DIR, MODEL_CACHE_DIR +from frigate.const import CLIPS_DIR, FACE_DIR from frigate.embeddings import EmbeddingsContext from frigate.models import Event from frigate.util.classification import train_classification_model @@ -449,6 +449,50 @@ def transcribe_audio(request: Request, body: AudioTranscriptionBody): # custom classification training +@router.get("/classification/{name}/dataset") +def get_classification_dataset(name: str): + dataset_dict: dict[str, list[str]] = {} + + dataset_dir = 
os.path.join(CLIPS_DIR, sanitize_filename(name), "dataset") + + if not os.path.exists(dataset_dir): + return JSONResponse(status_code=200, content={}) + + for name in os.listdir(dataset_dir): + category_dir = os.path.join(dataset_dir, name) + + if not os.path.isdir(category_dir): + continue + + dataset_dict[name] = [] + + for file in filter( + lambda f: (f.lower().endswith((".webp", ".png", ".jpg", ".jpeg"))), + os.listdir(category_dir), + ): + dataset_dict[name].append(file) + + return JSONResponse(status_code=200, content=dataset_dict) + + +@router.get("/classification/{name}/train") +def get_classification_images(name: str): + train_dir = os.path.join(CLIPS_DIR, sanitize_filename(name), "train") + + if not os.path.exists(train_dir): + return JSONResponse(status_code=200, content=[]) + + return JSONResponse( + status_code=200, + content=list( + filter( + lambda f: (f.lower().endswith((".webp", ".png", ".jpg", ".jpeg"))), + os.listdir(train_dir), + ) + ), + ) + + @router.post("/classification/{name}/train") async def train_configured_model( request: Request, name: str, background_tasks: BackgroundTasks @@ -466,10 +510,131 @@ async def train_configured_model( status_code=404, ) - background_tasks.add_task( - train_classification_model, os.path.join(MODEL_CACHE_DIR, name) - ) + background_tasks.add_task(train_classification_model, name) return JSONResponse( content={"success": True, "message": "Started classification model training."}, status_code=200, ) + + +@router.post( + "/classification/{name}/dataset/{category}/delete", + dependencies=[Depends(require_role(["admin"]))], +) +def delete_classification_dataset_images( + request: Request, name: str, category: str, body: dict = None +): + config: FrigateConfig = request.app.frigate_config + + if name not in config.classification.custom: + return JSONResponse( + content=( + { + "success": False, + "message": f"{name} is not a known classification model.", + } + ), + status_code=404, + ) + + json: dict[str, Any] = 
body or {} + list_of_ids = json.get("ids", "") + folder = os.path.join( + CLIPS_DIR, sanitize_filename(name), "dataset", sanitize_filename(category) + ) + + for id in list_of_ids: + file_path = os.path.join(folder, id) + + if os.path.isfile(file_path): + os.unlink(file_path) + + return JSONResponse( + content=({"success": True, "message": "Successfully deleted faces."}), + status_code=200, + ) + + +@router.post( + "/classification/{name}/dataset/categorize", + dependencies=[Depends(require_role(["admin"]))], +) +def categorize_classification_image(request: Request, name: str, body: dict = None): + config: FrigateConfig = request.app.frigate_config + + if name not in config.classification.custom: + return JSONResponse( + content=( + { + "success": False, + "message": f"{name} is not a known classification model.", + } + ), + status_code=404, + ) + + json: dict[str, Any] = body or {} + category = sanitize_filename(json.get("category", "")) + training_file_name = sanitize_filename(json.get("training_file", "")) + training_file = os.path.join(CLIPS_DIR, name, "train", training_file_name) + + if training_file_name and not os.path.isfile(training_file): + return JSONResponse( + content=( + { + "success": False, + "message": f"Invalid filename or no file exists: {training_file_name}", + } + ), + status_code=404, + ) + + new_name = f"{category}-{datetime.datetime.now().timestamp()}.png" + new_file_folder = os.path.join(CLIPS_DIR, name, "dataset", category) + + if not os.path.exists(new_file_folder): + os.mkdir(new_file_folder) + + # use opencv because webp images can not be used to train + img = cv2.imread(training_file) + cv2.imwrite(os.path.join(new_file_folder, new_name), img) + os.unlink(training_file) + + return JSONResponse( + content=({"success": True, "message": "Successfully deleted faces."}), + status_code=200, + ) + + +@router.post( + "/classification/{name}/train/delete", + dependencies=[Depends(require_role(["admin"]))], +) +def 
delete_classification_train_images(request: Request, name: str, body: dict = None): + config: FrigateConfig = request.app.frigate_config + + if name not in config.classification.custom: + return JSONResponse( + content=( + { + "success": False, + "message": f"{name} is not a known classification model.", + } + ), + status_code=404, + ) + + json: dict[str, Any] = body or {} + list_of_ids = json.get("ids", "") + folder = os.path.join(CLIPS_DIR, sanitize_filename(name), "train") + + for id in list_of_ids: + file_path = os.path.join(folder, id) + + if os.path.isfile(file_path): + os.unlink(file_path) + + return JSONResponse( + content=({"success": True, "message": "Successfully deleted faces."}), + status_code=200, + ) diff --git a/frigate/data_processing/real_time/custom_classification.py b/frigate/data_processing/real_time/custom_classification.py index f94c2b28c..0e254ab0d 100644 --- a/frigate/data_processing/real_time/custom_classification.py +++ b/frigate/data_processing/real_time/custom_classification.py @@ -42,7 +42,7 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi): self.model_config = model_config self.requestor = requestor self.model_dir = os.path.join(MODEL_CACHE_DIR, self.model_config.name) - self.train_dir = os.path.join(CLIPS_DIR, self.model_config.name) + self.train_dir = os.path.join(CLIPS_DIR, self.model_config.name, "train") self.interpreter: Interpreter = None self.tensor_input_details: dict[str, Any] = None self.tensor_output_details: dict[str, Any] = None diff --git a/frigate/util/classification.py b/frigate/util/classification.py index 4ee5e1d54..a8624870b 100644 --- a/frigate/util/classification.py +++ b/frigate/util/classification.py @@ -1,5 +1,6 @@ """Util for classification models.""" +import logging import os import cv2 @@ -9,6 +10,8 @@ from tensorflow.keras import layers, models, optimizers from tensorflow.keras.applications import MobileNetV2 from tensorflow.keras.preprocessing.image import ImageDataGenerator +from 
frigate.const import CLIPS_DIR, MODEL_CACHE_DIR + BATCH_SIZE = 16 EPOCHS = 50 LEARNING_RATE = 0.001 @@ -35,9 +38,10 @@ def generate_representative_dataset_factory(dataset_dir: str): @staticmethod -def train_classification_model(model_dir: str) -> bool: +def train_classification_model(model_name: str) -> bool: """Train a classification model.""" - dataset_dir = os.path.join(model_dir, "dataset") + dataset_dir = os.path.join(CLIPS_DIR, model_name, "dataset") + model_dir = os.path.join(MODEL_CACHE_DIR, model_name) num_classes = len( [ d @@ -46,6 +50,8 @@ def train_classification_model(model_dir: str) -> bool: ] ) + tf.get_logger().setLevel(logging.ERROR) + # Start with imagenet base model with 35% of channels in each layer base_model = MobileNetV2( input_shape=(224, 224, 3), diff --git a/web/public/locales/en/views/classificationModel.json b/web/public/locales/en/views/classificationModel.json new file mode 100644 index 000000000..eb09ecaa0 --- /dev/null +++ b/web/public/locales/en/views/classificationModel.json @@ -0,0 +1,49 @@ +{ + "button": { + "deleteClassificationAttempts": "Delete Classification Images", + "renameCategory": "Rename Class", + "deleteCategory": "Delete Class", + "deleteImages": "Delete Images" + }, + "toast": { + "success": { + "deletedCategory": "Deleted Class", + "deletedImage": "Deleted Images", + "categorizedImage": "Successfully Classified Image" + }, + "error": { + "deleteImageFailed": "Failed to delete: {{errorMessage}}", + "deleteCategoryFailed": "Failed to delete class: {{errorMessage}}", + "categorizeFailed": "Failed to categorize image: {{errorMessage}}" + } + }, + "deleteCategory": { + "title": "Delete Class", + "desc": "Are you sure you want to delete the class {{name}}? This will permanently delete all associated images and require re-training the model." + }, + "deleteDatasetImages": { + "title": "Delete Dataset Images", + "desc": "Are you sure you want to delete {{count}} images from {{dataset}}? 
This action cannot be undone and will require re-training the model." + }, + "deleteTrainImages": { + "title": "Delete Train Images", + "desc": "Are you sure you want to delete {{count}} images? This action cannot be undone." + }, + "renameCategory": { + "title": "Rename Class", + "desc": "Enter a new name for {{name}}. You will be required to retrain the model for the name change to take affect." + }, + "description": { + "invalidName": "Invalid name. Names can only include letters, numbers, spaces, apostrophes, underscores, and hyphens." + }, + "train": { + "title": "Train", + "aria": "Select Train" + }, + "categories": "Classes", + "createCategory": { + "new": "Create New Class" + }, + "categorizeImageAs": "Classify Image As:", + "categorizeImage": "Classify Image" +} diff --git a/web/src/App.tsx b/web/src/App.tsx index d3edbc3a2..cd7906e97 100644 --- a/web/src/App.tsx +++ b/web/src/App.tsx @@ -24,6 +24,7 @@ const System = lazy(() => import("@/pages/System")); const Settings = lazy(() => import("@/pages/Settings")); const UIPlayground = lazy(() => import("@/pages/UIPlayground")); const FaceLibrary = lazy(() => import("@/pages/FaceLibrary")); +const Classification = lazy(() => import("@/pages/ClassificationModel")); const Logs = lazy(() => import("@/pages/Logs")); const AccessDenied = lazy(() => import("@/pages/AccessDenied")); @@ -76,6 +77,7 @@ function DefaultAppView() { } /> } /> } /> + } /> } /> } /> diff --git a/web/src/components/overlay/ClassificationSelectionDialog.tsx b/web/src/components/overlay/ClassificationSelectionDialog.tsx new file mode 100644 index 000000000..7cb8ca156 --- /dev/null +++ b/web/src/components/overlay/ClassificationSelectionDialog.tsx @@ -0,0 +1,155 @@ +import { + Drawer, + DrawerClose, + DrawerContent, + DrawerDescription, + DrawerHeader, + DrawerTitle, + DrawerTrigger, +} from "@/components/ui/drawer"; +import { + DropdownMenu, + DropdownMenuContent, + DropdownMenuItem, + DropdownMenuLabel, + DropdownMenuTrigger, +} from 
"@/components/ui/dropdown-menu"; +import { + Tooltip, + TooltipContent, + TooltipTrigger, +} from "@/components/ui/tooltip"; +import { isDesktop, isMobile } from "react-device-detect"; +import { LuPlus } from "react-icons/lu"; +import { useTranslation } from "react-i18next"; +import { cn } from "@/lib/utils"; +import React, { ReactNode, useCallback, useMemo, useState } from "react"; +import TextEntryDialog from "./dialog/TextEntryDialog"; +import { Button } from "../ui/button"; +import { MdCategory } from "react-icons/md"; +import axios from "axios"; +import { toast } from "sonner"; + +type ClassificationSelectionDialogProps = { + className?: string; + classes: string[]; + modelName: string; + image: string; + onRefresh: () => void; + children: ReactNode; +}; +export default function ClassificationSelectionDialog({ + className, + classes, + modelName, + image, + onRefresh, + children, +}: ClassificationSelectionDialogProps) { + const { t } = useTranslation(["views/classificationModel"]); + + const onCategorizeImage = useCallback( + (category: string) => { + axios + .post(`/classification/${modelName}/dataset/categorize`, { + category, + training_file: image, + }) + .then((resp) => { + if (resp.status == 200) { + toast.success(t("toast.success.categorizedImage"), { + position: "top-center", + }); + onRefresh(); + } + }) + .catch((error) => { + const errorMessage = + error.response?.data?.message || + error.response?.data?.detail || + "Unknown error"; + toast.error(t("toast.error.categorizeFailed", { errorMessage }), { + position: "top-center", + }); + }); + }, + [modelName, image, onRefresh, t], + ); + + const isChildButton = useMemo( + () => React.isValidElement(children) && children.type === Button, + [children], + ); + + // control + const [newFace, setNewFace] = useState(false); + + // components + const Selector = isDesktop ? DropdownMenu : Drawer; + const SelectorTrigger = isDesktop ? DropdownMenuTrigger : DrawerTrigger; + const SelectorContent = isDesktop ? 
DropdownMenuContent : DrawerContent; + const SelectorItem = isDesktop + ? DropdownMenuItem + : (props: React.HTMLAttributes) => ( + +
    + + ); + + return ( +
    + {newFace && ( + onCategorizeImage(newCat)} + /> + )} + + + + + {children} + + + {isMobile && ( + + Details + Details + + )} + {t("categorizeImageAs")} +
    + setNewFace(true)} + > + + {t("createCategory.new")} + + {classes.sort().map((category) => ( + onCategorizeImage(category)} + > + + {category} + + ))} +
    +
    +
    + {t("categorizeImage")} +
    +
    + ); +} diff --git a/web/src/hooks/use-navigation.ts b/web/src/hooks/use-navigation.ts index 41ec7227f..d9bd6f6a4 100644 --- a/web/src/hooks/use-navigation.ts +++ b/web/src/hooks/use-navigation.ts @@ -6,7 +6,7 @@ import { isDesktop } from "react-device-detect"; import { FaCompactDisc, FaVideo } from "react-icons/fa"; import { IoSearch } from "react-icons/io5"; import { LuConstruction } from "react-icons/lu"; -import { MdVideoLibrary } from "react-icons/md"; +import { MdCategory, MdVideoLibrary } from "react-icons/md"; import { TbFaceId } from "react-icons/tb"; import useSWR from "swr"; @@ -16,6 +16,7 @@ export const ID_EXPLORE = 3; export const ID_EXPORT = 4; export const ID_PLAYGROUND = 5; export const ID_FACE_LIBRARY = 6; +export const ID_CLASSIFICATION = 7; export default function useNavigation( variant: "primary" | "secondary" = "primary", @@ -71,6 +72,14 @@ export default function useNavigation( url: "/faces", enabled: isDesktop && config?.face_recognition.enabled, }, + { + id: ID_CLASSIFICATION, + variant, + icon: MdCategory, + title: "menu.classification", + url: "/classification", + enabled: isDesktop, + }, ] as NavData[], [config?.face_recognition?.enabled, variant], ); diff --git a/web/src/pages/ClassificationModel.tsx b/web/src/pages/ClassificationModel.tsx new file mode 100644 index 000000000..c37d0b454 --- /dev/null +++ b/web/src/pages/ClassificationModel.tsx @@ -0,0 +1,18 @@ +import { useOverlayState } from "@/hooks/use-overlay-state"; +import { CustomClassificationModelConfig } from "@/types/frigateConfig"; +import ModelSelectionView from "@/views/classification/ModelSelectionView"; +import ModelTrainingView from "@/views/classification/ModelTrainingView"; + +export default function ClassificationModelPage() { + // training + + const [model, setModel] = useOverlayState( + "classificationModel", + ); + + if (model == undefined) { + return ; + } + + return ; +} diff --git a/web/src/types/frigateConfig.ts b/web/src/types/frigateConfig.ts index 
cf2bf1476..3ccc5b06d 100644 --- a/web/src/types/frigateConfig.ts +++ b/web/src/types/frigateConfig.ts @@ -279,6 +279,23 @@ export type CameraStreamingSettings = { volume: number; }; +export type CustomClassificationModelConfig = { + enabled: boolean; + name: string; + object_config: null | { + objects: string[]; + }; + state_config: null | { + cameras: { + [cameraName: string]: { + crop: [number, number, number, number]; + threshold: number; + }; + }; + motion: boolean; + }; +}; + export type GroupStreamingSettings = { [cameraName: string]: CameraStreamingSettings; }; @@ -316,6 +333,9 @@ export interface FrigateConfig { enabled: boolean; threshold: number; }; + custom: { + [modelKey: string]: CustomClassificationModelConfig; + }; }; database: { diff --git a/web/src/views/classification/ModelSelectionView.tsx b/web/src/views/classification/ModelSelectionView.tsx new file mode 100644 index 000000000..63133842a --- /dev/null +++ b/web/src/views/classification/ModelSelectionView.tsx @@ -0,0 +1,63 @@ +import ActivityIndicator from "@/components/indicators/activity-indicator"; +import { cn } from "@/lib/utils"; +import { + CustomClassificationModelConfig, + FrigateConfig, +} from "@/types/frigateConfig"; +import { useMemo } from "react"; +import { isMobile } from "react-device-detect"; +import useSWR from "swr"; + +type ModelSelectionViewProps = { + onClick: (model: CustomClassificationModelConfig) => void; +}; +export default function ModelSelectionView({ + onClick, +}: ModelSelectionViewProps) { + const { data: config } = useSWR("config", { + revalidateOnFocus: false, + }); + + const classificationConfigs = useMemo(() => { + if (!config) { + return []; + } + + return Object.values(config.classification.custom); + }, [config]); + + if (!config) { + return ; + } + + if (classificationConfigs.length == 0) { + return
    You need to set up a custom model configuration.
    ; + } + + return ( +
    + {classificationConfigs.map((config) => ( +
    onClick(config)} + onContextMenu={() => { + // e.stopPropagation(); + // e.preventDefault(); + // handleClickEvent(true); + }} + > +
    +
    + {config.name} ({config.state_config != null ? "State" : "Object"}{" "} + Classification) +
    +
    + ))} +
    + ); +} diff --git a/web/src/views/classification/ModelTrainingView.tsx b/web/src/views/classification/ModelTrainingView.tsx new file mode 100644 index 000000000..53ef7fa66 --- /dev/null +++ b/web/src/views/classification/ModelTrainingView.tsx @@ -0,0 +1,661 @@ +import { baseUrl } from "@/api/baseUrl"; +import TextEntryDialog from "@/components/overlay/dialog/TextEntryDialog"; +import { Button, buttonVariants } from "@/components/ui/button"; +import { + AlertDialog, + AlertDialogAction, + AlertDialogCancel, + AlertDialogContent, + AlertDialogDescription, + AlertDialogFooter, + AlertDialogHeader, + AlertDialogTitle, +} from "@/components/ui/alert-dialog"; +import { + Dialog, + DialogContent, + DialogDescription, + DialogHeader, + DialogTitle, +} from "@/components/ui/dialog"; +import { + DropdownMenu, + DropdownMenuContent, + DropdownMenuItem, + DropdownMenuSeparator, + DropdownMenuTrigger, +} from "@/components/ui/dropdown-menu"; +import { Toaster } from "@/components/ui/sonner"; +import { + Tooltip, + TooltipContent, + TooltipTrigger, +} from "@/components/ui/tooltip"; +import useKeyboardListener from "@/hooks/use-keyboard-listener"; +import useOptimisticState from "@/hooks/use-optimistic-state"; +import { cn } from "@/lib/utils"; +import { CustomClassificationModelConfig } from "@/types/frigateConfig"; +import { TooltipPortal } from "@radix-ui/react-tooltip"; +import axios from "axios"; +import { useCallback, useEffect, useMemo, useState } from "react"; +import { isDesktop, isMobile } from "react-device-detect"; +import { Trans, useTranslation } from "react-i18next"; +import { LuPencil, LuTrash2 } from "react-icons/lu"; +import { toast } from "sonner"; +import useSWR from "swr"; +import ClassificationSelectionDialog from "@/components/overlay/ClassificationSelectionDialog"; +import { TbCategoryPlus } from "react-icons/tb"; + +type ModelTrainingViewProps = { + model: CustomClassificationModelConfig; +}; +export default function ModelTrainingView({ model }: 
ModelTrainingViewProps) { + const { t } = useTranslation(["views/classificationModel"]); + const [page, setPage] = useState("train"); + const [pageToggle, setPageToggle] = useOptimisticState(page, setPage, 100); + + // dataset + + const { data: trainImages, mutate: refreshTrain } = useSWR( + `classification/${model.name}/train`, + ); + const { data: dataset, mutate: refreshDataset } = useSWR<{ + [id: string]: string[]; + }>(`classification/${model.name}/dataset`); + + // image multiselect + + const [selectedImages, setSelectedImages] = useState([]); + + const onClickImages = useCallback( + (images: string[], ctrl: boolean) => { + if (selectedImages.length == 0 && !ctrl) { + return; + } + + let newSelectedImages = [...selectedImages]; + + images.forEach((imageId) => { + const index = newSelectedImages.indexOf(imageId); + + if (index != -1) { + if (selectedImages.length == 1) { + newSelectedImages = []; + } else { + const copy = [ + ...newSelectedImages.slice(0, index), + ...newSelectedImages.slice(index + 1), + ]; + newSelectedImages = copy; + } + } else { + newSelectedImages.push(imageId); + } + }); + + setSelectedImages(newSelectedImages); + }, + [selectedImages, setSelectedImages], + ); + + // actions + + const trainModel = useCallback(() => { + axios.post(`classification/${model.name}/train`); + }, [model]); + + const [deleteDialogOpen, setDeleteDialogOpen] = useState( + null, + ); + + const onDelete = useCallback( + (ids: string[], isName: boolean = false) => { + const api = + pageToggle == "train" + ? 
`/classification/${model.name}/train/delete` + : `/classification/${model.name}/dataset/${pageToggle}/delete`; + + axios + .post(api, { ids }) + .then((resp) => { + setSelectedImages([]); + + if (resp.status == 200) { + if (isName) { + toast.success( + t("toast.success.deletedCategory", { count: ids.length }), + { + position: "top-center", + }, + ); + } else { + toast.success( + t("toast.success.deletedImage", { count: ids.length }), + { + position: "top-center", + }, + ); + } + + if (pageToggle == "train") { + refreshTrain(); + } else { + refreshDataset(); + } + } + }) + .catch((error) => { + const errorMessage = + error.response?.data?.message || + error.response?.data?.detail || + "Unknown error"; + if (isName) { + toast.error( + t("toast.error.deleteCategoryFailed", { errorMessage }), + { + position: "top-center", + }, + ); + } else { + toast.error(t("toast.error.deleteImageFailed", { errorMessage }), { + position: "top-center", + }); + } + }); + }, + [pageToggle, model, refreshTrain, refreshDataset, t], + ); + + // keyboard + + useKeyboardListener(["a", "Escape"], (key, modifiers) => { + if (modifiers.repeat || !modifiers.down) { + return; + } + + switch (key) { + case "a": + if (modifiers.ctrl) { + if (selectedImages.length) { + setSelectedImages([]); + } else { + setSelectedImages([ + ...(pageToggle === "train" + ? trainImages || [] + : dataset?.[pageToggle] || []), + ]); + } + } + break; + case "Escape": + setSelectedImages([]); + break; + } + }); + + useEffect(() => { + setSelectedImages([]); + }, [pageToggle]); + + return ( +
    + + + setDeleteDialogOpen(null)} + > + + + + {t( + pageToggle == "train" + ? "deleteTrainImages.title" + : "deleteDatasetImages.title", + )} + + + + + {pageToggle == "train" + ? "deleteTrainImages.desc" + : "deleteDatasetImages.desc"} + + + + + {t("button.cancel", { ns: "common" })} + + { + if (deleteDialogOpen) { + onDelete(deleteDialogOpen); + setDeleteDialogOpen(null); + } + }} + > + {t("button.delete", { ns: "common" })} + + + + + +
    + {}} + /> + {selectedImages?.length > 0 ? ( +
    +
    +
    {`${selectedImages.length} selected`}
    +
    {"|"}
    +
    setSelectedImages([])} + > + {t("button.unselect", { ns: "common" })} +
    +
    + +
    + ) : ( + + )} +
    + {pageToggle == "train" ? ( + + ) : ( + + )} +
    + ); +} + +type LibrarySelectorProps = { + pageToggle: string | undefined; + dataset: { [id: string]: string[] }; + trainImages: string[]; + setPageToggle: (toggle: string) => void; + onDelete: (ids: string[], isName: boolean) => void; + onRename: (old_name: string, new_name: string) => void; +}; +function LibrarySelector({ + pageToggle, + dataset, + trainImages, + setPageToggle, + onDelete, + onRename, +}: LibrarySelectorProps) { + const { t } = useTranslation(["views/classificationModel"]); + const [confirmDelete, setConfirmDelete] = useState(null); + const [renameFace, setRenameFace] = useState(null); + + const handleDeleteFace = useCallback( + (name: string) => { + // Get all image IDs for this face + const imageIds = dataset?.[name] || []; + + onDelete(imageIds, true); + setPageToggle("train"); + }, + [dataset, onDelete, setPageToggle], + ); + + const handleSetOpen = useCallback( + (open: boolean) => { + setRenameFace(open ? renameFace : null); + }, + [renameFace], + ); + + return ( + <> + !open && setConfirmDelete(null)} + > + + + {t("deleteCategory.title")} + + {t("deleteCategory.desc", { name: confirmDelete })} + + +
    + + +
    +
    +
    + + { + onRename(renameFace!, newName); + setRenameFace(null); + }} + defaultValue={renameFace || ""} + regexPattern={/^[\p{L}\p{N}\s'_-]{1,50}$/u} + regexErrorMessage={t("description.invalidName")} + /> + + + + + + + setPageToggle("train")} + > +
    {t("train.title")}
    +
    + ({trainImages.length}) +
    +
    + {trainImages.length > 0 && Object.keys(dataset).length > 0 && ( + <> + +
    + {t("categories")} +
    + + )} + {Object.keys(dataset).map((id) => ( + +
    setPageToggle(id)} + > + {id} + + ({dataset?.[id].length}) + +
    +
    + + + + + + + {t("button.renameCategory")} + + + + + + + + + + {t("button.deleteCategory")} + + + +
    +
    + ))} +
    +
    + + ); +} + +type DatasetGridProps = { + modelName: string; + categoryName: string; + images: string[]; + selectedImages: string[]; + onClickImages: (images: string[], ctrl: boolean) => void; + onDelete: (ids: string[]) => void; +}; +function DatasetGrid({ + modelName, + categoryName, + images, + selectedImages, + onClickImages, + onDelete, +}: DatasetGridProps) { + const { t } = useTranslation(["views/classificationModel"]); + + return ( +
    + {images.map((image) => ( +
    { + e.stopPropagation(); + + if (e.ctrlKey || e.metaKey) { + onClickImages([image], true); + } + }} + > +
    + +
    +
    +
    +
    + + + { + e.stopPropagation(); + onDelete([image]); + }} + /> + + + {t("button.deleteClassificationAttempts")} + + +
    +
    +
    +
    + ))} +
    + ); +} + +type TrainGridProps = { + model: CustomClassificationModelConfig; + classes: string[]; + trainImages: string[]; + selectedImages: string[]; + onClickImages: (images: string[], ctrl: boolean) => void; + onRefresh: () => void; + onDelete: (ids: string[]) => void; +}; +function TrainGrid({ + model, + classes, + trainImages, + selectedImages, + onClickImages, + onRefresh, + onDelete, +}: TrainGridProps) { + const { t } = useTranslation(["views/classificationModel"]); + + const trainData = useMemo( + () => + trainImages + .map((raw) => { + const parts = raw.replaceAll(".webp", "").split("-"); + return { + raw, + timestamp: parts[0], + label: parts[1], + score: Number.parseFloat(parts[2]) * 100, + }; + }) + .sort((a, b) => b.timestamp.localeCompare(a.timestamp)), + [trainImages], + ); + + return ( +
    + {trainData?.map((data) => ( +
    { + e.stopPropagation(); + onClickImages([data.raw], e.ctrlKey || e.metaKey); + }} + > +
    + +
    +
    +
    +
    +
    {data.label}
    +
    {data.score}%
    +
    +
    + + + + + + { + e.stopPropagation(); + onDelete([data.raw]); + }} + /> + + + {t("button.deleteClassificationAttempts")} + + +
    +
    +
    +
    + ))} +
    + ); +} From 765a28d8121b292fab94f55319d53d9e16005470 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Thu, 5 Jun 2025 09:13:12 -0600 Subject: [PATCH 048/144] Live classification model training (#18583) * Implement model training via ZMQ and add model states to represent training * Get model updates working * Improve toasts and model state * Clean up logging * Add back in --- frigate/api/classification.py | 10 ++- frigate/comms/embeddings_updater.py | 16 +++-- frigate/config/logger.py | 2 + .../real_time/custom_classification.py | 68 ++++++++++++++++++- frigate/embeddings/__init__.py | 5 ++ frigate/types.py | 2 + frigate/util/classification.py | 14 +++- .../locales/en/views/classificationModel.json | 7 +- web/src/types/ws.ts | 4 +- .../classification/ModelTrainingView.tsx | 62 ++++++++++++++++- 10 files changed, 168 insertions(+), 22 deletions(-) diff --git a/frigate/api/classification.py b/frigate/api/classification.py index da5d11d88..f234e5cae 100644 --- a/frigate/api/classification.py +++ b/frigate/api/classification.py @@ -7,7 +7,7 @@ import shutil from typing import Any import cv2 -from fastapi import APIRouter, BackgroundTasks, Depends, Request, UploadFile +from fastapi import APIRouter, Depends, Request, UploadFile from fastapi.responses import JSONResponse from pathvalidate import sanitize_filename from peewee import DoesNotExist @@ -24,7 +24,6 @@ from frigate.config.camera import DetectConfig from frigate.const import CLIPS_DIR, FACE_DIR from frigate.embeddings import EmbeddingsContext from frigate.models import Event -from frigate.util.classification import train_classification_model from frigate.util.path import get_event_snapshot logger = logging.getLogger(__name__) @@ -494,9 +493,7 @@ def get_classification_images(name: str): @router.post("/classification/{name}/train") -async def train_configured_model( - request: Request, name: str, background_tasks: BackgroundTasks -): +async def train_configured_model(request: Request, name: str): config: 
FrigateConfig = request.app.frigate_config if name not in config.classification.custom: @@ -510,7 +507,8 @@ async def train_configured_model( status_code=404, ) - background_tasks.add_task(train_classification_model, name) + context: EmbeddingsContext = request.app.embeddings + context.start_classification_training(name) return JSONResponse( content={"success": True, "message": "Started classification model training."}, status_code=200, diff --git a/frigate/comms/embeddings_updater.py b/frigate/comms/embeddings_updater.py index 00bc88b3d..5edb9e77d 100644 --- a/frigate/comms/embeddings_updater.py +++ b/frigate/comms/embeddings_updater.py @@ -9,16 +9,22 @@ SOCKET_REP_REQ = "ipc:///tmp/cache/embeddings" class EmbeddingsRequestEnum(Enum): + # audio + transcribe_audio = "transcribe_audio" + # custom classification + train_classification = "train_classification" + # face clear_face_classifier = "clear_face_classifier" - embed_description = "embed_description" - embed_thumbnail = "embed_thumbnail" - generate_search = "generate_search" recognize_face = "recognize_face" register_face = "register_face" reprocess_face = "reprocess_face" - reprocess_plate = "reprocess_plate" + # semantic search + embed_description = "embed_description" + embed_thumbnail = "embed_thumbnail" + generate_search = "generate_search" reindex = "reindex" - transcribe_audio = "transcribe_audio" + # LPR + reprocess_plate = "reprocess_plate" class EmbeddingsResponder: diff --git a/frigate/config/logger.py b/frigate/config/logger.py index e6e1c06d3..a3eed23d0 100644 --- a/frigate/config/logger.py +++ b/frigate/config/logger.py @@ -29,7 +29,9 @@ class LoggerConfig(FrigateBaseModel): logging.getLogger().setLevel(self.default.value.upper()) log_levels = { + "absl": LogLevel.error, "httpx": LogLevel.error, + "tensorflow": LogLevel.error, "werkzeug": LogLevel.error, "ws4py": LogLevel.error, **self.logs, diff --git a/frigate/data_processing/real_time/custom_classification.py 
b/frigate/data_processing/real_time/custom_classification.py index 0e254ab0d..df4baf70b 100644 --- a/frigate/data_processing/real_time/custom_classification.py +++ b/frigate/data_processing/real_time/custom_classification.py @@ -3,11 +3,13 @@ import datetime import logging import os +import threading from typing import Any import cv2 import numpy as np +from frigate.comms.embeddings_updater import EmbeddingsRequestEnum from frigate.comms.event_metadata_updater import ( EventMetadataPublisher, EventMetadataTypeEnum, @@ -15,8 +17,10 @@ from frigate.comms.event_metadata_updater import ( from frigate.comms.inter_process import InterProcessRequestor from frigate.config import FrigateConfig from frigate.config.classification import CustomClassificationConfig -from frigate.const import CLIPS_DIR, MODEL_CACHE_DIR +from frigate.const import CLIPS_DIR, MODEL_CACHE_DIR, UPDATE_MODEL_STATE +from frigate.types import ModelStatusTypesEnum from frigate.util.builtin import load_labels +from frigate.util.classification import train_classification_model from frigate.util.object import box_overlaps, calculate_region from ..types import DataProcessorMetrics @@ -63,6 +67,18 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi): prefill=0, ) + def __retrain_model(self) -> None: + train_classification_model(self.model_config.name) + self.__build_detector() + self.requestor.send_data( + UPDATE_MODEL_STATE, + { + "model": self.model_config.name, + "state": ModelStatusTypesEnum.complete, + }, + ) + logger.info(f"Successfully loaded updated model for {self.model_config.name}") + def process_frame(self, frame_data: dict[str, Any], frame: np.ndarray): camera = frame_data.get("camera") @@ -143,7 +159,24 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi): ) def handle_request(self, topic, request_data): - return None + if topic == EmbeddingsRequestEnum.train_classification.value: + if request_data.get("model_name") == self.model_config.name: + self.requestor.send_data( 
+ UPDATE_MODEL_STATE, + { + "model": self.model_config.name, + "state": ModelStatusTypesEnum.training, + }, + ) + threading.Thread(target=self.__retrain_model).start() + return { + "success": True, + "message": f"Began training {self.model_config.name} model.", + } + else: + return None + else: + return None def expire_object(self, object_id, camera): pass @@ -182,6 +215,18 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi): prefill=0, ) + def __retrain_model(self) -> None: + train_classification_model(self.model_config.name) + self.__build_detector() + self.requestor.send_data( + UPDATE_MODEL_STATE, + { + "model": self.model_config.name, + "state": ModelStatusTypesEnum.complete, + }, + ) + logger.info(f"Successfully loaded updated model for {self.model_config.name}") + def process_frame(self, obj_data, frame): if obj_data["label"] not in self.model_config.object_config.objects: return @@ -236,7 +281,24 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi): self.detected_objects[obj_data["id"]] = score def handle_request(self, topic, request_data): - return None + if topic == EmbeddingsRequestEnum.train_classification.value: + if request_data.get("model_name") == self.model_config.name: + self.requestor.send_data( + UPDATE_MODEL_STATE, + { + "model": self.model_config.name, + "state": ModelStatusTypesEnum.training, + }, + ) + threading.Thread(target=self.__retrain_model).start() + return { + "success": True, + "message": f"Began training {self.model_config.name} model.", + } + else: + return None + else: + return None def expire_object(self, object_id, camera): if object_id in self.detected_objects: diff --git a/frigate/embeddings/__init__.py b/frigate/embeddings/__init__.py index a86edf76c..5c2a9005f 100644 --- a/frigate/embeddings/__init__.py +++ b/frigate/embeddings/__init__.py @@ -301,6 +301,11 @@ class EmbeddingsContext: def reindex_embeddings(self) -> dict[str, Any]: return 
self.requestor.send_data(EmbeddingsRequestEnum.reindex.value, {}) + def start_classification_training(self, model_name: str) -> dict[str, Any]: + return self.requestor.send_data( + EmbeddingsRequestEnum.train_classification.value, {"model_name": model_name} + ) + def transcribe_audio(self, event: dict[str, any]) -> dict[str, any]: return self.requestor.send_data( EmbeddingsRequestEnum.transcribe_audio.value, {"event": event} diff --git a/frigate/types.py b/frigate/types.py index ee48cc02b..a9e27ba90 100644 --- a/frigate/types.py +++ b/frigate/types.py @@ -21,6 +21,8 @@ class ModelStatusTypesEnum(str, Enum): downloading = "downloading" downloaded = "downloaded" error = "error" + training = "training" + complete = "complete" class TrackedObjectUpdateTypesEnum(str, Enum): diff --git a/frigate/util/classification.py b/frigate/util/classification.py index a8624870b..92da7c93e 100644 --- a/frigate/util/classification.py +++ b/frigate/util/classification.py @@ -1,7 +1,7 @@ """Util for classification models.""" -import logging import os +import sys import cv2 import numpy as np @@ -50,7 +50,13 @@ def train_classification_model(model_name: str) -> bool: ] ) - tf.get_logger().setLevel(logging.ERROR) + # TF and Keras are very loud with logging + # we want to avoid these logs so we + # temporarily redirect stdout / stderr + original_stdout = sys.stdout + original_stderr = sys.stderr + sys.stdout = open(os.devnull, "w") + sys.stderr = open(os.devnull, "w") # Start with imagenet base model with 35% of channels in each layer base_model = MobileNetV2( @@ -112,3 +118,7 @@ def train_classification_model(model_name: str) -> bool: # write model with open(os.path.join(model_dir, "model.tflite"), "wb") as f: f.write(tflite_model) + + # restore original stdout / stderr + sys.stdout = original_stdout + sys.stderr = original_stderr diff --git a/web/public/locales/en/views/classificationModel.json b/web/public/locales/en/views/classificationModel.json index eb09ecaa0..0af0179b9 100644 --- 
a/web/public/locales/en/views/classificationModel.json +++ b/web/public/locales/en/views/classificationModel.json @@ -9,12 +9,15 @@ "success": { "deletedCategory": "Deleted Class", "deletedImage": "Deleted Images", - "categorizedImage": "Successfully Classified Image" + "categorizedImage": "Successfully Classified Image", + "trainedModel": "Successfully trained model.", + "trainingModel": "Successfully started model training." }, "error": { "deleteImageFailed": "Failed to delete: {{errorMessage}}", "deleteCategoryFailed": "Failed to delete class: {{errorMessage}}", - "categorizeFailed": "Failed to categorize image: {{errorMessage}}" + "categorizeFailed": "Failed to categorize image: {{errorMessage}}", + "trainingFailed": "Failed to start model training: {{errorMessage}}" } }, "deleteCategory": { diff --git a/web/src/types/ws.ts b/web/src/types/ws.ts index d1e810494..06ec9ae1d 100644 --- a/web/src/types/ws.ts +++ b/web/src/types/ws.ts @@ -73,7 +73,9 @@ export type ModelState = | "not_downloaded" | "downloading" | "downloaded" - | "error"; + | "error" + | "training" + | "complete"; export type EmbeddingsReindexProgressType = { thumbnails: number; diff --git a/web/src/views/classification/ModelTrainingView.tsx b/web/src/views/classification/ModelTrainingView.tsx index 53ef7fa66..1f62a4f53 100644 --- a/web/src/views/classification/ModelTrainingView.tsx +++ b/web/src/views/classification/ModelTrainingView.tsx @@ -45,6 +45,9 @@ import { toast } from "sonner"; import useSWR from "swr"; import ClassificationSelectionDialog from "@/components/overlay/ClassificationSelectionDialog"; import { TbCategoryPlus } from "react-icons/tb"; +import { useModelState } from "@/api/ws"; +import { ModelState } from "@/types/ws"; +import ActivityIndicator from "@/components/indicators/activity-indicator"; type ModelTrainingViewProps = { model: CustomClassificationModelConfig; @@ -54,6 +57,33 @@ export default function ModelTrainingView({ model }: ModelTrainingViewProps) { const [page, 
setPage] = useState("train"); const [pageToggle, setPageToggle] = useOptimisticState(page, setPage, 100); + // model state + + const [wasTraining, setWasTraining] = useState(false); + const { payload: lastModelState } = useModelState(model.name, true); + const modelState = useMemo(() => { + if (!lastModelState || lastModelState == "downloaded") { + return "complete"; + } + + return lastModelState; + }, [lastModelState]); + + useEffect(() => { + if (!wasTraining) { + return; + } + + if (modelState == "complete") { + toast.success(t("toast.success.trainedModel"), { + position: "top-center", + }); + setWasTraining(false); + } + // only refresh when modelState changes + // eslint-disable-next-line react-hooks/exhaustive-deps + }, [modelState]); + // dataset const { data: trainImages, mutate: refreshTrain } = useSWR( @@ -101,8 +131,27 @@ export default function ModelTrainingView({ model }: ModelTrainingViewProps) { // actions const trainModel = useCallback(() => { - axios.post(`classification/${model.name}/train`); - }, [model]); + axios + .post(`classification/${model.name}/train`) + .then((resp) => { + if (resp.status == 200) { + setWasTraining(true); + toast.success(t("toast.success.trainingModel"), { + position: "top-center", + }); + } + }) + .catch((error) => { + const errorMessage = + error.response?.data?.message || + error.response?.data?.detail || + "Unknown error"; + + toast.error(t("toast.error.trainingFailed", { errorMessage }), { + position: "top-center", + }); + }); + }, [model, t]); const [deleteDialogOpen, setDeleteDialogOpen] = useState( null, @@ -274,7 +323,14 @@ export default function ModelTrainingView({ model }: ModelTrainingViewProps) {
    ) : ( - + )} {pageToggle == "train" ? ( From b1a65c88e8b37fdc8a7fc9ba42505d63bb2e8600 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Fri, 6 Jun 2025 10:29:44 -0600 Subject: [PATCH 049/144] Classification Model Metrics (#18595) * Add speed and rate metrics for custom classification models * Use metrics for classification models * Use keys * Cast to list --- frigate/app.py | 3 +- .../real_time/custom_classification.py | 28 ++++++++++++++++++- frigate/data_processing/types.py | 11 +++++++- frigate/stats/util.py | 8 ++++++ 4 files changed, 47 insertions(+), 3 deletions(-) diff --git a/frigate/app.py b/frigate/app.py index b6dd6c7b9..f534de6e0 100644 --- a/frigate/app.py +++ b/frigate/app.py @@ -92,11 +92,12 @@ class FrigateApp: self.log_queue: Queue = mp.Queue() self.camera_metrics: dict[str, CameraMetrics] = {} self.embeddings_metrics: DataProcessorMetrics | None = ( - DataProcessorMetrics() + DataProcessorMetrics(list(config.classification.custom.keys())) if ( config.semantic_search.enabled or config.lpr.enabled or config.face_recognition.enabled + or len(config.classification.custom) > 0 ) else None ) diff --git a/frigate/data_processing/real_time/custom_classification.py b/frigate/data_processing/real_time/custom_classification.py index df4baf70b..a718956e2 100644 --- a/frigate/data_processing/real_time/custom_classification.py +++ b/frigate/data_processing/real_time/custom_classification.py @@ -19,7 +19,7 @@ from frigate.config import FrigateConfig from frigate.config.classification import CustomClassificationConfig from frigate.const import CLIPS_DIR, MODEL_CACHE_DIR, UPDATE_MODEL_STATE from frigate.types import ModelStatusTypesEnum -from frigate.util.builtin import load_labels +from frigate.util.builtin import EventsPerSecond, InferenceSpeed, load_labels from frigate.util.classification import train_classification_model from frigate.util.object import box_overlaps, calculate_region @@ -51,6 +51,10 @@ class 
CustomStateClassificationProcessor(RealTimeProcessorApi): self.tensor_input_details: dict[str, Any] = None self.tensor_output_details: dict[str, Any] = None self.labelmap: dict[int, str] = {} + self.classifications_per_second = EventsPerSecond() + self.inference_speed = InferenceSpeed( + self.metrics.classification_speeds[self.model_config.name] + ) self.last_run = datetime.datetime.now().timestamp() self.__build_detector() @@ -66,6 +70,7 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi): os.path.join(self.model_dir, "labelmap.txt"), prefill=0, ) + self.classifications_per_second.start() def __retrain_model(self) -> None: train_classification_model(self.model_config.name) @@ -79,7 +84,14 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi): ) logger.info(f"Successfully loaded updated model for {self.model_config.name}") + def __update_metrics(self, duration: float) -> None: + self.classifications_per_second.update() + self.inference_speed.update(duration) + def process_frame(self, frame_data: dict[str, Any], frame: np.ndarray): + self.metrics.classification_cps[ + self.model_config.name + ].value = self.classifications_per_second.eps() camera = frame_data.get("camera") if camera not in self.model_config.state_config.cameras: @@ -143,6 +155,7 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi): probs = res / res.sum(axis=0) best_id = np.argmax(probs) score = round(probs[best_id], 2) + self.__update_metrics(datetime.datetime.now().timestamp() - now) write_classification_attempt( self.train_dir, @@ -200,6 +213,10 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi): self.tensor_output_details: dict[str, Any] = None self.detected_objects: dict[str, float] = {} self.labelmap: dict[int, str] = {} + self.classifications_per_second = EventsPerSecond() + self.inference_speed = InferenceSpeed( + self.metrics.classification_speeds[self.model_config.name] + ) self.__build_detector() def __build_detector(self) -> None: @@ 
-227,7 +244,15 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi): ) logger.info(f"Successfully loaded updated model for {self.model_config.name}") + def __update_metrics(self, duration: float) -> None: + self.classifications_per_second.update() + self.inference_speed.update(duration) + def process_frame(self, obj_data, frame): + self.metrics.classification_cps[ + self.model_config.name + ].value = self.classifications_per_second.eps() + if obj_data["label"] not in self.model_config.object_config.objects: return @@ -261,6 +286,7 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi): best_id = np.argmax(probs) score = round(probs[best_id], 2) previous_score = self.detected_objects.get(obj_data["id"], 0.0) + self.__update_metrics(datetime.datetime.now().timestamp() - now) write_classification_attempt( self.train_dir, diff --git a/frigate/data_processing/types.py b/frigate/data_processing/types.py index 5d083b32e..783b0798e 100644 --- a/frigate/data_processing/types.py +++ b/frigate/data_processing/types.py @@ -20,8 +20,10 @@ class DataProcessorMetrics: alpr_pps: Synchronized yolov9_lpr_speed: Synchronized yolov9_lpr_pps: Synchronized + classification_speeds: dict[str, Synchronized] + classification_cps: dict[str, Synchronized] - def __init__(self): + def __init__(self, custom_classification_models: list[str]): self.image_embeddings_speed = mp.Value("d", 0.0) self.image_embeddings_eps = mp.Value("d", 0.0) self.text_embeddings_speed = mp.Value("d", 0.0) @@ -33,6 +35,13 @@ class DataProcessorMetrics: self.yolov9_lpr_speed = mp.Value("d", 0.0) self.yolov9_lpr_pps = mp.Value("d", 0.0) + if custom_classification_models: + self.classification_speeds = {} + self.classification_cps = {} + for key in custom_classification_models: + self.classification_speeds[key] = mp.Value("d", 0.0) + self.classification_cps[key] = mp.Value("d", 0.0) + class DataProcessorModelRunner: def __init__(self, requestor, device: str = "CPU", model_size: str = "large"): 
diff --git a/frigate/stats/util.py b/frigate/stats/util.py index e098bc541..f5807e1e6 100644 --- a/frigate/stats/util.py +++ b/frigate/stats/util.py @@ -354,6 +354,14 @@ def stats_snapshot( embeddings_metrics.yolov9_lpr_pps.value, 2 ) + for key in embeddings_metrics.classification_speeds.keys(): + stats["embeddings"][f"{key}_classification_speed"] = round( + embeddings_metrics.classification_speeds[key].value * 1000, 2 + ) + stats["embeddings"][f"{key}_classification"] = round( + embeddings_metrics.classification_cps[key].value, 2 + ) + get_processing_stats(config, stats, hwaccel_errors) stats["service"] = { From 7ce26087f7452ceb8d152413083f5f0d2c3ea043 Mon Sep 17 00:00:00 2001 From: Jimmy Date: Fri, 6 Jun 2025 14:41:04 -0500 Subject: [PATCH 050/144] Add Mesa Teflon as a TFLite detector (#18310) * Refactor common functions for tflite detector implementations * Add detector using mesa teflon delegate Non-EdgeTPU TFLite can use the standard .tflite format * Add mesa-teflon-delegate from bookworm-backports to arm64 images --- docker/main/install_deps.sh | 8 +++ frigate/config/config.py | 4 +- frigate/detectors/detector_utils.py | 74 +++++++++++++++++++++++++ frigate/detectors/plugins/cpu_tfl.py | 36 ++---------- frigate/detectors/plugins/teflon_tfl.py | 38 +++++++++++++ 5 files changed, 128 insertions(+), 32 deletions(-) create mode 100644 frigate/detectors/detector_utils.py create mode 100644 frigate/detectors/plugins/teflon_tfl.py diff --git a/docker/main/install_deps.sh b/docker/main/install_deps.sh index aed11dff4..bd9f363e9 100755 --- a/docker/main/install_deps.sh +++ b/docker/main/install_deps.sh @@ -31,6 +31,14 @@ unset DEBIAN_FRONTEND yes | dpkg -i /tmp/libedgetpu1-max.deb && export DEBIAN_FRONTEND=noninteractive rm /tmp/libedgetpu1-max.deb +# install mesa-teflon-delegate from bookworm-backports +# Only available for arm64 at the moment +if [[ "${TARGETARCH}" == "arm64" ]]; then + echo "deb http://deb.debian.org/debian bookworm-backports main" | tee 
/etc/apt/sources.list.d/bookworm-backports.list + apt-get -qq update + apt-get -qq install --no-install-recommends --no-install-suggests -y mesa-teflon-delegate/bookworm-backports +fi + # ffmpeg -> amd64 if [[ "${TARGETARCH}" == "amd64" ]]; then mkdir -p /usr/lib/ffmpeg/5.0 diff --git a/frigate/config/config.py b/frigate/config/config.py index 49e57f3cf..62c931c96 100644 --- a/frigate/config/config.py +++ b/frigate/config/config.py @@ -487,7 +487,9 @@ class FrigateConfig(FrigateBaseModel): model_config["path"] = detector_config.model_path if "path" not in model_config: - if detector_config.type == "cpu": + if detector_config.type == "cpu" or detector_config.type.endswith( + "_tfl" + ): model_config["path"] = "/cpu_model.tflite" elif detector_config.type == "edgetpu": model_config["path"] = "/edgetpu_model.tflite" diff --git a/frigate/detectors/detector_utils.py b/frigate/detectors/detector_utils.py new file mode 100644 index 000000000..d732de871 --- /dev/null +++ b/frigate/detectors/detector_utils.py @@ -0,0 +1,74 @@ +import logging +import os + +import numpy as np + +try: + from tflite_runtime.interpreter import Interpreter, load_delegate +except ModuleNotFoundError: + from tensorflow.lite.python.interpreter import Interpreter, load_delegate + + +logger = logging.getLogger(__name__) + + +def tflite_init(self, interpreter): + self.interpreter = interpreter + + self.interpreter.allocate_tensors() + + self.tensor_input_details = self.interpreter.get_input_details() + self.tensor_output_details = self.interpreter.get_output_details() + + +def tflite_detect_raw(self, tensor_input): + self.interpreter.set_tensor(self.tensor_input_details[0]["index"], tensor_input) + self.interpreter.invoke() + + boxes = self.interpreter.tensor(self.tensor_output_details[0]["index"])()[0] + class_ids = self.interpreter.tensor(self.tensor_output_details[1]["index"])()[0] + scores = self.interpreter.tensor(self.tensor_output_details[2]["index"])()[0] + count = 
int(self.interpreter.tensor(self.tensor_output_details[3]["index"])()[0]) + + detections = np.zeros((20, 6), np.float32) + + for i in range(count): + if scores[i] < 0.4 or i == 20: + break + detections[i] = [ + class_ids[i], + float(scores[i]), + boxes[i][0], + boxes[i][1], + boxes[i][2], + boxes[i][3], + ] + + return detections + + +def tflite_load_delegate_interpreter( + delegate_library: str, detector_config, device_config +): + try: + logger.info("Attempting to load NPU") + tf_delegate = load_delegate(delegate_library, device_config) + logger.info("NPU found") + interpreter = Interpreter( + model_path=detector_config.model.path, + experimental_delegates=[tf_delegate], + ) + return interpreter + except ValueError: + _, ext = os.path.splitext(detector_config.model.path) + + if ext and ext != ".tflite": + logger.error( + "Incorrect model used with NPU. Only .tflite models can be used with a TFLite delegate." + ) + else: + logger.error( + "No NPU was detected. If you do not have a TFLite device yet, you must configure CPU detectors." 
+ ) + + raise diff --git a/frigate/detectors/plugins/cpu_tfl.py b/frigate/detectors/plugins/cpu_tfl.py index 8a54363e1..fc8db0f4b 100644 --- a/frigate/detectors/plugins/cpu_tfl.py +++ b/frigate/detectors/plugins/cpu_tfl.py @@ -1,12 +1,13 @@ import logging -import numpy as np from pydantic import Field from typing_extensions import Literal from frigate.detectors.detection_api import DetectionApi from frigate.detectors.detector_config import BaseDetectorConfig +from ..detector_utils import tflite_detect_raw, tflite_init + try: from tflite_runtime.interpreter import Interpreter except ModuleNotFoundError: @@ -27,39 +28,12 @@ class CpuTfl(DetectionApi): type_key = DETECTOR_KEY def __init__(self, detector_config: CpuDetectorConfig): - self.interpreter = Interpreter( + interpreter = Interpreter( model_path=detector_config.model.path, num_threads=detector_config.num_threads or 3, ) - self.interpreter.allocate_tensors() - - self.tensor_input_details = self.interpreter.get_input_details() - self.tensor_output_details = self.interpreter.get_output_details() + tflite_init(self, interpreter) def detect_raw(self, tensor_input): - self.interpreter.set_tensor(self.tensor_input_details[0]["index"], tensor_input) - self.interpreter.invoke() - - boxes = self.interpreter.tensor(self.tensor_output_details[0]["index"])()[0] - class_ids = self.interpreter.tensor(self.tensor_output_details[1]["index"])()[0] - scores = self.interpreter.tensor(self.tensor_output_details[2]["index"])()[0] - count = int( - self.interpreter.tensor(self.tensor_output_details[3]["index"])()[0] - ) - - detections = np.zeros((20, 6), np.float32) - - for i in range(count): - if scores[i] < 0.4 or i == 20: - break - detections[i] = [ - class_ids[i], - float(scores[i]), - boxes[i][0], - boxes[i][1], - boxes[i][2], - boxes[i][3], - ] - - return detections + return tflite_detect_raw(self, tensor_input) diff --git a/frigate/detectors/plugins/teflon_tfl.py b/frigate/detectors/plugins/teflon_tfl.py new file mode 100644 
index 000000000..7e29d6630 --- /dev/null +++ b/frigate/detectors/plugins/teflon_tfl.py @@ -0,0 +1,38 @@ +import logging + +from typing_extensions import Literal + +from frigate.detectors.detection_api import DetectionApi +from frigate.detectors.detector_config import BaseDetectorConfig + +from ..detector_utils import ( + tflite_detect_raw, + tflite_init, + tflite_load_delegate_interpreter, +) + +logger = logging.getLogger(__name__) + +# Use _tfl suffix to default tflite model +DETECTOR_KEY = "teflon_tfl" + + +class TeflonDetectorConfig(BaseDetectorConfig): + type: Literal[DETECTOR_KEY] + + +class TeflonTfl(DetectionApi): + type_key = DETECTOR_KEY + + def __init__(self, detector_config: TeflonDetectorConfig): + # Location in Debian's mesa-teflon-delegate + delegate_library = "/usr/lib/teflon/libteflon.so" + device_config = {} + + interpreter = tflite_load_delegate_interpreter( + delegate_library, detector_config, device_config + ) + tflite_init(self, interpreter) + + def detect_raw(self, tensor_input): + return tflite_detect_raw(self, tensor_input) From 13b760346a20c77c1bc0162f3f46cfec0bc2cb37 Mon Sep 17 00:00:00 2001 From: FL42 <46161216+fl42@users.noreply.github.com> Date: Sat, 7 Jun 2025 20:43:29 +0200 Subject: [PATCH 051/144] feat: enable using GenAI for cameras with GenAI disabled from the API (#18616) --- docs/docs/configuration/genai.md | 2 +- .../api/defs/query/regenerate_query_parameters.py | 6 +++++- frigate/api/event.py | 5 +++-- frigate/embeddings/maintainer.py | 14 ++++++++++---- 4 files changed, 19 insertions(+), 8 deletions(-) diff --git a/docs/docs/configuration/genai.md b/docs/docs/configuration/genai.md index f76c075b7..51c0fee10 100644 --- a/docs/docs/configuration/genai.md +++ b/docs/docs/configuration/genai.md @@ -9,7 +9,7 @@ Requests for a description are sent off automatically to your AI provider at the ## Configuration -Generative AI can be enabled for all cameras or only for specific cameras. 
There are currently 3 native providers available to integrate with Frigate. Other providers that support the OpenAI standard API can also be used. See the OpenAI section below. +Generative AI can be enabled for all cameras or only for specific cameras. If GenAI is disabled for a camera, you can still manually generate descriptions for events using the HTTP API. There are currently 3 native providers available to integrate with Frigate. Other providers that support the OpenAI standard API can also be used. See the OpenAI section below. To use Generative AI, you must define a single provider at the global level of your Frigate configuration. If the provider you choose requires an API key, you may either directly paste it in your configuration, or store it in an environment variable prefixed with `FRIGATE_`. diff --git a/frigate/api/defs/query/regenerate_query_parameters.py b/frigate/api/defs/query/regenerate_query_parameters.py index bcce47b1b..af50ada2c 100644 --- a/frigate/api/defs/query/regenerate_query_parameters.py +++ b/frigate/api/defs/query/regenerate_query_parameters.py @@ -1,9 +1,13 @@ from typing import Optional -from pydantic import BaseModel +from pydantic import BaseModel, Field from frigate.events.types import RegenerateDescriptionEnum class RegenerateQueryParameters(BaseModel): source: Optional[RegenerateDescriptionEnum] = RegenerateDescriptionEnum.thumbnails + force: Optional[bool] = Field( + default=False, + description="Force (re)generating the description even if GenAI is disabled for this camera.", + ) diff --git a/frigate/api/event.py b/frigate/api/event.py index 27353e4b5..24a6c6f4a 100644 --- a/frigate/api/event.py +++ b/frigate/api/event.py @@ -1234,9 +1234,10 @@ def regenerate_description( camera_config = request.app.frigate_config.cameras[event.camera] - if camera_config.genai.enabled: + if camera_config.genai.enabled or params.force: request.app.event_metadata_updater.publish( - EventMetadataTypeEnum.regenerate_description, (event.id, 
params.source) + EventMetadataTypeEnum.regenerate_description, + (event.id, params.source, params.force), ) return JSONResponse( diff --git a/frigate/embeddings/maintainer.py b/frigate/embeddings/maintainer.py index 9a2378221..ce81c2bc4 100644 --- a/frigate/embeddings/maintainer.py +++ b/frigate/embeddings/maintainer.py @@ -473,11 +473,11 @@ class EmbeddingMaintainer(threading.Thread): if topic is None: return - event_id, source = payload + event_id, source, force = payload if event_id: self.handle_regenerate_description( - event_id, RegenerateDescriptionEnum(source) + event_id, RegenerateDescriptionEnum(source), force ) def _process_frame_updates(self) -> None: @@ -678,15 +678,21 @@ class EmbeddingMaintainer(threading.Thread): except Exception: return None - def handle_regenerate_description(self, event_id: str, source: str) -> None: + def handle_regenerate_description( + self, event_id: str, source: str, force: bool + ) -> None: try: event: Event = Event.get(Event.id == event_id) except DoesNotExist: logger.error(f"Event {event_id} not found for description regeneration") return + if self.genai_client is None: + logger.error("GenAI not enabled") + return + camera_config = self.config.cameras[event.camera] - if not camera_config.genai.enabled or self.genai_client is None: + if not camera_config.genai.enabled and not force: logger.error(f"GenAI not enabled for camera {event.camera}") return From 937459be479ef0cf963b42a126ced341999a0afe Mon Sep 17 00:00:00 2001 From: FL42 <46161216+fl42@users.noreply.github.com> Date: Sun, 8 Jun 2025 14:55:29 +0200 Subject: [PATCH 052/144] fix: Initialize GenAI client if GenAI is enabled globally (#18623) --- frigate/genai/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frigate/genai/__init__.py b/frigate/genai/__init__.py index 2c0aadbd9..28ea4af6e 100644 --- a/frigate/genai/__init__.py +++ b/frigate/genai/__init__.py @@ -63,7 +63,7 @@ def get_genai_client(config: FrigateConfig) -> 
Optional[GenAIClient]: c for c in config.cameras.values() if c.enabled and c.genai.enabled ] - if genai_cameras: + if genai_cameras or genai_config.enabled: load_providers() provider = PROVIDERS.get(genai_config.provider) if provider: From 40ab7d6c3807819085aab862828e404ac25a08c2 Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Sun, 8 Jun 2025 13:06:17 -0500 Subject: [PATCH 053/144] Make Birdseye clickable (#18628) * keep track of layout changes and publish on change * websocket hook * clickable overlay div to navigate to full camera view --- frigate/comms/dispatcher.py | 13 ++++ frigate/const.py | 1 + frigate/output/birdseye.py | 66 +++++++++++----- web/src/api/ws.tsx | 34 ++++++++ .../components/player/BirdseyeLivePlayer.tsx | 6 +- web/src/views/live/LiveBirdseyeView.tsx | 77 ++++++++++++++++++- 6 files changed, 175 insertions(+), 22 deletions(-) diff --git a/frigate/comms/dispatcher.py b/frigate/comms/dispatcher.py index 6fee166b7..0a9c439f4 100644 --- a/frigate/comms/dispatcher.py +++ b/frigate/comms/dispatcher.py @@ -21,6 +21,7 @@ from frigate.const import ( INSERT_PREVIEW, NOTIFICATION_TEST, REQUEST_REGION_GRID, + UPDATE_BIRDSEYE_LAYOUT, UPDATE_CAMERA_ACTIVITY, UPDATE_EMBEDDINGS_REINDEX_PROGRESS, UPDATE_EVENT_DESCRIPTION, @@ -55,6 +56,7 @@ class Dispatcher: self.camera_activity = CameraActivityManager(config, self.publish) self.model_state = {} self.embeddings_reindex = {} + self.birdseye_layout = {} self._camera_settings_handlers: dict[str, Callable] = { "audio": self._on_audio_command, @@ -168,6 +170,14 @@ class Dispatcher: json.dumps(self.embeddings_reindex.copy()), ) + def handle_update_birdseye_layout(): + if payload: + self.birdseye_layout = payload + self.publish("birdseye_layout", json.dumps(self.birdseye_layout)) + + def handle_birdseye_layout(): + self.publish("birdseye_layout", json.dumps(self.birdseye_layout.copy())) + def handle_on_connect(): camera_status = 
self.camera_activity.last_camera_activity.copy() cameras_with_status = camera_status.keys() @@ -205,6 +215,7 @@ class Dispatcher: "embeddings_reindex_progress", json.dumps(self.embeddings_reindex.copy()), ) + self.publish("birdseye_layout", json.dumps(self.birdseye_layout.copy())) def handle_notification_test(): self.publish("notification_test", "Test notification") @@ -220,10 +231,12 @@ class Dispatcher: UPDATE_EVENT_DESCRIPTION: handle_update_event_description, UPDATE_MODEL_STATE: handle_update_model_state, UPDATE_EMBEDDINGS_REINDEX_PROGRESS: handle_update_embeddings_reindex_progress, + UPDATE_BIRDSEYE_LAYOUT: handle_update_birdseye_layout, NOTIFICATION_TEST: handle_notification_test, "restart": handle_restart, "embeddingsReindexProgress": handle_embeddings_reindex_progress, "modelState": handle_model_state, + "birdseyeLayout": handle_birdseye_layout, "onConnect": handle_on_connect, } diff --git a/frigate/const.py b/frigate/const.py index 699a194ac..893e6eb52 100644 --- a/frigate/const.py +++ b/frigate/const.py @@ -112,6 +112,7 @@ UPDATE_CAMERA_ACTIVITY = "update_camera_activity" UPDATE_EVENT_DESCRIPTION = "update_event_description" UPDATE_MODEL_STATE = "update_model_state" UPDATE_EMBEDDINGS_REINDEX_PROGRESS = "handle_embeddings_reindex_progress" +UPDATE_BIRDSEYE_LAYOUT = "update_birdseye_layout" NOTIFICATION_TEST = "notification_test" # Stats Values diff --git a/frigate/output/birdseye.py b/frigate/output/birdseye.py index 78686fd63..a19436d5e 100644 --- a/frigate/output/birdseye.py +++ b/frigate/output/birdseye.py @@ -15,8 +15,9 @@ from typing import Any, Optional import cv2 import numpy as np +from frigate.comms.inter_process import InterProcessRequestor from frigate.config import BirdseyeModeEnum, FfmpegConfig, FrigateConfig -from frigate.const import BASE_DIR, BIRDSEYE_PIPE, INSTALL_DIR +from frigate.const import BASE_DIR, BIRDSEYE_PIPE, INSTALL_DIR, UPDATE_BIRDSEYE_LAYOUT from frigate.util.image import ( SharedMemoryFrameManager, copy_yuv_to_position, @@ 
-380,10 +381,24 @@ class BirdsEyeFrameManager: if mode == BirdseyeModeEnum.objects and object_box_count > 0: return True - def update_frame(self, frame: Optional[np.ndarray] = None) -> bool: + def get_camera_coordinates(self) -> dict[str, dict[str, int]]: + """Return the coordinates of each camera in the current layout.""" + coordinates = {} + for row in self.camera_layout: + for position in row: + camera_name, (x, y, width, height) = position + coordinates[camera_name] = { + "x": x, + "y": y, + "width": width, + "height": height, + } + return coordinates + + def update_frame(self, frame: Optional[np.ndarray] = None) -> tuple[bool, bool]: """ Update birdseye, optionally with a new frame. - When no frame is passed, check the layout and update for any disabled cameras. + Returns (frame_changed, layout_changed) to indicate if the frame or layout changed. """ # determine how many cameras are tracking objects within the last inactivity_threshold seconds @@ -421,19 +436,21 @@ class BirdsEyeFrameManager: max_camera_refresh = True self.last_refresh_time = now - # Track if the frame changes + # Track if the frame or layout changes frame_changed = False + layout_changed = False # If no active cameras and layout is already empty, no update needed if len(active_cameras) == 0: # if the layout is already cleared if len(self.camera_layout) == 0: - return False + return False, False # if the layout needs to be cleared self.camera_layout = [] self.active_cameras = set() self.clear_frame() frame_changed = True + layout_changed = True else: # Determine if layout needs resetting if len(self.active_cameras) - len(active_cameras) == 0: @@ -453,7 +470,7 @@ class BirdsEyeFrameManager: logger.debug("Resetting Birdseye layout...") self.clear_frame() self.active_cameras = active_cameras - + layout_changed = True # Layout is changing due to reset # this also converts added_cameras from a set to a list since we need # to pop elements in order active_cameras_to_add = sorted( @@ -503,7 +520,7 @@ 
class BirdsEyeFrameManager: # decrease scaling coefficient until height of all cameras can fit into the birdseye canvas while calculating: if self.stop_event.is_set(): - return + return frame_changed, layout_changed layout_candidate = self.calculate_layout( active_cameras_to_add, coefficient @@ -517,7 +534,7 @@ class BirdsEyeFrameManager: logger.error( "Error finding appropriate birdseye layout" ) - return + return frame_changed, layout_changed calculating = False self.canvas.set_coefficient(len(active_cameras), coefficient) @@ -535,7 +552,7 @@ class BirdsEyeFrameManager: if frame is not None: # Frame presence indicates a potential change frame_changed = True - return frame_changed + return frame_changed, layout_changed def calculate_layout( self, @@ -687,7 +704,11 @@ class BirdsEyeFrameManager: motion_count: int, frame_time: float, frame: np.ndarray, - ) -> bool: + ) -> tuple[bool, bool]: + """ + Update birdseye for a specific camera with new frame data. + Returns (frame_changed, layout_changed) to indicate if the frame or layout changed. 
+ """ # don't process if birdseye is disabled for this camera camera_config = self.config.cameras[camera] force_update = False @@ -700,7 +721,7 @@ class BirdsEyeFrameManager: self.cameras[camera]["last_active_frame"] = 0 force_update = True else: - return False + return False, False # update the last active frame for the camera self.cameras[camera]["current_frame"] = frame.copy() @@ -712,21 +733,22 @@ class BirdsEyeFrameManager: # limit output to 10 fps if not force_update and (now - self.last_output_time) < 1 / 10: - return False + return False, False try: - updated_frame = self.update_frame(frame) + frame_changed, layout_changed = self.update_frame(frame) except Exception: - updated_frame = False + frame_changed, layout_changed = False, False self.active_cameras = [] self.camera_layout = [] print(traceback.format_exc()) # if the frame was updated or the fps is too low, send frame - if force_update or updated_frame or (now - self.last_output_time) > 1: + if force_update or frame_changed or (now - self.last_output_time) > 1: self.last_output_time = now - return True - return False + return True, layout_changed + + return False, layout_changed class Birdseye: @@ -755,6 +777,7 @@ class Birdseye: self.birdseye_manager = BirdsEyeFrameManager(config, stop_event) self.frame_manager = SharedMemoryFrameManager() self.stop_event = stop_event + self.requestor = InterProcessRequestor() if config.birdseye.restream: self.birdseye_buffer = self.frame_manager.create( @@ -789,15 +812,20 @@ class Birdseye: frame_time: float, frame: np.ndarray, ) -> None: - if self.birdseye_manager.update( + frame_changed, frame_layout_changed = self.birdseye_manager.update( camera, len([o for o in current_tracked_objects if not o["stationary"]]), len(motion_boxes), frame_time, frame, - ): + ) + if frame_changed: self.__send_new_frame() + if frame_layout_changed: + coordinates = self.birdseye_manager.get_camera_coordinates() + self.requestor.send_data(UPDATE_BIRDSEYE_LAYOUT, coordinates) + def 
stop(self) -> None: self.converter.join() self.broadcaster.join() diff --git a/web/src/api/ws.tsx b/web/src/api/ws.tsx index 79bf9e79d..78c596e13 100644 --- a/web/src/api/ws.tsx +++ b/web/src/api/ws.tsx @@ -426,6 +426,40 @@ export function useEmbeddingsReindexProgress( return { payload: data }; } +export function useBirdseyeLayout(revalidateOnFocus: boolean = true): { + payload: string; +} { + const { + value: { payload }, + send: sendCommand, + } = useWs("birdseye_layout", "birdseyeLayout"); + + const data = useDeepMemo(JSON.parse(payload as string)); + + useEffect(() => { + let listener = undefined; + if (revalidateOnFocus) { + sendCommand("birdseyeLayout"); + listener = () => { + if (document.visibilityState == "visible") { + sendCommand("birdseyeLayout"); + } + }; + addEventListener("visibilitychange", listener); + } + + return () => { + if (listener) { + removeEventListener("visibilitychange", listener); + } + }; + // we know that these deps are correct + // eslint-disable-next-line react-hooks/exhaustive-deps + }, [revalidateOnFocus]); + + return { payload: data }; +} + export function useMotionActivity(camera: string): { payload: string } { const { value: { payload }, diff --git a/web/src/components/player/BirdseyeLivePlayer.tsx b/web/src/components/player/BirdseyeLivePlayer.tsx index 286f19216..2e9461293 100644 --- a/web/src/components/player/BirdseyeLivePlayer.tsx +++ b/web/src/components/player/BirdseyeLivePlayer.tsx @@ -13,6 +13,7 @@ type LivePlayerProps = { liveMode: LivePlayerMode; pip?: boolean; containerRef: React.MutableRefObject; + playerRef?: React.MutableRefObject; onClick?: () => void; }; @@ -22,6 +23,7 @@ export default function BirdseyeLivePlayer({ liveMode, pip, containerRef, + playerRef, onClick, }: LivePlayerProps) { let player; @@ -76,7 +78,9 @@ export default function BirdseyeLivePlayer({ >
    -
    {player}
    +
    + {player} +
    ); } diff --git a/web/src/views/live/LiveBirdseyeView.tsx b/web/src/views/live/LiveBirdseyeView.tsx index ca28180bf..efded68f5 100644 --- a/web/src/views/live/LiveBirdseyeView.tsx +++ b/web/src/views/live/LiveBirdseyeView.tsx @@ -1,11 +1,13 @@ +import { useBirdseyeLayout } from "@/api/ws"; import CameraFeatureToggle from "@/components/dynamic/CameraFeatureToggle"; import ActivityIndicator from "@/components/indicators/activity-indicator"; import BirdseyeLivePlayer from "@/components/player/BirdseyeLivePlayer"; import { Button } from "@/components/ui/button"; import { TooltipProvider } from "@/components/ui/tooltip"; import { useResizeObserver } from "@/hooks/resize-observer"; +import { cn } from "@/lib/utils"; import { FrigateConfig } from "@/types/frigateConfig"; -import { useEffect, useMemo, useRef, useState } from "react"; +import { useCallback, useEffect, useMemo, useRef, useState } from "react"; import { isDesktop, isFirefox, @@ -122,6 +124,72 @@ export default function LiveBirdseyeView({ return "mse"; }, [config]); + const birdseyeLayout = useBirdseyeLayout(); + + // Click overlay handling + + const playerRef = useRef(null); + const handleOverlayClick = useCallback( + ( + e: React.MouseEvent | React.TouchEvent, + ) => { + let clientX; + let clientY; + if ("TouchEvent" in window && e.nativeEvent instanceof TouchEvent) { + clientX = e.nativeEvent.touches[0].clientX; + clientY = e.nativeEvent.touches[0].clientY; + } else if (e.nativeEvent instanceof MouseEvent) { + clientX = e.nativeEvent.clientX; + clientY = e.nativeEvent.clientY; + } + + if ( + playerRef.current && + clientX && + clientY && + config && + birdseyeLayout?.payload + ) { + const playerRect = playerRef.current.getBoundingClientRect(); + + // Calculate coordinates relative to player div, accounting for offset + const rawX = clientX - playerRect.left; + const rawY = clientY - playerRect.top; + + // Ensure click is within player bounds + if ( + rawX < 0 || + rawX > playerRect.width || + rawY < 0 
|| + rawY > playerRect.height + ) { + return; + } + + // Scale click coordinates to birdseye canvas resolution + const canvasX = rawX * (config.birdseye.width / playerRect.width); + const canvasY = rawY * (config.birdseye.height / playerRect.height); + + for (const [cameraName, coords] of Object.entries( + birdseyeLayout.payload, + )) { + const parsedCoords = + typeof coords === "string" ? JSON.parse(coords) : coords; + if ( + canvasX >= parsedCoords.x && + canvasX < parsedCoords.x + parsedCoords.width && + canvasY >= parsedCoords.y && + canvasY < parsedCoords.y + parsedCoords.height + ) { + navigate(`/#${cameraName}`); + break; + } + } + } + }, + [playerRef, config, birdseyeLayout, navigate], + ); + if (!config) { return ; } @@ -215,16 +283,21 @@ export default function LiveBirdseyeView({ }} >
    From 4b57e5e26505f987bc8b0a0818cb2e2f39e8f438 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Mon, 9 Jun 2025 08:25:33 -0600 Subject: [PATCH 054/144] Refactor TensorRT (#18643) * Combine base and arm trt detectors * Remove unused deps for amd64 build * Add missing packages and cleanup ldconfig * Expand packages for tensorflow model training * Cleanup * Refactor training to not reserve memory --- docker/tensorrt/requirements-amd64.txt | 1 + frigate/comms/embeddings_updater.py | 2 +- .../real_time/custom_classification.py | 56 ++++--------------- frigate/data_processing/types.py | 6 +- frigate/embeddings/__init__.py | 10 +++- frigate/util/classification.py | 51 +++++++++++++++-- 6 files changed, 68 insertions(+), 58 deletions(-) diff --git a/docker/tensorrt/requirements-amd64.txt b/docker/tensorrt/requirements-amd64.txt index 63c68b583..a7853aeec 100644 --- a/docker/tensorrt/requirements-amd64.txt +++ b/docker/tensorrt/requirements-amd64.txt @@ -13,6 +13,7 @@ nvidia_cusolver_cu12==11.6.3.*; platform_machine == 'x86_64' nvidia_cusparse_cu12==12.5.1.*; platform_machine == 'x86_64' nvidia_nccl_cu12==2.23.4; platform_machine == 'x86_64' nvidia_nvjitlink_cu12==12.5.82; platform_machine == 'x86_64' +tensorflow==2.19.*; platform_machine == 'x86_64' onnx==1.16.*; platform_machine == 'x86_64' onnxruntime-gpu==1.22.*; platform_machine == 'x86_64' protobuf==3.20.3; platform_machine == 'x86_64' diff --git a/frigate/comms/embeddings_updater.py b/frigate/comms/embeddings_updater.py index 5edb9e77d..f97319051 100644 --- a/frigate/comms/embeddings_updater.py +++ b/frigate/comms/embeddings_updater.py @@ -12,7 +12,7 @@ class EmbeddingsRequestEnum(Enum): # audio transcribe_audio = "transcribe_audio" # custom classification - train_classification = "train_classification" + reload_classification_model = "reload_classification_model" # face clear_face_classifier = "clear_face_classifier" recognize_face = "recognize_face" diff --git 
a/frigate/data_processing/real_time/custom_classification.py b/frigate/data_processing/real_time/custom_classification.py index a718956e2..f153b5b92 100644 --- a/frigate/data_processing/real_time/custom_classification.py +++ b/frigate/data_processing/real_time/custom_classification.py @@ -3,7 +3,6 @@ import datetime import logging import os -import threading from typing import Any import cv2 @@ -17,10 +16,8 @@ from frigate.comms.event_metadata_updater import ( from frigate.comms.inter_process import InterProcessRequestor from frigate.config import FrigateConfig from frigate.config.classification import CustomClassificationConfig -from frigate.const import CLIPS_DIR, MODEL_CACHE_DIR, UPDATE_MODEL_STATE -from frigate.types import ModelStatusTypesEnum +from frigate.const import CLIPS_DIR, MODEL_CACHE_DIR from frigate.util.builtin import EventsPerSecond, InferenceSpeed, load_labels -from frigate.util.classification import train_classification_model from frigate.util.object import box_overlaps, calculate_region from ..types import DataProcessorMetrics @@ -72,18 +69,6 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi): ) self.classifications_per_second.start() - def __retrain_model(self) -> None: - train_classification_model(self.model_config.name) - self.__build_detector() - self.requestor.send_data( - UPDATE_MODEL_STATE, - { - "model": self.model_config.name, - "state": ModelStatusTypesEnum.complete, - }, - ) - logger.info(f"Successfully loaded updated model for {self.model_config.name}") - def __update_metrics(self, duration: float) -> None: self.classifications_per_second.update() self.inference_speed.update(duration) @@ -172,19 +157,15 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi): ) def handle_request(self, topic, request_data): - if topic == EmbeddingsRequestEnum.train_classification.value: + if topic == EmbeddingsRequestEnum.reload_classification_model.value: if request_data.get("model_name") == self.model_config.name: - 
self.requestor.send_data( - UPDATE_MODEL_STATE, - { - "model": self.model_config.name, - "state": ModelStatusTypesEnum.training, - }, + self.__build_detector() + logger.info( + f"Successfully loaded updated model for {self.model_config.name}" ) - threading.Thread(target=self.__retrain_model).start() return { "success": True, - "message": f"Began training {self.model_config.name} model.", + "message": f"Loaded {self.model_config.name} model.", } else: return None @@ -232,18 +213,6 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi): prefill=0, ) - def __retrain_model(self) -> None: - train_classification_model(self.model_config.name) - self.__build_detector() - self.requestor.send_data( - UPDATE_MODEL_STATE, - { - "model": self.model_config.name, - "state": ModelStatusTypesEnum.complete, - }, - ) - logger.info(f"Successfully loaded updated model for {self.model_config.name}") - def __update_metrics(self, duration: float) -> None: self.classifications_per_second.update() self.inference_speed.update(duration) @@ -307,19 +276,14 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi): self.detected_objects[obj_data["id"]] = score def handle_request(self, topic, request_data): - if topic == EmbeddingsRequestEnum.train_classification.value: + if topic == EmbeddingsRequestEnum.reload_classification_model.value: if request_data.get("model_name") == self.model_config.name: - self.requestor.send_data( - UPDATE_MODEL_STATE, - { - "model": self.model_config.name, - "state": ModelStatusTypesEnum.training, - }, + logger.info( + f"Successfully loaded updated model for {self.model_config.name}" ) - threading.Thread(target=self.__retrain_model).start() return { "success": True, - "message": f"Began training {self.model_config.name} model.", + "message": f"Loaded {self.model_config.name} model.", } else: return None diff --git a/frigate/data_processing/types.py b/frigate/data_processing/types.py index 783b0798e..50f1ed561 100644 --- 
a/frigate/data_processing/types.py +++ b/frigate/data_processing/types.py @@ -20,8 +20,8 @@ class DataProcessorMetrics: alpr_pps: Synchronized yolov9_lpr_speed: Synchronized yolov9_lpr_pps: Synchronized - classification_speeds: dict[str, Synchronized] - classification_cps: dict[str, Synchronized] + classification_speeds: dict[str, Synchronized] = {} + classification_cps: dict[str, Synchronized] = {} def __init__(self, custom_classification_models: list[str]): self.image_embeddings_speed = mp.Value("d", 0.0) @@ -36,8 +36,6 @@ class DataProcessorMetrics: self.yolov9_lpr_pps = mp.Value("d", 0.0) if custom_classification_models: - self.classification_speeds = {} - self.classification_cps = {} for key in custom_classification_models: self.classification_speeds[key] = mp.Value("d", 0.0) self.classification_cps[key] = mp.Value("d", 0.0) diff --git a/frigate/embeddings/__init__.py b/frigate/embeddings/__init__.py index 5c2a9005f..037cadcf0 100644 --- a/frigate/embeddings/__init__.py +++ b/frigate/embeddings/__init__.py @@ -22,6 +22,7 @@ from frigate.data_processing.types import DataProcessorMetrics from frigate.db.sqlitevecq import SqliteVecQueueDatabase from frigate.models import Event, Recordings from frigate.util.builtin import serialize +from frigate.util.classification import kickoff_model_training from frigate.util.services import listen from .maintainer import EmbeddingMaintainer @@ -302,9 +303,12 @@ class EmbeddingsContext: return self.requestor.send_data(EmbeddingsRequestEnum.reindex.value, {}) def start_classification_training(self, model_name: str) -> dict[str, Any]: - return self.requestor.send_data( - EmbeddingsRequestEnum.train_classification.value, {"model_name": model_name} - ) + threading.Thread( + target=kickoff_model_training, + args=(self.requestor, model_name), + daemon=True, + ).start() + return {"success": True, "message": f"Began training {model_name} model."} def transcribe_audio(self, event: dict[str, any]) -> dict[str, any]: return 
self.requestor.send_data( diff --git a/frigate/util/classification.py b/frigate/util/classification.py index 92da7c93e..842f38fa2 100644 --- a/frigate/util/classification.py +++ b/frigate/util/classification.py @@ -10,7 +10,11 @@ from tensorflow.keras import layers, models, optimizers from tensorflow.keras.applications import MobileNetV2 from tensorflow.keras.preprocessing.image import ImageDataGenerator -from frigate.const import CLIPS_DIR, MODEL_CACHE_DIR +from frigate.comms.embeddings_updater import EmbeddingsRequestEnum, EmbeddingsRequestor +from frigate.comms.inter_process import InterProcessRequestor +from frigate.const import CLIPS_DIR, MODEL_CACHE_DIR, UPDATE_MODEL_STATE +from frigate.types import ModelStatusTypesEnum +from frigate.util import Process BATCH_SIZE = 16 EPOCHS = 50 @@ -18,7 +22,7 @@ LEARNING_RATE = 0.001 @staticmethod -def generate_representative_dataset_factory(dataset_dir: str): +def __generate_representative_dataset_factory(dataset_dir: str): def generate_representative_dataset(): image_paths = [] for root, dirs, files in os.walk(dataset_dir): @@ -38,7 +42,7 @@ def generate_representative_dataset_factory(dataset_dir: str): @staticmethod -def train_classification_model(model_name: str) -> bool: +def __train_classification_model(model_name: str) -> bool: """Train a classification model.""" dataset_dir = os.path.join(CLIPS_DIR, model_name, "dataset") model_dir = os.path.join(MODEL_CACHE_DIR, model_name) @@ -107,7 +111,7 @@ def train_classification_model(model_name: str) -> bool: # convert model to tflite converter = tf.lite.TFLiteConverter.from_keras_model(model) converter.optimizations = [tf.lite.Optimize.DEFAULT] - converter.representative_dataset = generate_representative_dataset_factory( + converter.representative_dataset = __generate_representative_dataset_factory( dataset_dir ) converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8] @@ -122,3 +126,42 @@ def train_classification_model(model_name: str) -> bool: # 
restore original stdout / stderr sys.stdout = original_stdout sys.stderr = original_stderr + + +@staticmethod +def kickoff_model_training( + embeddingRequestor: EmbeddingsRequestor, model_name: str +) -> None: + requestor = InterProcessRequestor() + requestor.send_data( + UPDATE_MODEL_STATE, + { + "model": model_name, + "state": ModelStatusTypesEnum.training, + }, + ) + + # run training in sub process so that + # tensorflow will free CPU / GPU memory + # upon training completion + training_process = Process( + target=__train_classification_model, + name=f"model_training:{model_name}", + args=(model_name,), + ) + training_process.start() + training_process.join() + + # reload model and mark training as complete + embeddingRequestor.send_data( + EmbeddingsRequestEnum.reload_classification_model.value, + {"model_name": model_name}, + ) + requestor.send_data( + UPDATE_MODEL_STATE, + { + "model": model_name, + "state": ModelStatusTypesEnum.complete, + }, + ) + requestor.stop() From faadea8e1fe4af2b77468f23d32fee259a459ee3 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Wed, 11 Jun 2025 11:25:30 -0600 Subject: [PATCH 055/144] Dynamic Management of Cameras (#18671) * Add base class for global config updates * Add or remove camera states * Move camera process management to separate thread * Move camera management fully to separate class * Cleanup * Stop camera processes when stop command is sent * Start processes dynamically when needed * Adjust * Leave extra room in tracked object queue for two cameras * Dynamically set extra config pieces * Add some TODOs * Fix type check * Simplify config updates * Improve typing * Correctly handle indexed entries * Cleanup * Create out SHM * Use ZMQ for signaling object detectoin is completed * Get camera correctly created * Cleanup for updating the cameras config * Cleanup * Don't enable audio if no cameras have audio transcription * Use exact string so similar camera names don't interfere * Add ability to update config via json 
body to config/set endpoint Additionally, update the config in a single rather than multiple calls for each updated key * fix autotracking calibration to support new config updater function --------- Co-authored-by: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> --- frigate/api/app.py | 45 +++- frigate/api/defs/request/app_body.py | 3 +- frigate/app.py | 166 +++------------ frigate/camera/activity_manager.py | 35 +-- frigate/camera/maintainer.py | 248 ++++++++++++++++++++++ frigate/comms/object_detector_signaler.py | 21 ++ frigate/comms/webpush.py | 9 +- frigate/config/camera/updater.py | 19 +- frigate/embeddings/maintainer.py | 11 + frigate/events/audio.py | 20 +- frigate/object_detection/base.py | 41 ++-- frigate/output/output.py | 12 +- frigate/ptz/autotrack.py | 11 +- frigate/record/maintainer.py | 4 +- frigate/review/maintainer.py | 3 + frigate/track/object_processing.py | 52 +++-- frigate/util/builtin.py | 53 +++-- frigate/video.py | 8 +- 18 files changed, 533 insertions(+), 228 deletions(-) create mode 100644 frigate/camera/maintainer.py create mode 100644 frigate/comms/object_detector_signaler.py diff --git a/frigate/api/app.py b/frigate/api/app.py index 351518673..d9e573d29 100644 --- a/frigate/api/app.py +++ b/frigate/api/app.py @@ -6,6 +6,7 @@ import json import logging import os import traceback +import urllib from datetime import datetime, timedelta from functools import reduce from io import StringIO @@ -36,8 +37,10 @@ from frigate.models import Event, Timeline from frigate.stats.prometheus import get_metrics, update_metrics from frigate.util.builtin import ( clean_camera_user_pass, + flatten_config_data, get_tz_modifiers, - update_yaml_from_url, + process_config_query_string, + update_yaml_file_bulk, ) from frigate.util.config import find_config_file from frigate.util.services import ( @@ -358,14 +361,37 @@ def config_set(request: Request, body: AppConfigSetBody): with open(config_file, "r") as f: old_raw_config = f.read() - f.close() 
try: - update_yaml_from_url(config_file, str(request.url)) + updates = {} + + # process query string parameters (takes precedence over body.config_data) + parsed_url = urllib.parse.urlparse(str(request.url)) + query_string = urllib.parse.parse_qs(parsed_url.query, keep_blank_values=True) + + # Filter out empty keys but keep blank values for non-empty keys + query_string = {k: v for k, v in query_string.items() if k} + + if query_string: + updates = process_config_query_string(query_string) + elif body.config_data: + updates = flatten_config_data(body.config_data) + + if not updates: + return JSONResponse( + content=( + {"success": False, "message": "No configuration data provided"} + ), + status_code=400, + ) + + # apply all updates in a single operation + update_yaml_file_bulk(config_file, updates) + + # validate the updated config with open(config_file, "r") as f: new_raw_config = f.read() - f.close() - # Validate the config schema + try: config = FrigateConfig.parse(new_raw_config) except Exception: @@ -390,12 +416,19 @@ def config_set(request: Request, body: AppConfigSetBody): ) if body.requires_restart == 0 or body.update_topic: + old_config: FrigateConfig = request.app.frigate_config request.app.frigate_config = config if body.update_topic and body.update_topic.startswith("config/cameras/"): _, _, camera, field = body.update_topic.split("/") - settings = config.get_nested_object(body.update_topic) + if field == "add": + settings = config.cameras[camera] + elif field == "remove": + settings = old_config.cameras[camera] + else: + settings = config.get_nested_object(body.update_topic) + request.app.config_publisher.publish_update( CameraConfigUpdateTopic(CameraConfigUpdateEnum[field], camera), settings, diff --git a/frigate/api/defs/request/app_body.py b/frigate/api/defs/request/app_body.py index 7456a6c77..7f8ca40ec 100644 --- a/frigate/api/defs/request/app_body.py +++ b/frigate/api/defs/request/app_body.py @@ -1,4 +1,4 @@ -from typing import Optional +from 
typing import Any, Dict, Optional from pydantic import BaseModel @@ -6,6 +6,7 @@ from pydantic import BaseModel class AppConfigSetBody(BaseModel): requires_restart: int = 1 update_topic: str | None = None + config_data: Optional[Dict[str, Any]] = None class AppPutPasswordBody(BaseModel): diff --git a/frigate/app.py b/frigate/app.py index f534de6e0..186ed1195 100644 --- a/frigate/app.py +++ b/frigate/app.py @@ -18,6 +18,7 @@ import frigate.util as util from frigate.api.auth import hash_password from frigate.api.fastapi_app import create_fastapi_app from frigate.camera import CameraMetrics, PTZMetrics +from frigate.camera.maintainer import CameraMaintainer from frigate.comms.base_communicator import Communicator from frigate.comms.dispatcher import Dispatcher from frigate.comms.event_metadata_updater import EventMetadataPublisher @@ -36,7 +37,6 @@ from frigate.const import ( FACE_DIR, MODEL_CACHE_DIR, RECORD_DIR, - SHM_FRAMES_VAR, THUMB_DIR, ) from frigate.data_processing.types import DataProcessorMetrics @@ -71,11 +71,9 @@ from frigate.storage import StorageMaintainer from frigate.timeline import TimelineProcessor from frigate.track.object_processing import TrackedObjectProcessor from frigate.util.builtin import empty_and_close_queue -from frigate.util.image import SharedMemoryFrameManager, UntrackedSharedMemory -from frigate.util.object import get_camera_regions_grid +from frigate.util.image import UntrackedSharedMemory from frigate.util.services import set_file_limit from frigate.version import VERSION -from frigate.video import capture_camera, track_camera from frigate.watchdog import FrigateWatchdog logger = logging.getLogger(__name__) @@ -87,7 +85,6 @@ class FrigateApp: self.stop_event: MpEvent = mp.Event() self.detection_queue: Queue = mp.Queue() self.detectors: dict[str, ObjectDetectProcess] = {} - self.detection_out_events: dict[str, MpEvent] = {} self.detection_shms: list[mp.shared_memory.SharedMemory] = [] self.log_queue: Queue = mp.Queue() 
self.camera_metrics: dict[str, CameraMetrics] = {} @@ -104,8 +101,6 @@ class FrigateApp: self.ptz_metrics: dict[str, PTZMetrics] = {} self.processes: dict[str, int] = {} self.embeddings: Optional[EmbeddingsContext] = None - self.region_grids: dict[str, list[list[dict[str, int]]]] = {} - self.frame_manager = SharedMemoryFrameManager() self.config = config def ensure_dirs(self) -> None: @@ -141,8 +136,16 @@ class FrigateApp: def init_queues(self) -> None: # Queue for cameras to push tracked objects to + # leaving room for 2 extra cameras to be added self.detected_frames_queue: Queue = mp.Queue( - maxsize=sum(camera.enabled for camera in self.config.cameras.values()) * 2 + maxsize=( + sum( + camera.enabled_in_config == True + for camera in self.config.cameras.values() + ) + + 2 + ) + * 2 ) # Queue for timeline events @@ -279,7 +282,9 @@ class FrigateApp: "synchronous": "NORMAL", # Safe when using WAL https://www.sqlite.org/pragma.html#pragma_synchronous }, timeout=max( - 60, 10 * len([c for c in self.config.cameras.values() if c.enabled]) + 60, + 10 + * len([c for c in self.config.cameras.values() if c.enabled_in_config]), ), load_vec_extension=self.config.semantic_search.enabled, ) @@ -309,7 +314,9 @@ class FrigateApp: def init_embeddings_client(self) -> None: genai_cameras = [ - c for c in self.config.cameras.values() if c.enabled and c.genai.enabled + c + for c in self.config.cameras.values() + if c.enabled_in_config and c.genai.enabled ] if ( @@ -358,8 +365,6 @@ class FrigateApp: def start_detectors(self) -> None: for name in self.config.cameras.keys(): - self.detection_out_events[name] = mp.Event() - try: largest_frame = max( [ @@ -391,7 +396,7 @@ class FrigateApp: self.detectors[name] = ObjectDetectProcess( name, self.detection_queue, - self.detection_out_events, + list(self.config.cameras.keys()), detector_config, ) @@ -426,69 +431,16 @@ class FrigateApp: output_processor.start() logger.info(f"Output process started: {output_processor.pid}") - def 
init_historical_regions(self) -> None: - # delete region grids for removed or renamed cameras - cameras = list(self.config.cameras.keys()) - Regions.delete().where(~(Regions.camera << cameras)).execute() - - # create or update region grids for each camera - for camera in self.config.cameras.values(): - assert camera.name is not None - self.region_grids[camera.name] = get_camera_regions_grid( - camera.name, - camera.detect, - max(self.config.model.width, self.config.model.height), - ) - - def start_camera_processors(self) -> None: - for name, config in self.config.cameras.items(): - if not self.config.cameras[name].enabled_in_config: - logger.info(f"Camera processor not started for disabled camera {name}") - continue - - camera_process = util.Process( - target=track_camera, - name=f"camera_processor:{name}", - args=( - name, - config, - self.config.model, - self.config.model.merged_labelmap, - self.detection_queue, - self.detection_out_events[name], - self.detected_frames_queue, - self.camera_metrics[name], - self.ptz_metrics[name], - self.region_grids[name], - ), - daemon=True, - ) - self.camera_metrics[name].process = camera_process - camera_process.start() - logger.info(f"Camera processor started for {name}: {camera_process.pid}") - - def start_camera_capture_processes(self) -> None: - shm_frame_count = self.shm_frame_count() - - for name, config in self.config.cameras.items(): - if not self.config.cameras[name].enabled_in_config: - logger.info(f"Capture process not started for disabled camera {name}") - continue - - # pre-create shms - for i in range(shm_frame_count): - frame_size = config.frame_shape_yuv[0] * config.frame_shape_yuv[1] - self.frame_manager.create(f"{config.name}_frame{i}", frame_size) - - capture_process = util.Process( - target=capture_camera, - name=f"camera_capture:{name}", - args=(config, shm_frame_count, self.camera_metrics[name]), - ) - capture_process.daemon = True - self.camera_metrics[name].capture_process = capture_process - 
capture_process.start() - logger.info(f"Capture process started for {name}: {capture_process.pid}") + def start_camera_processor(self) -> None: + self.camera_maintainer = CameraMaintainer( + self.config, + self.detection_queue, + self.detected_frames_queue, + self.camera_metrics, + self.ptz_metrics, + self.stop_event, + ) + self.camera_maintainer.start() def start_audio_processor(self) -> None: audio_cameras = [ @@ -548,45 +500,6 @@ class FrigateApp: self.frigate_watchdog = FrigateWatchdog(self.detectors, self.stop_event) self.frigate_watchdog.start() - def shm_frame_count(self) -> int: - total_shm = round(shutil.disk_usage("/dev/shm").total / pow(2, 20), 1) - - # required for log files + nginx cache - min_req_shm = 40 + 10 - - if self.config.birdseye.restream: - min_req_shm += 8 - - available_shm = total_shm - min_req_shm - cam_total_frame_size = 0.0 - - for camera in self.config.cameras.values(): - if camera.enabled and camera.detect.width and camera.detect.height: - cam_total_frame_size += round( - (camera.detect.width * camera.detect.height * 1.5 + 270480) - / 1048576, - 1, - ) - - if cam_total_frame_size == 0.0: - return 0 - - shm_frame_count = min( - int(os.environ.get(SHM_FRAMES_VAR, "50")), - int(available_shm / (cam_total_frame_size)), - ) - - logger.debug( - f"Calculated total camera size {available_shm} / {cam_total_frame_size} :: {shm_frame_count} frames for each camera in SHM" - ) - - if shm_frame_count < 20: - logger.warning( - f"The current SHM size of {total_shm}MB is too small, recommend increasing it to at least {round(min_req_shm + cam_total_frame_size * 20)}MB." 
- ) - - return shm_frame_count - def init_auth(self) -> None: if self.config.auth.enabled: if User.select().count() == 0: @@ -656,10 +569,8 @@ class FrigateApp: self.init_embeddings_client() self.start_video_output_processor() self.start_ptz_autotracker() - self.init_historical_regions() self.start_detected_frames_processor() - self.start_camera_processors() - self.start_camera_capture_processes() + self.start_camera_processor() self.start_audio_processor() self.start_storage_maintainer() self.start_stats_emitter() @@ -716,24 +627,6 @@ class FrigateApp: if self.onvif_controller: self.onvif_controller.close() - # ensure the capture processes are done - for camera, metrics in self.camera_metrics.items(): - capture_process = metrics.capture_process - if capture_process is not None: - logger.info(f"Waiting for capture process for {camera} to stop") - capture_process.terminate() - capture_process.join() - - # ensure the camera processors are done - for camera, metrics in self.camera_metrics.items(): - camera_process = metrics.process - if camera_process is not None: - logger.info(f"Waiting for process for {camera} to stop") - camera_process.terminate() - camera_process.join() - logger.info(f"Closing frame queue for {camera}") - empty_and_close_queue(metrics.frame_queue) - # ensure the detectors are done for detector in self.detectors.values(): detector.stop() @@ -778,7 +671,6 @@ class FrigateApp: self.event_metadata_updater.stop() self.inter_zmq_proxy.stop() - self.frame_manager.cleanup() while len(self.detection_shms) > 0: shm = self.detection_shms.pop() shm.close() diff --git a/frigate/camera/activity_manager.py b/frigate/camera/activity_manager.py index 6039a07f6..e10730931 100644 --- a/frigate/camera/activity_manager.py +++ b/frigate/camera/activity_manager.py @@ -3,7 +3,7 @@ from collections import Counter from typing import Any, Callable -from frigate.config.config import FrigateConfig +from frigate.config import CameraConfig, FrigateConfig class 
CameraActivityManager: @@ -23,26 +23,33 @@ class CameraActivityManager: if not camera_config.enabled_in_config: continue - self.last_camera_activity[camera_config.name] = {} - self.camera_all_object_counts[camera_config.name] = Counter() - self.camera_active_object_counts[camera_config.name] = Counter() + self.__init_camera(camera_config) - for zone, zone_config in camera_config.zones.items(): - if zone not in self.all_zone_labels: - self.zone_all_object_counts[zone] = Counter() - self.zone_active_object_counts[zone] = Counter() - self.all_zone_labels[zone] = set() + def __init_camera(self, camera_config: CameraConfig) -> None: + self.last_camera_activity[camera_config.name] = {} + self.camera_all_object_counts[camera_config.name] = Counter() + self.camera_active_object_counts[camera_config.name] = Counter() - self.all_zone_labels[zone].update( - zone_config.objects - if zone_config.objects - else camera_config.objects.track - ) + for zone, zone_config in camera_config.zones.items(): + if zone not in self.all_zone_labels: + self.zone_all_object_counts[zone] = Counter() + self.zone_active_object_counts[zone] = Counter() + self.all_zone_labels[zone] = set() + + self.all_zone_labels[zone].update( + zone_config.objects + if zone_config.objects + else camera_config.objects.track + ) def update_activity(self, new_activity: dict[str, dict[str, Any]]) -> None: all_objects: list[dict[str, Any]] = [] for camera in new_activity.keys(): + # handle cameras that were added dynamically + if camera not in self.camera_all_object_counts: + self.__init_camera(self.config.cameras[camera]) + new_objects = new_activity[camera].get("objects", []) all_objects.extend(new_objects) diff --git a/frigate/camera/maintainer.py b/frigate/camera/maintainer.py new file mode 100644 index 000000000..6abeb762e --- /dev/null +++ b/frigate/camera/maintainer.py @@ -0,0 +1,248 @@ +"""Create and maintain camera processes / management.""" + +import logging +import os +import shutil +import threading +from 
multiprocessing import Queue +from multiprocessing.synchronize import Event as MpEvent + +from frigate.camera import CameraMetrics, PTZMetrics +from frigate.config import FrigateConfig +from frigate.config.camera import CameraConfig +from frigate.config.camera.updater import ( + CameraConfigUpdateEnum, + CameraConfigUpdateSubscriber, +) +from frigate.const import SHM_FRAMES_VAR +from frigate.models import Regions +from frigate.util import Process as FrigateProcess +from frigate.util.builtin import empty_and_close_queue +from frigate.util.image import SharedMemoryFrameManager, UntrackedSharedMemory +from frigate.util.object import get_camera_regions_grid +from frigate.video import capture_camera, track_camera + +logger = logging.getLogger(__name__) + + +class CameraMaintainer(threading.Thread): + def __init__( + self, + config: FrigateConfig, + detection_queue: Queue, + detected_frames_queue: Queue, + camera_metrics: dict[str, CameraMetrics], + ptz_metrics: dict[str, PTZMetrics], + stop_event: MpEvent, + ): + super().__init__(name="camera_processor") + self.config = config + self.detection_queue = detection_queue + self.detected_frames_queue = detected_frames_queue + self.stop_event = stop_event + self.camera_metrics = camera_metrics + self.ptz_metrics = ptz_metrics + self.frame_manager = SharedMemoryFrameManager() + self.region_grids: dict[str, list[list[dict[str, int]]]] = {} + self.update_subscriber = CameraConfigUpdateSubscriber( + self.config, + {}, + [ + CameraConfigUpdateEnum.add, + CameraConfigUpdateEnum.remove, + ], + ) + self.shm_count = self.__calculate_shm_frame_count() + + def __init_historical_regions(self) -> None: + # delete region grids for removed or renamed cameras + cameras = list(self.config.cameras.keys()) + Regions.delete().where(~(Regions.camera << cameras)).execute() + + # create or update region grids for each camera + for camera in self.config.cameras.values(): + assert camera.name is not None + self.region_grids[camera.name] = 
get_camera_regions_grid( + camera.name, + camera.detect, + max(self.config.model.width, self.config.model.height), + ) + + def __calculate_shm_frame_count(self) -> int: + total_shm = round(shutil.disk_usage("/dev/shm").total / pow(2, 20), 1) + + # required for log files + nginx cache + min_req_shm = 40 + 10 + + if self.config.birdseye.restream: + min_req_shm += 8 + + available_shm = total_shm - min_req_shm + cam_total_frame_size = 0.0 + + for camera in self.config.cameras.values(): + if ( + camera.enabled_in_config + and camera.detect.width + and camera.detect.height + ): + cam_total_frame_size += round( + (camera.detect.width * camera.detect.height * 1.5 + 270480) + / 1048576, + 1, + ) + + # leave room for 2 cameras that are added dynamically, if a user wants to add more cameras they may need to increase the SHM size and restart after adding them. + cam_total_frame_size += 2 * round( + (camera.detect.width * camera.detect.height * 1.5 + 270480) / 1048576, + 1, + ) + + if cam_total_frame_size == 0.0: + return 0 + + shm_frame_count = min( + int(os.environ.get(SHM_FRAMES_VAR, "50")), + int(available_shm / (cam_total_frame_size)), + ) + + logger.debug( + f"Calculated total camera size {available_shm} / {cam_total_frame_size} :: {shm_frame_count} frames for each camera in SHM" + ) + + if shm_frame_count < 20: + logger.warning( + f"The current SHM size of {total_shm}MB is too small, recommend increasing it to at least {round(min_req_shm + cam_total_frame_size * 20)}MB." 
+ ) + + return shm_frame_count + + def __start_camera_processor( + self, name: str, config: CameraConfig, runtime: bool = False + ) -> None: + if not config.enabled_in_config: + logger.info(f"Camera processor not started for disabled camera {name}") + return + + if runtime: + self.camera_metrics[name] = CameraMetrics() + self.ptz_metrics[name] = PTZMetrics(autotracker_enabled=False) + self.region_grids[name] = get_camera_regions_grid( + name, + config.detect, + max(self.config.model.width, self.config.model.height), + ) + + try: + largest_frame = max( + [ + det.model.height * det.model.width * 3 + if det.model is not None + else 320 + for det in self.config.detectors.values() + ] + ) + UntrackedSharedMemory(name=f"out-{name}", create=True, size=20 * 6 * 4) + UntrackedSharedMemory( + name=name, + create=True, + size=largest_frame, + ) + except FileExistsError: + pass + + camera_process = FrigateProcess( + target=track_camera, + name=f"camera_processor:{name}", + args=( + config.name, + config, + self.config.model, + self.config.model.merged_labelmap, + self.detection_queue, + self.detected_frames_queue, + self.camera_metrics[name], + self.ptz_metrics[name], + self.region_grids[name], + ), + daemon=True, + ) + self.camera_metrics[config.name].process = camera_process + camera_process.start() + logger.info(f"Camera processor started for {config.name}: {camera_process.pid}") + + def __start_camera_capture( + self, name: str, config: CameraConfig, runtime: bool = False + ) -> None: + if not config.enabled_in_config: + logger.info(f"Capture process not started for disabled camera {name}") + return + + # pre-create shms + for i in range(10 if runtime else self.shm_count): + frame_size = config.frame_shape_yuv[0] * config.frame_shape_yuv[1] + self.frame_manager.create(f"{config.name}_frame{i}", frame_size) + + capture_process = FrigateProcess( + target=capture_camera, + name=f"camera_capture:{name}", + args=(config, self.shm_count, self.camera_metrics[name]), + ) + 
capture_process.daemon = True + self.camera_metrics[name].capture_process = capture_process + capture_process.start() + logger.info(f"Capture process started for {name}: {capture_process.pid}") + + def __stop_camera_capture_process(self, camera: str) -> None: + capture_process = self.camera_metrics[camera].capture_process + if capture_process is not None: + logger.info(f"Waiting for capture process for {camera} to stop") + capture_process.terminate() + capture_process.join() + + def __stop_camera_process(self, camera: str) -> None: + metrics = self.camera_metrics[camera] + camera_process = metrics.process + if camera_process is not None: + logger.info(f"Waiting for process for {camera} to stop") + camera_process.terminate() + camera_process.join() + logger.info(f"Closing frame queue for {camera}") + empty_and_close_queue(metrics.frame_queue) + + def run(self): + self.__init_historical_regions() + + # start camera processes + for camera, config in self.config.cameras.items(): + self.__start_camera_processor(camera, config) + self.__start_camera_capture(camera, config) + + while not self.stop_event.wait(1): + updates = self.update_subscriber.check_for_updates() + + for update_type, updated_cameras in updates.items(): + if update_type == CameraConfigUpdateEnum.add.name: + for camera in updated_cameras: + self.__start_camera_processor( + camera, + self.update_subscriber.camera_configs[camera], + runtime=True, + ) + self.__start_camera_capture( + camera, self.update_subscriber.camera_configs[camera] + ) + elif update_type == CameraConfigUpdateEnum.remove.name: + self.__stop_camera_capture_process(camera) + self.__stop_camera_process(camera) + + # ensure the capture processes are done + for camera in self.camera_metrics.keys(): + self.__stop_camera_capture_process(camera) + + # ensure the camera processors are done + for camera in self.camera_metrics.keys(): + self.__stop_camera_process(camera) + + self.update_subscriber.stop() + self.frame_manager.cleanup() diff --git 
a/frigate/comms/object_detector_signaler.py b/frigate/comms/object_detector_signaler.py new file mode 100644 index 000000000..befc83e4d --- /dev/null +++ b/frigate/comms/object_detector_signaler.py @@ -0,0 +1,21 @@ +"""Facilitates communication between processes for object detection signals.""" + +from .zmq_proxy import Publisher, Subscriber + + +class ObjectDetectorPublisher(Publisher): + """Publishes signal for object detection to different processes.""" + + topic_base = "object_detector/" + + +class ObjectDetectorSubscriber(Subscriber): + """Simplifies receiving a signal for object detection.""" + + topic_base = "object_detector/" + + def __init__(self, topic: str) -> None: + super().__init__(topic) + + def check_for_update(self): + return super().check_for_update(timeout=5) diff --git a/frigate/comms/webpush.py b/frigate/comms/webpush.py index d93c3169b..7bc66f3b7 100644 --- a/frigate/comms/webpush.py +++ b/frigate/comms/webpush.py @@ -81,7 +81,7 @@ class WebPushClient(Communicator): # type: ignore[misc] "config/notifications", exact=True ) self.config_subscriber = CameraConfigUpdateSubscriber( - self.config.cameras, [CameraConfigUpdateEnum.notifications] + self.config, self.config.cameras, [CameraConfigUpdateEnum.notifications] ) def subscribe(self, receiver: Callable) -> None: @@ -170,7 +170,12 @@ class WebPushClient(Communicator): # type: ignore[misc] if updated_notification_config: self.config.notifications = updated_notification_config - self.config_subscriber.check_for_updates() + updates = self.config_subscriber.check_for_updates() + + if "add" in updates: + for camera in updates["add"]: + self.suspended_cameras[camera] = 0 + self.last_camera_notification_time[camera] = 0 if topic == "reviews": decoded = json.loads(payload) diff --git a/frigate/config/camera/updater.py b/frigate/config/camera/updater.py index 5ddc26d44..83536fc46 100644 --- a/frigate/config/camera/updater.py +++ b/frigate/config/camera/updater.py @@ -5,12 +5,13 @@ from enum import Enum 
from typing import Any from frigate.comms.config_updater import ConfigPublisher, ConfigSubscriber -from frigate.config import CameraConfig +from frigate.config import CameraConfig, FrigateConfig class CameraConfigUpdateEnum(str, Enum): """Supported camera config update types.""" + add = "add" # for adding a camera audio = "audio" audio_transcription = "audio_transcription" birdseye = "birdseye" @@ -20,6 +21,7 @@ class CameraConfigUpdateEnum(str, Enum): notifications = "notifications" objects = "objects" record = "record" + remove = "remove" # for removing a camera review = "review" snapshots = "snapshots" zones = "zones" @@ -49,9 +51,11 @@ class CameraConfigUpdatePublisher: class CameraConfigUpdateSubscriber: def __init__( self, + config: FrigateConfig | None, camera_configs: dict[str, CameraConfig], topics: list[CameraConfigUpdateEnum], ): + self.config = config self.camera_configs = camera_configs self.topics = topics @@ -68,14 +72,23 @@ class CameraConfigUpdateSubscriber: def __update_config( self, camera: str, update_type: CameraConfigUpdateEnum, updated_config: Any ) -> None: - config = self.camera_configs[camera] + if update_type == CameraConfigUpdateEnum.add: + self.config.cameras[camera] = updated_config + self.camera_configs[camera] = updated_config + return + elif update_type == CameraConfigUpdateEnum.remove: + self.config.cameras.pop(camera) + self.camera_configs.pop(camera) + return + + config = self.camera_configs.get(camera) if not config: return if update_type == CameraConfigUpdateEnum.audio: config.audio = updated_config - if update_type == CameraConfigUpdateEnum.audio_transcription: + elif update_type == CameraConfigUpdateEnum.audio_transcription: config.audio_transcription = updated_config elif update_type == CameraConfigUpdateEnum.birdseye: config.birdseye = updated_config diff --git a/frigate/embeddings/maintainer.py b/frigate/embeddings/maintainer.py index ce81c2bc4..0980a8ae8 100644 --- a/frigate/embeddings/maintainer.py +++ 
b/frigate/embeddings/maintainer.py @@ -29,6 +29,10 @@ from frigate.comms.recordings_updater import ( ) from frigate.config import FrigateConfig from frigate.config.camera.camera import CameraTypeEnum +from frigate.config.camera.updater import ( + CameraConfigUpdateEnum, + CameraConfigUpdateSubscriber, +) from frigate.const import ( CLIPS_DIR, UPDATE_EVENT_DESCRIPTION, @@ -87,6 +91,11 @@ class EmbeddingMaintainer(threading.Thread): self.config = config self.metrics = metrics self.embeddings = None + self.config_updater = CameraConfigUpdateSubscriber( + self.config, + self.config.cameras, + [CameraConfigUpdateEnum.add, CameraConfigUpdateEnum.remove], + ) if config.semantic_search.enabled: self.embeddings = Embeddings(config, db, metrics) @@ -198,6 +207,7 @@ class EmbeddingMaintainer(threading.Thread): def run(self) -> None: """Maintain a SQLite-vec database for semantic search.""" while not self.stop_event.is_set(): + self.config_updater.check_for_updates() self._process_requests() self._process_updates() self._process_recordings_updates() @@ -206,6 +216,7 @@ class EmbeddingMaintainer(threading.Thread): self._process_finalized() self._process_event_metadata() + self.config_updater.stop() self.event_subscriber.stop() self.event_end_subscriber.stop() self.recordings_subscriber.stop() diff --git a/frigate/events/audio.py b/frigate/events/audio.py index aeeaf3b4f..797a767ba 100644 --- a/frigate/events/audio.py +++ b/frigate/events/audio.py @@ -90,10 +90,19 @@ class AudioProcessor(util.Process): self.camera_metrics = camera_metrics self.cameras = cameras self.config = config - self.transcription_model_runner = AudioTranscriptionModelRunner( - self.config.audio_transcription.device, - self.config.audio_transcription.model_size, - ) + + if any( + [ + conf.audio_transcription.enabled_in_config + for conf in config.cameras.values() + ] + ): + self.transcription_model_runner = AudioTranscriptionModelRunner( + self.config.audio_transcription.device, + 
self.config.audio_transcription.model_size, + ) + else: + self.transcription_model_runner = None def run(self) -> None: audio_threads: list[AudioEventMaintainer] = [] @@ -138,7 +147,7 @@ class AudioEventMaintainer(threading.Thread): camera: CameraConfig, config: FrigateConfig, camera_metrics: dict[str, CameraMetrics], - audio_transcription_model_runner: AudioTranscriptionModelRunner, + audio_transcription_model_runner: AudioTranscriptionModelRunner | None, stop_event: threading.Event, ) -> None: super().__init__(name=f"{camera.name}_audio_event_processor") @@ -162,6 +171,7 @@ class AudioEventMaintainer(threading.Thread): # create communication for audio detections self.requestor = InterProcessRequestor() self.config_subscriber = CameraConfigUpdateSubscriber( + None, {self.camera_config.name: self.camera_config}, [ CameraConfigUpdateEnum.audio, diff --git a/frigate/object_detection/base.py b/frigate/object_detection/base.py index c77a720a0..86febc6a7 100644 --- a/frigate/object_detection/base.py +++ b/frigate/object_detection/base.py @@ -13,6 +13,10 @@ import numpy as np from setproctitle import setproctitle import frigate.util as util +from frigate.comms.object_detector_signaler import ( + ObjectDetectorPublisher, + ObjectDetectorSubscriber, +) from frigate.detectors import create_detector from frigate.detectors.detector_config import ( BaseDetectorConfig, @@ -89,7 +93,7 @@ class LocalObjectDetector(ObjectDetector): def run_detector( name: str, detection_queue: Queue, - out_events: dict[str, MpEvent], + cameras: list[str], avg_speed: Value, start: Value, detector_config: BaseDetectorConfig, @@ -108,15 +112,19 @@ def run_detector( signal.signal(signal.SIGTERM, receiveSignal) signal.signal(signal.SIGINT, receiveSignal) - frame_manager = SharedMemoryFrameManager() - object_detector = LocalObjectDetector(detector_config=detector_config) - - outputs = {} - for name in out_events.keys(): + def create_output_shm(name: str): out_shm = 
UntrackedSharedMemory(name=f"out-{name}", create=False) out_np = np.ndarray((20, 6), dtype=np.float32, buffer=out_shm.buf) outputs[name] = {"shm": out_shm, "np": out_np} + frame_manager = SharedMemoryFrameManager() + object_detector = LocalObjectDetector(detector_config=detector_config) + detector_publisher = ObjectDetectorPublisher() + + outputs = {} + for name in cameras: + create_output_shm(name) + while not stop_event.is_set(): try: connection_id = detection_queue.get(timeout=1) @@ -136,12 +144,18 @@ def run_detector( detections = object_detector.detect_raw(input_frame) duration = datetime.datetime.now().timestamp() - start.value frame_manager.close(connection_id) + + if connection_id not in outputs: + create_output_shm(connection_id) + outputs[connection_id]["np"][:] = detections[:] - out_events[connection_id].set() + signal_id = f"{connection_id}/update" + detector_publisher.publish(signal_id, signal_id) start.value = 0.0 avg_speed.value = (avg_speed.value * 9 + duration) / 10 + detector_publisher.stop() logger.info("Exited detection process...") @@ -150,11 +164,11 @@ class ObjectDetectProcess: self, name: str, detection_queue: Queue, - out_events: dict[str, MpEvent], + cameras: list[str], detector_config: BaseDetectorConfig, ): self.name = name - self.out_events = out_events + self.cameras = cameras self.detection_queue = detection_queue self.avg_inference_speed = Value("d", 0.01) self.detection_start = Value("d", 0.0) @@ -185,7 +199,7 @@ class ObjectDetectProcess: args=( self.name, self.detection_queue, - self.out_events, + self.cameras, self.avg_inference_speed, self.detection_start, self.detector_config, @@ -201,7 +215,6 @@ class RemoteObjectDetector: name: str, labels: dict[int, str], detection_queue: Queue, - event: MpEvent, model_config: ModelConfig, stop_event: MpEvent, ): @@ -209,7 +222,6 @@ class RemoteObjectDetector: self.name = name self.fps = EventsPerSecond() self.detection_queue = detection_queue - self.event = event self.stop_event = 
stop_event self.shm = UntrackedSharedMemory(name=self.name, create=False) self.np_shm = np.ndarray( @@ -219,6 +231,7 @@ class RemoteObjectDetector: ) self.out_shm = UntrackedSharedMemory(name=f"out-{self.name}", create=False) self.out_np_shm = np.ndarray((20, 6), dtype=np.float32, buffer=self.out_shm.buf) + self.detector_subscriber = ObjectDetectorSubscriber(f"{name}/update") def detect(self, tensor_input, threshold=0.4): detections = [] @@ -228,9 +241,8 @@ class RemoteObjectDetector: # copy input to shared memory self.np_shm[:] = tensor_input[:] - self.event.clear() self.detection_queue.put(self.name) - result = self.event.wait(timeout=5.0) + result = self.detector_subscriber.check_for_update() # if it timed out if result is None: @@ -246,5 +258,6 @@ class RemoteObjectDetector: return detections def cleanup(self): + self.detector_subscriber.stop() self.shm.unlink() self.out_shm.unlink() diff --git a/frigate/output/output.py b/frigate/output/output.py index 6decf0005..d323596fe 100644 --- a/frigate/output/output.py +++ b/frigate/output/output.py @@ -103,8 +103,10 @@ def output_frames( detection_subscriber = DetectionSubscriber(DetectionTypeEnum.video) config_subscriber = CameraConfigUpdateSubscriber( + config, config.cameras, [ + CameraConfigUpdateEnum.add, CameraConfigUpdateEnum.birdseye, CameraConfigUpdateEnum.enabled, CameraConfigUpdateEnum.record, @@ -135,7 +137,15 @@ def output_frames( while not stop_event.is_set(): # check if there is an updated config - config_subscriber.check_for_updates() + updates = config_subscriber.check_for_updates() + + if "add" in updates: + for camera in updates["add"]: + jsmpeg_cameras[camera] = JsmpegCamera( + cam_config, stop_event, websocket_server + ) + preview_recorders[camera] = PreviewRecorder(cam_config) + preview_write_times[camera] = 0 (topic, data) = detection_subscriber.check_for_update(timeout=1) now = datetime.datetime.now().timestamp() diff --git a/frigate/ptz/autotrack.py b/frigate/ptz/autotrack.py index 
f38bf1f5f..f0d8419dd 100644 --- a/frigate/ptz/autotrack.py +++ b/frigate/ptz/autotrack.py @@ -31,7 +31,7 @@ from frigate.const import ( ) from frigate.ptz.onvif import OnvifController from frigate.track.tracked_object import TrackedObject -from frigate.util.builtin import update_yaml_file +from frigate.util.builtin import update_yaml_file_bulk from frigate.util.config import find_config_file from frigate.util.image import SharedMemoryFrameManager, intersection_over_union @@ -348,10 +348,13 @@ class PtzAutoTracker: f"{camera}: Writing new config with autotracker motion coefficients: {self.config.cameras[camera].onvif.autotracking.movement_weights}" ) - update_yaml_file( + update_yaml_file_bulk( config_file, - ["cameras", camera, "onvif", "autotracking", "movement_weights"], - self.config.cameras[camera].onvif.autotracking.movement_weights, + { + f"cameras.{camera}.onvif.autotracking.movement_weights": self.config.cameras[ + camera + ].onvif.autotracking.movement_weights + }, ) async def _calibrate_camera(self, camera): diff --git a/frigate/record/maintainer.py b/frigate/record/maintainer.py index ace9a5d24..0883437da 100644 --- a/frigate/record/maintainer.py +++ b/frigate/record/maintainer.py @@ -75,7 +75,9 @@ class RecordingMaintainer(threading.Thread): # create communication for retained recordings self.requestor = InterProcessRequestor() self.config_subscriber = CameraConfigUpdateSubscriber( - self.config.cameras, [CameraConfigUpdateEnum.record] + self.config, + self.config.cameras, + [CameraConfigUpdateEnum.add, CameraConfigUpdateEnum.record], ) self.detection_subscriber = DetectionSubscriber(DetectionTypeEnum.all) self.recordings_publisher = RecordingsDataPublisher( diff --git a/frigate/review/maintainer.py b/frigate/review/maintainer.py index 7f60a0209..778717db3 100644 --- a/frigate/review/maintainer.py +++ b/frigate/review/maintainer.py @@ -154,10 +154,13 @@ class ReviewSegmentMaintainer(threading.Thread): # create communication for review segments 
self.requestor = InterProcessRequestor() self.config_subscriber = CameraConfigUpdateSubscriber( + config, config.cameras, [ + CameraConfigUpdateEnum.add, CameraConfigUpdateEnum.enabled, CameraConfigUpdateEnum.record, + CameraConfigUpdateEnum.remove, CameraConfigUpdateEnum.review, ], ) diff --git a/frigate/track/object_processing.py b/frigate/track/object_processing.py index e25c83815..79d2f16ef 100644 --- a/frigate/track/object_processing.py +++ b/frigate/track/object_processing.py @@ -70,9 +70,15 @@ class TrackedObjectProcessor(threading.Thread): self.last_motion_detected: dict[str, float] = {} self.ptz_autotracker_thread = ptz_autotracker_thread - self.config_subscriber = CameraConfigUpdateSubscriber( + self.camera_config_subscriber = CameraConfigUpdateSubscriber( + self.config, self.config.cameras, - [CameraConfigUpdateEnum.enabled, CameraConfigUpdateEnum.zones], + [ + CameraConfigUpdateEnum.add, + CameraConfigUpdateEnum.enabled, + CameraConfigUpdateEnum.remove, + CameraConfigUpdateEnum.zones, + ], ) self.requestor = InterProcessRequestor() @@ -95,6 +101,12 @@ class TrackedObjectProcessor(threading.Thread): self.zone_data = defaultdict(lambda: defaultdict(dict)) self.active_zone_data = defaultdict(lambda: defaultdict(dict)) + for camera in self.config.cameras.keys(): + self.create_camera_state(camera) + + def create_camera_state(self, camera: str) -> None: + """Creates a new camera state.""" + def start(camera: str, obj: TrackedObject, frame_name: str): self.event_sender.publish( ( @@ -206,17 +218,16 @@ class TrackedObjectProcessor(threading.Thread): self.camera_activity[camera] = activity self.requestor.send_data(UPDATE_CAMERA_ACTIVITY, self.camera_activity) - for camera in self.config.cameras.keys(): - camera_state = CameraState( - camera, self.config, self.frame_manager, self.ptz_autotracker_thread - ) - camera_state.on("start", start) - camera_state.on("autotrack", autotrack) - camera_state.on("update", update) - camera_state.on("end", end) - 
camera_state.on("snapshot", snapshot) - camera_state.on("camera_activity", camera_activity) - self.camera_states[camera] = camera_state + camera_state = CameraState( + camera, self.config, self.frame_manager, self.ptz_autotracker_thread + ) + camera_state.on("start", start) + camera_state.on("autotrack", autotrack) + camera_state.on("update", update) + camera_state.on("end", end) + camera_state.on("snapshot", snapshot) + camera_state.on("camera_activity", camera_activity) + self.camera_states[camera] = camera_state def should_save_snapshot(self, camera, obj: TrackedObject): if obj.false_positive: @@ -644,7 +655,7 @@ class TrackedObjectProcessor(threading.Thread): def run(self): while not self.stop_event.is_set(): # check for config updates - updated_topics = self.config_subscriber.check_for_updates() + updated_topics = self.camera_config_subscriber.check_for_updates() if "enabled" in updated_topics: for camera in updated_topics["enabled"]: @@ -652,6 +663,17 @@ class TrackedObjectProcessor(threading.Thread): self.camera_states[camera].prev_enabled = self.config.cameras[ camera ].enabled + elif "add" in updated_topics: + for camera in updated_topics["add"]: + self.config.cameras[camera] = ( + self.camera_config_subscriber.camera_configs[camera] + ) + self.create_camera_state(camera) + elif "remove" in updated_topics: + for camera in updated_topics["remove"]: + camera_state = self.camera_states[camera] + camera_state.shutdown() + self.camera_states.pop(camera) # manage camera disabled state for camera, config in self.config.cameras.items(): @@ -760,6 +782,6 @@ class TrackedObjectProcessor(threading.Thread): self.event_sender.stop() self.event_end_subscriber.stop() self.sub_label_subscriber.stop() - self.config_subscriber.stop() + self.camera_config_subscriber.stop() logger.info("Exiting object processor...") diff --git a/frigate/util/builtin.py b/frigate/util/builtin.py index 52280ecd8..0433af18e 100644 --- a/frigate/util/builtin.py +++ b/frigate/util/builtin.py @@ 
-14,7 +14,7 @@ import urllib.parse from collections.abc import Mapping from multiprocessing.sharedctypes import Synchronized from pathlib import Path -from typing import Any, Optional, Tuple, Union +from typing import Any, Dict, Optional, Tuple, Union from zoneinfo import ZoneInfoNotFoundError import numpy as np @@ -184,25 +184,12 @@ def create_mask(frame_shape, mask): mask_img[:] = 255 -def update_yaml_from_url(file_path, url): - parsed_url = urllib.parse.urlparse(url) - query_string = urllib.parse.parse_qs(parsed_url.query, keep_blank_values=True) - - # Filter out empty keys but keep blank values for non-empty keys - query_string = {k: v for k, v in query_string.items() if k} - +def process_config_query_string(query_string: Dict[str, list]) -> Dict[str, Any]: + updates = {} for key_path_str, new_value_list in query_string.items(): - key_path = key_path_str.split(".") - for i in range(len(key_path)): - try: - index = int(key_path[i]) - key_path[i] = (key_path[i - 1], index) - key_path.pop(i - 1) - except ValueError: - pass - + # use the string key as-is for updates dictionary if len(new_value_list) > 1: - update_yaml_file(file_path, key_path, new_value_list) + updates[key_path_str] = new_value_list else: value = new_value_list[0] try: @@ -210,10 +197,24 @@ def update_yaml_from_url(file_path, url): value = ast.literal_eval(value) if "," not in value else value except (ValueError, SyntaxError): pass - update_yaml_file(file_path, key_path, value) + updates[key_path_str] = value + return updates -def update_yaml_file(file_path, key_path, new_value): +def flatten_config_data( + config_data: Dict[str, Any], parent_key: str = "" +) -> Dict[str, Any]: + items = [] + for key, value in config_data.items(): + new_key = f"{parent_key}.{key}" if parent_key else key + if isinstance(value, dict): + items.extend(flatten_config_data(value, new_key).items()) + else: + items.append((new_key, value)) + return dict(items) + + +def update_yaml_file_bulk(file_path: str, updates: 
Dict[str, Any]): yaml = YAML() yaml.indent(mapping=2, sequence=4, offset=2) @@ -226,7 +227,17 @@ def update_yaml_file(file_path, key_path, new_value): ) return - data = update_yaml(data, key_path, new_value) + # Apply all updates + for key_path_str, new_value in updates.items(): + key_path = key_path_str.split(".") + for i in range(len(key_path)): + try: + index = int(key_path[i]) + key_path[i] = (key_path[i - 1], index) + key_path.pop(i - 1) + except ValueError: + pass + data = update_yaml(data, key_path, new_value) try: with open(file_path, "w") as f: diff --git a/frigate/video.py b/frigate/video.py index 5012c31c6..9710dbd81 100755 --- a/frigate/video.py +++ b/frigate/video.py @@ -116,7 +116,7 @@ def capture_frames( skipped_eps = EventsPerSecond() skipped_eps.start() config_subscriber = CameraConfigUpdateSubscriber( - {config.name: config}, [CameraConfigUpdateEnum.enabled] + None, {config.name: config}, [CameraConfigUpdateEnum.enabled] ) def get_enabled_state(): @@ -196,7 +196,7 @@ class CameraWatchdog(threading.Thread): self.sleeptime = self.config.ffmpeg.retry_interval self.config_subscriber = CameraConfigUpdateSubscriber( - {config.name: config}, [CameraConfigUpdateEnum.enabled] + None, {config.name: config}, [CameraConfigUpdateEnum.enabled] ) self.was_enabled = self.config.enabled @@ -483,7 +483,6 @@ def track_camera( model_config: ModelConfig, labelmap: dict[int, str], detection_queue: Queue, - result_connection: MpEvent, detected_objects_queue, camera_metrics: CameraMetrics, ptz_metrics: PTZMetrics, @@ -513,7 +512,7 @@ def track_camera( ptz_metrics=ptz_metrics, ) object_detector = RemoteObjectDetector( - name, labelmap, detection_queue, result_connection, model_config, stop_event + name, labelmap, detection_queue, model_config, stop_event ) object_tracker = NorfairTracker(config, ptz_metrics) @@ -607,6 +606,7 @@ def process_frames( ): next_region_update = get_tomorrow_at_time(2) config_subscriber = CameraConfigUpdateSubscriber( + None, {camera_name: 
camera_config}, [ CameraConfigUpdateEnum.detect, From 1caf8b97c485b023404452f5cd2273e9bae1006e Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Thu, 12 Jun 2025 12:12:34 -0600 Subject: [PATCH 056/144] Use Fork-Server As Spawn Method (#18682) * Set runtime * Use count correctly * Don't assume camera sizes * Use separate zmq proxy for object detection * Correct order * Use forkserver * Only store PID instead of entire process reference * Cleanup * Catch correct errors * Fix typing * Remove before_run from process util The before_run never actually ran because: You're right to suspect an issue with before_run not being called and a potential deadlock. The way you've implemented the run_wrapper using __getattribute__ for the run method of BaseProcess is a common pitfall in Python's multiprocessing, especially when combined with how multiprocessing.Process works internally. Here's a breakdown of why before_run isn't being called and why you might be experiencing a deadlock: The Problem: __getattribute__ and Process Serialization When you create a multiprocessing.Process object and call start(), the multiprocessing module needs to serialize the process object (or at least enough of it to re-create the process in the new interpreter). It then pickles this serialized object and sends it to the newly spawned process. The issue with your __getattribute__ implementation for run is that: run is retrieved during serialization: When multiprocessing tries to pickle your Process object to send to the new process, it will likely access the run attribute. This triggers your __getattribute__ wrapper, which then tries to bind run_wrapper to self. run_wrapper is bound to the parent process's self: The run_wrapper closure, when created in the parent process, captures the self (the Process instance) from the parent's memory space. Deserialization creates a new object: In the child process, a new Process object is created by deserializing the pickled data. 
However, the run_wrapper method that was pickled still holds a reference to the self from the parent process. This is a subtle but critical distinction. The child's run is not your wrapped run: When the child process starts, it internally calls its own run method. Because of the serialization and deserialization process, the run method that's ultimately executed in the child process is the original multiprocessing.Process.run or the Process.run if you had directly overridden it. Your __getattribute__ magic, which wraps run, isn't correctly applied to the Process object within the child's context. * Cleanup * Logging bugfix (#18465) * use mp Manager to handle logging queues A Python bug (https://github.com/python/cpython/issues/91555) was preventing logs from the embeddings maintainer process from printing. The bug is fixed in Python 3.14, but a viable workaround is to use the multiprocessing Manager, which better manages mp queues and causes the logging to work correctly. * consolidate * fix typing * Fix typing * Use global log queue * Move to using process for logging * Convert camera tracking to process * Add more processes * Finalize process * Cleanup * Cleanup typing * Formatting * Remove daemon --------- Co-authored-by: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> --- frigate/__main__.py | 7 +- frigate/app.py | 61 ++-- frigate/camera/__init__.py | 32 +-- frigate/camera/maintainer.py | 65 ++--- frigate/comms/object_detector_signaler.py | 85 +++++- frigate/data_processing/types.py | 34 +-- frigate/embeddings/__init__.py | 58 ++-- frigate/embeddings/maintainer.py | 23 +- frigate/events/audio.py | 9 +- frigate/log.py | 13 +- frigate/object_detection/base.py | 139 +++++---- frigate/output/output.py | 330 +++++++++++----------- frigate/record/record.py | 60 ++-- frigate/review/review.py | 37 +-- frigate/stats/util.py | 10 +- frigate/util/builtin.py | 13 +- frigate/util/process.py | 29 +- frigate/video.py | 208 +++++++------- 
web/src/views/system/CameraMetrics.tsx | 2 +- 19 files changed, 606 insertions(+), 609 deletions(-) diff --git a/frigate/__main__.py b/frigate/__main__.py index 4c732be80..6dd5d130e 100644 --- a/frigate/__main__.py +++ b/frigate/__main__.py @@ -1,5 +1,6 @@ import argparse import faulthandler +import multiprocessing as mp import signal import sys import threading @@ -15,10 +16,11 @@ from frigate.util.config import find_config_file def main() -> None: + manager = mp.Manager() faulthandler.enable() # Setup the logging thread - setup_logging() + setup_logging(manager) threading.current_thread().name = "frigate" @@ -108,8 +110,9 @@ def main() -> None: sys.exit(0) # Run the main application. - FrigateApp(config).start() + FrigateApp(config, manager).start() if __name__ == "__main__": + mp.set_start_method("forkserver", force=True) main() diff --git a/frigate/app.py b/frigate/app.py index 186ed1195..010f311b9 100644 --- a/frigate/app.py +++ b/frigate/app.py @@ -5,6 +5,7 @@ import os import secrets import shutil from multiprocessing import Queue +from multiprocessing.managers import DictProxy, SyncManager from multiprocessing.synchronize import Event as MpEvent from pathlib import Path from typing import Optional @@ -14,7 +15,6 @@ import uvicorn from peewee_migrate import Router from playhouse.sqlite_ext import SqliteExtDatabase -import frigate.util as util from frigate.api.auth import hash_password from frigate.api.fastapi_app import create_fastapi_app from frigate.camera import CameraMetrics, PTZMetrics @@ -24,6 +24,7 @@ from frigate.comms.dispatcher import Dispatcher from frigate.comms.event_metadata_updater import EventMetadataPublisher from frigate.comms.inter_process import InterProcessCommunicator from frigate.comms.mqtt import MqttClient +from frigate.comms.object_detector_signaler import DetectorProxy from frigate.comms.webpush import WebPushClient from frigate.comms.ws import WebSocketClient from frigate.comms.zmq_proxy import ZmqProxy @@ -41,7 +42,7 @@ from 
frigate.const import ( ) from frigate.data_processing.types import DataProcessorMetrics from frigate.db.sqlitevecq import SqliteVecQueueDatabase -from frigate.embeddings import EmbeddingsContext, manage_embeddings +from frigate.embeddings import EmbeddingProcess, EmbeddingsContext from frigate.events.audio import AudioProcessor from frigate.events.cleanup import EventCleanup from frigate.events.maintainer import EventProcessor @@ -58,13 +59,13 @@ from frigate.models import ( User, ) from frigate.object_detection.base import ObjectDetectProcess -from frigate.output.output import output_frames +from frigate.output.output import OutputProcess from frigate.ptz.autotrack import PtzAutoTrackerThread from frigate.ptz.onvif import OnvifController from frigate.record.cleanup import RecordingCleanup from frigate.record.export import migrate_exports -from frigate.record.record import manage_recordings -from frigate.review.review import manage_review_segments +from frigate.record.record import RecordProcess +from frigate.review.review import ReviewProcess from frigate.stats.emitter import StatsEmitter from frigate.stats.util import stats_init from frigate.storage import StorageMaintainer @@ -80,16 +81,19 @@ logger = logging.getLogger(__name__) class FrigateApp: - def __init__(self, config: FrigateConfig) -> None: + def __init__(self, config: FrigateConfig, manager: SyncManager) -> None: + self.metrics_manager = manager self.audio_process: Optional[mp.Process] = None self.stop_event: MpEvent = mp.Event() self.detection_queue: Queue = mp.Queue() self.detectors: dict[str, ObjectDetectProcess] = {} self.detection_shms: list[mp.shared_memory.SharedMemory] = [] self.log_queue: Queue = mp.Queue() - self.camera_metrics: dict[str, CameraMetrics] = {} + self.camera_metrics: DictProxy = self.metrics_manager.dict() self.embeddings_metrics: DataProcessorMetrics | None = ( - DataProcessorMetrics(list(config.classification.custom.keys())) + DataProcessorMetrics( + self.metrics_manager, 
list(config.classification.custom.keys()) + ) if ( config.semantic_search.enabled or config.lpr.enabled @@ -127,7 +131,7 @@ class FrigateApp: def init_camera_metrics(self) -> None: # create camera_metrics for camera_name in self.config.cameras.keys(): - self.camera_metrics[camera_name] = CameraMetrics() + self.camera_metrics[camera_name] = CameraMetrics(self.metrics_manager) self.ptz_metrics[camera_name] = PTZMetrics( autotracker_enabled=self.config.cameras[ camera_name @@ -221,24 +225,14 @@ class FrigateApp: self.processes["go2rtc"] = proc.info["pid"] def init_recording_manager(self) -> None: - recording_process = util.Process( - target=manage_recordings, - name="recording_manager", - args=(self.config,), - ) - recording_process.daemon = True + recording_process = RecordProcess(self.config) self.recording_process = recording_process recording_process.start() self.processes["recording"] = recording_process.pid or 0 logger.info(f"Recording process started: {recording_process.pid}") def init_review_segment_manager(self) -> None: - review_segment_process = util.Process( - target=manage_review_segments, - name="review_segment_manager", - args=(self.config,), - ) - review_segment_process.daemon = True + review_segment_process = ReviewProcess(self.config) self.review_segment_process = review_segment_process review_segment_process.start() self.processes["review_segment"] = review_segment_process.pid or 0 @@ -257,15 +251,10 @@ class FrigateApp: ): return - embedding_process = util.Process( - target=manage_embeddings, - name="embeddings_manager", - args=( - self.config, - self.embeddings_metrics, - ), + embedding_process = EmbeddingProcess( + self.config, + self.embeddings_metrics, ) - embedding_process.daemon = True self.embedding_process = embedding_process embedding_process.start() self.processes["embeddings"] = embedding_process.pid or 0 @@ -333,6 +322,7 @@ class FrigateApp: self.inter_config_updater = CameraConfigUpdatePublisher() self.event_metadata_updater = 
EventMetadataPublisher() self.inter_zmq_proxy = ZmqProxy() + self.detection_proxy = DetectorProxy() def init_onvif(self) -> None: self.onvif_controller = OnvifController(self.config, self.ptz_metrics) @@ -421,12 +411,7 @@ class FrigateApp: self.detected_frames_processor.start() def start_video_output_processor(self) -> None: - output_processor = util.Process( - target=output_frames, - name="output_processor", - args=(self.config,), - ) - output_processor.daemon = True + output_processor = OutputProcess(self.config) self.output_processor = output_processor output_processor.start() logger.info(f"Output process started: {output_processor.pid}") @@ -560,11 +545,11 @@ class FrigateApp: self.init_recording_manager() self.init_review_segment_manager() self.init_go2rtc() - self.start_detectors() self.init_embeddings_manager() self.bind_database() self.check_db_data_migrations() self.init_inter_process_communicator() + self.start_detectors() self.init_dispatcher() self.init_embeddings_client() self.start_video_output_processor() @@ -670,13 +655,13 @@ class FrigateApp: self.inter_config_updater.stop() self.event_metadata_updater.stop() self.inter_zmq_proxy.stop() + self.detection_proxy.stop() while len(self.detection_shms) > 0: shm = self.detection_shms.pop() shm.close() shm.unlink() - # exit the mp Manager process _stop_logging() - + self.metrics_manager.shutdown() os._exit(os.EX_OK) diff --git a/frigate/camera/__init__.py b/frigate/camera/__init__.py index 456751c52..77b1fd424 100644 --- a/frigate/camera/__init__.py +++ b/frigate/camera/__init__.py @@ -1,7 +1,7 @@ import multiprocessing as mp +from multiprocessing.managers import SyncManager from multiprocessing.sharedctypes import Synchronized from multiprocessing.synchronize import Event -from typing import Optional class CameraMetrics: @@ -16,25 +16,25 @@ class CameraMetrics: frame_queue: mp.Queue - process: Optional[mp.Process] - capture_process: Optional[mp.Process] + process_pid: Synchronized + capture_process_pid: 
Synchronized ffmpeg_pid: Synchronized - def __init__(self): - self.camera_fps = mp.Value("d", 0) - self.detection_fps = mp.Value("d", 0) - self.detection_frame = mp.Value("d", 0) - self.process_fps = mp.Value("d", 0) - self.skipped_fps = mp.Value("d", 0) - self.read_start = mp.Value("d", 0) - self.audio_rms = mp.Value("d", 0) - self.audio_dBFS = mp.Value("d", 0) + def __init__(self, manager: SyncManager): + self.camera_fps = manager.Value("d", 0) + self.detection_fps = manager.Value("d", 0) + self.detection_frame = manager.Value("d", 0) + self.process_fps = manager.Value("d", 0) + self.skipped_fps = manager.Value("d", 0) + self.read_start = manager.Value("d", 0) + self.audio_rms = manager.Value("d", 0) + self.audio_dBFS = manager.Value("d", 0) - self.frame_queue = mp.Queue(maxsize=2) + self.frame_queue = manager.Queue(maxsize=2) - self.process = None - self.capture_process = None - self.ffmpeg_pid = mp.Value("i", 0) + self.process_pid = manager.Value("i", 0) + self.capture_process_pid = manager.Value("i", 0) + self.ffmpeg_pid = manager.Value("i", 0) class PTZMetrics: diff --git a/frigate/camera/maintainer.py b/frigate/camera/maintainer.py index 6abeb762e..dd978bbfc 100644 --- a/frigate/camera/maintainer.py +++ b/frigate/camera/maintainer.py @@ -1,10 +1,12 @@ """Create and maintain camera processes / management.""" import logging +import multiprocessing as mp import os import shutil import threading from multiprocessing import Queue +from multiprocessing.managers import DictProxy from multiprocessing.synchronize import Event as MpEvent from frigate.camera import CameraMetrics, PTZMetrics @@ -16,11 +18,10 @@ from frigate.config.camera.updater import ( ) from frigate.const import SHM_FRAMES_VAR from frigate.models import Regions -from frigate.util import Process as FrigateProcess from frigate.util.builtin import empty_and_close_queue from frigate.util.image import SharedMemoryFrameManager, UntrackedSharedMemory from frigate.util.object import get_camera_regions_grid 
-from frigate.video import capture_camera, track_camera +from frigate.video import CameraCapture, CameraTracker logger = logging.getLogger(__name__) @@ -31,7 +32,7 @@ class CameraMaintainer(threading.Thread): config: FrigateConfig, detection_queue: Queue, detected_frames_queue: Queue, - camera_metrics: dict[str, CameraMetrics], + camera_metrics: DictProxy, ptz_metrics: dict[str, PTZMetrics], stop_event: MpEvent, ): @@ -53,6 +54,8 @@ class CameraMaintainer(threading.Thread): ], ) self.shm_count = self.__calculate_shm_frame_count() + self.camera_processes: dict[str, mp.Process] = {} + self.capture_processes: dict[str, mp.Process] = {} def __init_historical_regions(self) -> None: # delete region grids for removed or renamed cameras @@ -94,7 +97,7 @@ class CameraMaintainer(threading.Thread): # leave room for 2 cameras that are added dynamically, if a user wants to add more cameras they may need to increase the SHM size and restart after adding them. cam_total_frame_size += 2 * round( - (camera.detect.width * camera.detect.height * 1.5 + 270480) / 1048576, + (1280 * 720 * 1.5 + 270480) / 1048576, 1, ) @@ -151,24 +154,19 @@ class CameraMaintainer(threading.Thread): except FileExistsError: pass - camera_process = FrigateProcess( - target=track_camera, - name=f"camera_processor:{name}", - args=( - config.name, - config, - self.config.model, - self.config.model.merged_labelmap, - self.detection_queue, - self.detected_frames_queue, - self.camera_metrics[name], - self.ptz_metrics[name], - self.region_grids[name], - ), - daemon=True, + camera_process = CameraTracker( + config, + self.config.model, + self.config.model.merged_labelmap, + self.detection_queue, + self.detected_frames_queue, + self.camera_metrics[name], + self.ptz_metrics[name], + self.region_grids[name], ) - self.camera_metrics[config.name].process = camera_process + self.camera_processes[config.name] = camera_process camera_process.start() + self.camera_metrics[config.name].process_pid.value = camera_process.pid 
logger.info(f"Camera processor started for {config.name}: {camera_process.pid}") def __start_camera_capture( @@ -179,36 +177,33 @@ class CameraMaintainer(threading.Thread): return # pre-create shms - for i in range(10 if runtime else self.shm_count): + count = 10 if runtime else self.shm_count + for i in range(count): frame_size = config.frame_shape_yuv[0] * config.frame_shape_yuv[1] self.frame_manager.create(f"{config.name}_frame{i}", frame_size) - capture_process = FrigateProcess( - target=capture_camera, - name=f"camera_capture:{name}", - args=(config, self.shm_count, self.camera_metrics[name]), - ) + capture_process = CameraCapture(config, count, self.camera_metrics[name]) capture_process.daemon = True - self.camera_metrics[name].capture_process = capture_process + self.capture_processes[name] = capture_process capture_process.start() + self.camera_metrics[name].capture_process_pid.value = capture_process.pid logger.info(f"Capture process started for {name}: {capture_process.pid}") def __stop_camera_capture_process(self, camera: str) -> None: - capture_process = self.camera_metrics[camera].capture_process + capture_process = self.capture_processes[camera] if capture_process is not None: logger.info(f"Waiting for capture process for {camera} to stop") capture_process.terminate() capture_process.join() def __stop_camera_process(self, camera: str) -> None: - metrics = self.camera_metrics[camera] - camera_process = metrics.process + camera_process = self.camera_processes[camera] if camera_process is not None: logger.info(f"Waiting for process for {camera} to stop") camera_process.terminate() camera_process.join() logger.info(f"Closing frame queue for {camera}") - empty_and_close_queue(metrics.frame_queue) + empty_and_close_queue(self.camera_metrics[camera].frame_queue) def run(self): self.__init_historical_regions() @@ -230,18 +225,20 @@ class CameraMaintainer(threading.Thread): runtime=True, ) self.__start_camera_capture( - camera, 
self.update_subscriber.camera_configs[camera] + camera, + self.update_subscriber.camera_configs[camera], + runtime=True, ) elif update_type == CameraConfigUpdateEnum.remove.name: self.__stop_camera_capture_process(camera) self.__stop_camera_process(camera) # ensure the capture processes are done - for camera in self.camera_metrics.keys(): + for camera in self.camera_processes.keys(): self.__stop_camera_capture_process(camera) # ensure the camera processors are done - for camera in self.camera_metrics.keys(): + for camera in self.capture_processes.keys(): self.__stop_camera_process(camera) self.update_subscriber.stop() diff --git a/frigate/comms/object_detector_signaler.py b/frigate/comms/object_detector_signaler.py index befc83e4d..e8871db1a 100644 --- a/frigate/comms/object_detector_signaler.py +++ b/frigate/comms/object_detector_signaler.py @@ -1,21 +1,92 @@ """Facilitates communication between processes for object detection signals.""" -from .zmq_proxy import Publisher, Subscriber +import threading + +import zmq + +SOCKET_PUB = "ipc:///tmp/cache/detector_pub" +SOCKET_SUB = "ipc:///tmp/cache/detector_sub" -class ObjectDetectorPublisher(Publisher): +class ZmqProxyRunner(threading.Thread): + def __init__(self, context: zmq.Context[zmq.Socket]) -> None: + super().__init__(name="detector_proxy") + self.context = context + + def run(self) -> None: + """Run the proxy.""" + incoming = self.context.socket(zmq.XSUB) + incoming.bind(SOCKET_PUB) + outgoing = self.context.socket(zmq.XPUB) + outgoing.bind(SOCKET_SUB) + + # Blocking: This will unblock (via exception) when we destroy the context + # The incoming and outgoing sockets will be closed automatically + # when the context is destroyed as well. 
+ try: + zmq.proxy(incoming, outgoing) + except zmq.ZMQError: + pass + + +class DetectorProxy: + """Proxies object detection signals.""" + + def __init__(self) -> None: + self.context = zmq.Context() + self.runner = ZmqProxyRunner(self.context) + self.runner.start() + + def stop(self) -> None: + # destroying the context will tell the proxy to stop + self.context.destroy() + self.runner.join() + + +class ObjectDetectorPublisher: """Publishes signal for object detection to different processes.""" topic_base = "object_detector/" + def __init__(self, topic: str = "") -> None: + self.topic = f"{self.topic_base}{topic}" + self.context = zmq.Context() + self.socket = self.context.socket(zmq.PUB) + self.socket.connect(SOCKET_PUB) -class ObjectDetectorSubscriber(Subscriber): + def publish(self, sub_topic: str = "") -> None: + """Publish message.""" + self.socket.send_string(f"{self.topic}{sub_topic}/") + + def stop(self) -> None: + self.socket.close() + self.context.destroy() + + +class ObjectDetectorSubscriber: """Simplifies receiving a signal for object detection.""" topic_base = "object_detector/" - def __init__(self, topic: str) -> None: - super().__init__(topic) + def __init__(self, topic: str = "") -> None: + self.topic = f"{self.topic_base}{topic}/" + self.context = zmq.Context() + self.socket = self.context.socket(zmq.SUB) + self.socket.setsockopt_string(zmq.SUBSCRIBE, self.topic) + self.socket.connect(SOCKET_SUB) - def check_for_update(self): - return super().check_for_update(timeout=5) + def check_for_update(self, timeout: float = 5) -> str | None: + """Returns message or None if no update.""" + try: + has_update, _, _ = zmq.select([self.socket], [], [], timeout) + + if has_update: + return self.socket.recv_string(flags=zmq.NOBLOCK) + except zmq.ZMQError: + pass + + return None + + def stop(self) -> None: + self.socket.close() + self.context.destroy() diff --git a/frigate/data_processing/types.py b/frigate/data_processing/types.py index 50f1ed561..d18a1175a 100644 
--- a/frigate/data_processing/types.py +++ b/frigate/data_processing/types.py @@ -1,7 +1,7 @@ """Embeddings types.""" -import multiprocessing as mp from enum import Enum +from multiprocessing.managers import SyncManager from multiprocessing.sharedctypes import Synchronized import sherpa_onnx @@ -20,25 +20,27 @@ class DataProcessorMetrics: alpr_pps: Synchronized yolov9_lpr_speed: Synchronized yolov9_lpr_pps: Synchronized - classification_speeds: dict[str, Synchronized] = {} - classification_cps: dict[str, Synchronized] = {} + classification_speeds: dict[str, Synchronized] + classification_cps: dict[str, Synchronized] - def __init__(self, custom_classification_models: list[str]): - self.image_embeddings_speed = mp.Value("d", 0.0) - self.image_embeddings_eps = mp.Value("d", 0.0) - self.text_embeddings_speed = mp.Value("d", 0.0) - self.text_embeddings_eps = mp.Value("d", 0.0) - self.face_rec_speed = mp.Value("d", 0.0) - self.face_rec_fps = mp.Value("d", 0.0) - self.alpr_speed = mp.Value("d", 0.0) - self.alpr_pps = mp.Value("d", 0.0) - self.yolov9_lpr_speed = mp.Value("d", 0.0) - self.yolov9_lpr_pps = mp.Value("d", 0.0) + def __init__(self, manager: SyncManager, custom_classification_models: list[str]): + self.image_embeddings_speed = manager.Value("d", 0.0) + self.image_embeddings_eps = manager.Value("d", 0.0) + self.text_embeddings_speed = manager.Value("d", 0.0) + self.text_embeddings_eps = manager.Value("d", 0.0) + self.face_rec_speed = manager.Value("d", 0.0) + self.face_rec_fps = manager.Value("d", 0.0) + self.alpr_speed = manager.Value("d", 0.0) + self.alpr_pps = manager.Value("d", 0.0) + self.yolov9_lpr_speed = manager.Value("d", 0.0) + self.yolov9_lpr_pps = manager.Value("d", 0.0) + self.classification_speeds = manager.dict() + self.classification_cps = manager.dict() if custom_classification_models: for key in custom_classification_models: - self.classification_speeds[key] = mp.Value("d", 0.0) - self.classification_cps[key] = mp.Value("d", 0.0) + 
self.classification_speeds[key] = manager.Value("d", 0.0) + self.classification_cps[key] = manager.Value("d", 0.0) class DataProcessorModelRunner: diff --git a/frigate/embeddings/__init__.py b/frigate/embeddings/__init__.py index 037cadcf0..054f2c334 100644 --- a/frigate/embeddings/__init__.py +++ b/frigate/embeddings/__init__.py @@ -3,27 +3,23 @@ import base64 import json import logging -import multiprocessing as mp import os -import signal import threading from json.decoder import JSONDecodeError -from types import FrameType -from typing import Any, Optional, Union +from typing import Any, Union import regex from pathvalidate import ValidationError, sanitize_filename -from setproctitle import setproctitle from frigate.comms.embeddings_updater import EmbeddingsRequestEnum, EmbeddingsRequestor from frigate.config import FrigateConfig from frigate.const import CONFIG_DIR, FACE_DIR from frigate.data_processing.types import DataProcessorMetrics from frigate.db.sqlitevecq import SqliteVecQueueDatabase -from frigate.models import Event, Recordings +from frigate.models import Event +from frigate.util import Process as FrigateProcess from frigate.util.builtin import serialize from frigate.util.classification import kickoff_model_training -from frigate.util.services import listen from .maintainer import EmbeddingMaintainer from .util import ZScoreNormalization @@ -31,40 +27,22 @@ from .util import ZScoreNormalization logger = logging.getLogger(__name__) -def manage_embeddings(config: FrigateConfig, metrics: DataProcessorMetrics) -> None: - stop_event = mp.Event() +class EmbeddingProcess(FrigateProcess): + def __init__( + self, config: FrigateConfig, metrics: DataProcessorMetrics | None + ) -> None: + super().__init__(name="frigate.embeddings_manager", daemon=True) + self.config = config + self.metrics = metrics - def receiveSignal(signalNumber: int, frame: Optional[FrameType]) -> None: - stop_event.set() - - signal.signal(signal.SIGTERM, receiveSignal) - 
signal.signal(signal.SIGINT, receiveSignal) - - threading.current_thread().name = "process:embeddings_manager" - setproctitle("frigate.embeddings_manager") - listen() - - # Configure Frigate DB - db = SqliteVecQueueDatabase( - config.database.path, - pragmas={ - "auto_vacuum": "FULL", # Does not defragment database - "cache_size": -512 * 1000, # 512MB of cache - "synchronous": "NORMAL", # Safe when using WAL https://www.sqlite.org/pragma.html#pragma_synchronous - }, - timeout=max(60, 10 * len([c for c in config.cameras.values() if c.enabled])), - load_vec_extension=True, - ) - models = [Event, Recordings] - db.bind(models) - - maintainer = EmbeddingMaintainer( - db, - config, - metrics, - stop_event, - ) - maintainer.start() + def run(self) -> None: + self.pre_run_setup() + maintainer = EmbeddingMaintainer( + self.config, + self.metrics, + self.stop_event, + ) + maintainer.start() class EmbeddingsContext: diff --git a/frigate/embeddings/maintainer.py b/frigate/embeddings/maintainer.py index 0980a8ae8..c659d04fe 100644 --- a/frigate/embeddings/maintainer.py +++ b/frigate/embeddings/maintainer.py @@ -12,7 +12,6 @@ from typing import Any, Optional import cv2 import numpy as np from peewee import DoesNotExist -from playhouse.sqliteq import SqliteQueueDatabase from frigate.comms.detections_updater import DetectionSubscriber, DetectionTypeEnum from frigate.comms.embeddings_updater import EmbeddingsRequestEnum, EmbeddingsResponder @@ -58,9 +57,10 @@ from frigate.data_processing.real_time.license_plate import ( LicensePlateRealTimeProcessor, ) from frigate.data_processing.types import DataProcessorMetrics, PostProcessDataEnum +from frigate.db.sqlitevecq import SqliteVecQueueDatabase from frigate.events.types import EventTypeEnum, RegenerateDescriptionEnum from frigate.genai import get_genai_client -from frigate.models import Event +from frigate.models import Event, Recordings from frigate.types import TrackedObjectUpdateTypesEnum from frigate.util.builtin import serialize 
from frigate.util.image import ( @@ -82,9 +82,8 @@ class EmbeddingMaintainer(threading.Thread): def __init__( self, - db: SqliteQueueDatabase, config: FrigateConfig, - metrics: DataProcessorMetrics, + metrics: DataProcessorMetrics | None, stop_event: MpEvent, ) -> None: super().__init__(name="embeddings_maintainer") @@ -97,6 +96,22 @@ class EmbeddingMaintainer(threading.Thread): [CameraConfigUpdateEnum.add, CameraConfigUpdateEnum.remove], ) + # Configure Frigate DB + db = SqliteVecQueueDatabase( + config.database.path, + pragmas={ + "auto_vacuum": "FULL", # Does not defragment database + "cache_size": -512 * 1000, # 512MB of cache + "synchronous": "NORMAL", # Safe when using WAL https://www.sqlite.org/pragma.html#pragma_synchronous + }, + timeout=max( + 60, 10 * len([c for c in config.cameras.values() if c.enabled]) + ), + load_vec_extension=True, + ) + models = [Event, Recordings] + db.bind(models) + if config.semantic_search.enabled: self.embeddings = Embeddings(config, db, metrics) diff --git a/frigate/events/audio.py b/frigate/events/audio.py index 797a767ba..9152428fa 100644 --- a/frigate/events/audio.py +++ b/frigate/events/audio.py @@ -6,12 +6,12 @@ import random import string import threading import time +from multiprocessing.managers import DictProxy from typing import Any, Tuple import numpy as np import frigate.util as util -from frigate.camera import CameraMetrics from frigate.comms.detections_updater import DetectionPublisher, DetectionTypeEnum from frigate.comms.event_metadata_updater import ( EventMetadataPublisher, @@ -83,7 +83,7 @@ class AudioProcessor(util.Process): self, config: FrigateConfig, cameras: list[CameraConfig], - camera_metrics: dict[str, CameraMetrics], + camera_metrics: DictProxy, ): super().__init__(name="frigate.audio_manager", daemon=True) @@ -93,7 +93,7 @@ class AudioProcessor(util.Process): if any( [ - conf.audio_transcription.enabled_in_config + conf.audio_transcription.enabled_in_config == True for conf in 
config.cameras.values() ] ): @@ -105,6 +105,7 @@ class AudioProcessor(util.Process): self.transcription_model_runner = None def run(self) -> None: + self.pre_run_setup() audio_threads: list[AudioEventMaintainer] = [] threading.current_thread().name = "process:audio_manager" @@ -146,7 +147,7 @@ class AudioEventMaintainer(threading.Thread): self, camera: CameraConfig, config: FrigateConfig, - camera_metrics: dict[str, CameraMetrics], + camera_metrics: DictProxy, audio_transcription_model_runner: AudioTranscriptionModelRunner | None, stop_event: threading.Event, ) -> None: diff --git a/frigate/log.py b/frigate/log.py index 096b52215..f535a278c 100644 --- a/frigate/log.py +++ b/frigate/log.py @@ -1,12 +1,12 @@ # In log.py import atexit import logging -import multiprocessing as mp import os import sys import threading from collections import deque from logging.handlers import QueueHandler, QueueListener +from multiprocessing.managers import SyncManager from queue import Queue from typing import Deque, Optional @@ -35,12 +35,10 @@ LOG_HANDLER.addFilter( log_listener: Optional[QueueListener] = None log_queue: Optional[Queue] = None -manager = None -def setup_logging() -> None: - global log_listener, log_queue, manager - manager = mp.Manager() +def setup_logging(manager: SyncManager) -> None: + global log_listener, log_queue log_queue = manager.Queue() log_listener = QueueListener(log_queue, LOG_HANDLER, respect_handler_level=True) @@ -57,13 +55,10 @@ def setup_logging() -> None: def _stop_logging() -> None: - global log_listener, manager + global log_listener if log_listener is not None: log_listener.stop() log_listener = None - if manager is not None: - manager.shutdown() - manager = None # When a multiprocessing.Process exits, python tries to flush stdout and stderr. 
However, if the diff --git a/frigate/object_detection/base.py b/frigate/object_detection/base.py index 86febc6a7..d203e8574 100644 --- a/frigate/object_detection/base.py +++ b/frigate/object_detection/base.py @@ -1,16 +1,11 @@ import datetime import logging -import multiprocessing as mp -import os import queue -import signal -import threading from abc import ABC, abstractmethod from multiprocessing import Queue, Value from multiprocessing.synchronize import Event as MpEvent import numpy as np -from setproctitle import setproctitle import frigate.util as util from frigate.comms.object_detector_signaler import ( @@ -25,7 +20,6 @@ from frigate.detectors.detector_config import ( ) from frigate.util.builtin import EventsPerSecond, load_labels from frigate.util.image import SharedMemoryFrameManager, UntrackedSharedMemory -from frigate.util.services import listen from .util import tensor_transform @@ -90,73 +84,75 @@ class LocalObjectDetector(ObjectDetector): return self.detect_api.detect_raw(tensor_input=tensor_input) -def run_detector( - name: str, - detection_queue: Queue, - cameras: list[str], - avg_speed: Value, - start: Value, - detector_config: BaseDetectorConfig, -): - threading.current_thread().name = f"detector:{name}" - logger = logging.getLogger(f"detector.{name}") - logger.info(f"Starting detection process: {os.getpid()}") - setproctitle(f"frigate.detector.{name}") - listen() +class DetectorRunner(util.Process): + def __init__( + self, + name, + detection_queue: Queue, + cameras: list[str], + avg_speed: Value, + start_time: Value, + detector_config: BaseDetectorConfig, + ) -> None: + super().__init__(name=name, daemon=True) + self.detection_queue = detection_queue + self.cameras = cameras + self.avg_speed = avg_speed + self.start_time = start_time + self.detector_config = detector_config + self.outputs: dict = {} - stop_event: MpEvent = mp.Event() - - def receiveSignal(signalNumber, frame): - stop_event.set() - - signal.signal(signal.SIGTERM, receiveSignal) - 
signal.signal(signal.SIGINT, receiveSignal) - - def create_output_shm(name: str): + def create_output_shm(self, name: str): out_shm = UntrackedSharedMemory(name=f"out-{name}", create=False) out_np = np.ndarray((20, 6), dtype=np.float32, buffer=out_shm.buf) - outputs[name] = {"shm": out_shm, "np": out_np} + self.outputs[name] = {"shm": out_shm, "np": out_np} - frame_manager = SharedMemoryFrameManager() - object_detector = LocalObjectDetector(detector_config=detector_config) - detector_publisher = ObjectDetectorPublisher() + def run(self) -> None: + self.pre_run_setup() - outputs = {} - for name in cameras: - create_output_shm(name) + frame_manager = SharedMemoryFrameManager() + object_detector = LocalObjectDetector(detector_config=self.detector_config) + detector_publisher = ObjectDetectorPublisher() - while not stop_event.is_set(): - try: - connection_id = detection_queue.get(timeout=1) - except queue.Empty: - continue - input_frame = frame_manager.get( - connection_id, - (1, detector_config.model.height, detector_config.model.width, 3), - ) + for name in self.cameras: + self.create_output_shm(name) - if input_frame is None: - logger.warning(f"Failed to get frame {connection_id} from SHM") - continue + while not self.stop_event.is_set(): + try: + connection_id = self.detection_queue.get(timeout=1) + except queue.Empty: + continue + input_frame = frame_manager.get( + connection_id, + ( + 1, + self.detector_config.model.height, + self.detector_config.model.width, + 3, + ), + ) - # detect and send the output - start.value = datetime.datetime.now().timestamp() - detections = object_detector.detect_raw(input_frame) - duration = datetime.datetime.now().timestamp() - start.value - frame_manager.close(connection_id) + if input_frame is None: + logger.warning(f"Failed to get frame {connection_id} from SHM") + continue - if connection_id not in outputs: - create_output_shm(connection_id) + # detect and send the output + self.start_time.value = 
datetime.datetime.now().timestamp() + detections = object_detector.detect_raw(input_frame) + duration = datetime.datetime.now().timestamp() - self.start_time.value + frame_manager.close(connection_id) - outputs[connection_id]["np"][:] = detections[:] - signal_id = f"{connection_id}/update" - detector_publisher.publish(signal_id, signal_id) - start.value = 0.0 + if connection_id not in self.outputs: + self.create_output_shm(connection_id) - avg_speed.value = (avg_speed.value * 9 + duration) / 10 + self.outputs[connection_id]["np"][:] = detections[:] + detector_publisher.publish(connection_id) + self.start_time.value = 0.0 - detector_publisher.stop() - logger.info("Exited detection process...") + self.avg_speed.value = (self.avg_speed.value * 9 + duration) / 10 + + detector_publisher.stop() + logger.info("Exited detection process...") class ObjectDetectProcess: @@ -193,19 +189,14 @@ class ObjectDetectProcess: self.detection_start.value = 0.0 if (self.detect_process is not None) and self.detect_process.is_alive(): self.stop() - self.detect_process = util.Process( - target=run_detector, - name=f"detector:{self.name}", - args=( - self.name, - self.detection_queue, - self.cameras, - self.avg_inference_speed, - self.detection_start, - self.detector_config, - ), + self.detect_process = DetectorRunner( + f"detector:{self.name}", + self.detection_queue, + self.cameras, + self.avg_inference_speed, + self.detection_start, + self.detector_config, ) - self.detect_process.daemon = True self.detect_process.start() @@ -231,7 +222,7 @@ class RemoteObjectDetector: ) self.out_shm = UntrackedSharedMemory(name=f"out-{self.name}", create=False) self.out_np_shm = np.ndarray((20, 6), dtype=np.float32, buffer=self.out_shm.buf) - self.detector_subscriber = ObjectDetectorSubscriber(f"{name}/update") + self.detector_subscriber = ObjectDetectorSubscriber(name) def detect(self, tensor_input, threshold=0.4): detections = [] diff --git a/frigate/output/output.py b/frigate/output/output.py index 
d323596fe..8c60e51c7 100644 --- a/frigate/output/output.py +++ b/frigate/output/output.py @@ -2,14 +2,11 @@ import datetime import logging -import multiprocessing as mp import os import shutil -import signal import threading from wsgiref.simple_server import make_server -from setproctitle import setproctitle from ws4py.server.wsgirefserver import ( WebSocketWSGIHandler, WebSocketWSGIRequestHandler, @@ -17,6 +14,7 @@ from ws4py.server.wsgirefserver import ( ) from ws4py.server.wsgiutils import WebSocketWSGIApplication +import frigate.util as util from frigate.comms.detections_updater import DetectionSubscriber, DetectionTypeEnum from frigate.comms.ws import WebSocket from frigate.config import FrigateConfig @@ -73,189 +71,193 @@ def check_disabled_camera_update( birdseye.all_cameras_disabled() -def output_frames( - config: FrigateConfig, -): - threading.current_thread().name = "output" - setproctitle("frigate.output") +class OutputProcess(util.Process): + def __init__(self, config: FrigateConfig) -> None: + super().__init__(name="frigate.output", daemon=True) + self.config = config - stop_event = mp.Event() + def run(self) -> None: + self.pre_run_setup() - def receiveSignal(signalNumber, frame): - stop_event.set() + frame_manager = SharedMemoryFrameManager() - signal.signal(signal.SIGTERM, receiveSignal) - signal.signal(signal.SIGINT, receiveSignal) - - frame_manager = SharedMemoryFrameManager() - - # start a websocket server on 8082 - WebSocketWSGIHandler.http_version = "1.1" - websocket_server = make_server( - "127.0.0.1", - 8082, - server_class=WSGIServer, - handler_class=WebSocketWSGIRequestHandler, - app=WebSocketWSGIApplication(handler_cls=WebSocket), - ) - websocket_server.initialize_websockets_manager() - websocket_thread = threading.Thread(target=websocket_server.serve_forever) - - detection_subscriber = DetectionSubscriber(DetectionTypeEnum.video) - config_subscriber = CameraConfigUpdateSubscriber( - config, - config.cameras, - [ - 
CameraConfigUpdateEnum.add, - CameraConfigUpdateEnum.birdseye, - CameraConfigUpdateEnum.enabled, - CameraConfigUpdateEnum.record, - ], - ) - - jsmpeg_cameras: dict[str, JsmpegCamera] = {} - birdseye: Birdseye | None = None - preview_recorders: dict[str, PreviewRecorder] = {} - preview_write_times: dict[str, float] = {} - failed_frame_requests: dict[str, int] = {} - last_disabled_cam_check = datetime.datetime.now().timestamp() - - move_preview_frames("cache") - - for camera, cam_config in config.cameras.items(): - if not cam_config.enabled_in_config: - continue - - jsmpeg_cameras[camera] = JsmpegCamera(cam_config, stop_event, websocket_server) - preview_recorders[camera] = PreviewRecorder(cam_config) - preview_write_times[camera] = 0 - - if config.birdseye.enabled: - birdseye = Birdseye(config, stop_event, websocket_server) - - websocket_thread.start() - - while not stop_event.is_set(): - # check if there is an updated config - updates = config_subscriber.check_for_updates() - - if "add" in updates: - for camera in updates["add"]: - jsmpeg_cameras[camera] = JsmpegCamera( - cam_config, stop_event, websocket_server - ) - preview_recorders[camera] = PreviewRecorder(cam_config) - preview_write_times[camera] = 0 - - (topic, data) = detection_subscriber.check_for_update(timeout=1) - now = datetime.datetime.now().timestamp() - - if now - last_disabled_cam_check > 5: - # check disabled cameras every 5 seconds - last_disabled_cam_check = now - check_disabled_camera_update( - config, birdseye, preview_recorders, preview_write_times - ) - - if not topic: - continue - - ( - camera, - frame_name, - frame_time, - current_tracked_objects, - motion_boxes, - _, - ) = data - - if not config.cameras[camera].enabled: - continue - - frame = frame_manager.get(frame_name, config.cameras[camera].frame_shape_yuv) - - if frame is None: - logger.debug(f"Failed to get frame {frame_name} from SHM") - failed_frame_requests[camera] = failed_frame_requests.get(camera, 0) + 1 - - if 
failed_frame_requests[camera] > config.cameras[camera].detect.fps: - logger.warning( - f"Failed to retrieve many frames for {camera} from SHM, consider increasing SHM size if this continues." - ) - - continue - else: - failed_frame_requests[camera] = 0 - - # send frames for low fps recording - preview_recorders[camera].write_data( - current_tracked_objects, motion_boxes, frame_time, frame + # start a websocket server on 8082 + WebSocketWSGIHandler.http_version = "1.1" + websocket_server = make_server( + "127.0.0.1", + 8082, + server_class=WSGIServer, + handler_class=WebSocketWSGIRequestHandler, + app=WebSocketWSGIApplication(handler_cls=WebSocket), ) - preview_write_times[camera] = frame_time + websocket_server.initialize_websockets_manager() + websocket_thread = threading.Thread(target=websocket_server.serve_forever) - # send camera frame to ffmpeg process if websockets are connected - if any( - ws.environ["PATH_INFO"].endswith(camera) for ws in websocket_server.manager - ): - # write to the converter for the camera if clients are listening to the specific camera - jsmpeg_cameras[camera].write_frame(frame.tobytes()) + detection_subscriber = DetectionSubscriber(DetectionTypeEnum.video) + config_subscriber = CameraConfigUpdateSubscriber( + self.config, + self.config.cameras, + [ + CameraConfigUpdateEnum.add, + CameraConfigUpdateEnum.birdseye, + CameraConfigUpdateEnum.enabled, + CameraConfigUpdateEnum.record, + ], + ) - # send output data to birdseye if websocket is connected or restreaming - if config.birdseye.enabled and ( - config.birdseye.restream - or any( - ws.environ["PATH_INFO"].endswith("birdseye") - for ws in websocket_server.manager + jsmpeg_cameras: dict[str, JsmpegCamera] = {} + birdseye: Birdseye | None = None + preview_recorders: dict[str, PreviewRecorder] = {} + preview_write_times: dict[str, float] = {} + failed_frame_requests: dict[str, int] = {} + last_disabled_cam_check = datetime.datetime.now().timestamp() + + move_preview_frames("cache") + + for 
camera, cam_config in self.config.cameras.items(): + if not cam_config.enabled_in_config: + continue + + jsmpeg_cameras[camera] = JsmpegCamera( + cam_config, self.stop_event, websocket_server ) - ): - birdseye.write_data( + preview_recorders[camera] = PreviewRecorder(cam_config) + preview_write_times[camera] = 0 + + if self.config.birdseye.enabled: + birdseye = Birdseye(self.config, self.stop_event, websocket_server) + + websocket_thread.start() + + while not self.stop_event.is_set(): + # check if there is an updated config + updates = config_subscriber.check_for_updates() + + if "add" in updates: + for camera in updates["add"]: + jsmpeg_cameras[camera] = JsmpegCamera( + cam_config, self.stop_event, websocket_server + ) + preview_recorders[camera] = PreviewRecorder(cam_config) + preview_write_times[camera] = 0 + + (topic, data) = detection_subscriber.check_for_update(timeout=1) + now = datetime.datetime.now().timestamp() + + if now - last_disabled_cam_check > 5: + # check disabled cameras every 5 seconds + last_disabled_cam_check = now + check_disabled_camera_update( + self.config, birdseye, preview_recorders, preview_write_times + ) + + if not topic: + continue + + ( camera, + frame_name, + frame_time, current_tracked_objects, motion_boxes, - frame_time, - frame, + _, + ) = data + + if not self.config.cameras[camera].enabled: + continue + + frame = frame_manager.get( + frame_name, self.config.cameras[camera].frame_shape_yuv ) - frame_manager.close(frame_name) + if frame is None: + logger.debug(f"Failed to get frame {frame_name} from SHM") + failed_frame_requests[camera] = failed_frame_requests.get(camera, 0) + 1 - move_preview_frames("clips") + if ( + failed_frame_requests[camera] + > self.config.cameras[camera].detect.fps + ): + logger.warning( + f"Failed to retrieve many frames for {camera} from SHM, consider increasing SHM size if this continues." 
+ ) - while True: - (topic, data) = detection_subscriber.check_for_update(timeout=0) + continue + else: + failed_frame_requests[camera] = 0 - if not topic: - break + # send frames for low fps recording + preview_recorders[camera].write_data( + current_tracked_objects, motion_boxes, frame_time, frame + ) + preview_write_times[camera] = frame_time - ( - camera, - frame_name, - frame_time, - current_tracked_objects, - motion_boxes, - regions, - ) = data + # send camera frame to ffmpeg process if websockets are connected + if any( + ws.environ["PATH_INFO"].endswith(camera) + for ws in websocket_server.manager + ): + # write to the converter for the camera if clients are listening to the specific camera + jsmpeg_cameras[camera].write_frame(frame.tobytes()) - frame = frame_manager.get(frame_name, config.cameras[camera].frame_shape_yuv) - frame_manager.close(frame_name) + # send output data to birdseye if websocket is connected or restreaming + if self.config.birdseye.enabled and ( + self.config.birdseye.restream + or any( + ws.environ["PATH_INFO"].endswith("birdseye") + for ws in websocket_server.manager + ) + ): + birdseye.write_data( + camera, + current_tracked_objects, + motion_boxes, + frame_time, + frame, + ) - detection_subscriber.stop() + frame_manager.close(frame_name) - for jsmpeg in jsmpeg_cameras.values(): - jsmpeg.stop() + move_preview_frames("clips") - for preview in preview_recorders.values(): - preview.stop() + while True: + (topic, data) = detection_subscriber.check_for_update(timeout=0) - if birdseye is not None: - birdseye.stop() + if not topic: + break - config_subscriber.stop() - websocket_server.manager.close_all() - websocket_server.manager.stop() - websocket_server.manager.join() - websocket_server.shutdown() - websocket_thread.join() - logger.info("exiting output process...") + ( + camera, + frame_name, + frame_time, + current_tracked_objects, + motion_boxes, + regions, + ) = data + + frame = frame_manager.get( + frame_name, 
self.config.cameras[camera].frame_shape_yuv + ) + frame_manager.close(frame_name) + + detection_subscriber.stop() + + for jsmpeg in jsmpeg_cameras.values(): + jsmpeg.stop() + + for preview in preview_recorders.values(): + preview.stop() + + if birdseye is not None: + birdseye.stop() + + config_subscriber.stop() + websocket_server.manager.close_all() + websocket_server.manager.stop() + websocket_server.manager.join() + websocket_server.shutdown() + websocket_thread.join() + logger.info("exiting output process...") def move_preview_frames(loc: str): diff --git a/frigate/record/record.py b/frigate/record/record.py index 252b80545..40a943a43 100644 --- a/frigate/record/record.py +++ b/frigate/record/record.py @@ -1,50 +1,40 @@ """Run recording maintainer and cleanup.""" import logging -import multiprocessing as mp -import signal -import threading -from types import FrameType -from typing import Optional from playhouse.sqliteq import SqliteQueueDatabase -from setproctitle import setproctitle from frigate.config import FrigateConfig from frigate.models import Recordings, ReviewSegment from frigate.record.maintainer import RecordingMaintainer -from frigate.util.services import listen +from frigate.util import Process as FrigateProcess logger = logging.getLogger(__name__) -def manage_recordings(config: FrigateConfig) -> None: - stop_event = mp.Event() +class RecordProcess(FrigateProcess): + def __init__(self, config: FrigateConfig) -> None: + super().__init__(name="frigate.recording_manager", daemon=True) + self.config = config - def receiveSignal(signalNumber: int, frame: Optional[FrameType]) -> None: - stop_event.set() + def run(self) -> None: + self.pre_run_setup() + db = SqliteQueueDatabase( + self.config.database.path, + pragmas={ + "auto_vacuum": "FULL", # Does not defragment database + "cache_size": -512 * 1000, # 512MB of cache + "synchronous": "NORMAL", # Safe when using WAL https://www.sqlite.org/pragma.html#pragma_synchronous + }, + timeout=max( + 60, 10 * 
len([c for c in self.config.cameras.values() if c.enabled]) + ), + ) + models = [ReviewSegment, Recordings] + db.bind(models) - signal.signal(signal.SIGTERM, receiveSignal) - signal.signal(signal.SIGINT, receiveSignal) - - threading.current_thread().name = "process:recording_manager" - setproctitle("frigate.recording_manager") - listen() - - db = SqliteQueueDatabase( - config.database.path, - pragmas={ - "auto_vacuum": "FULL", # Does not defragment database - "cache_size": -512 * 1000, # 512MB of cache - "synchronous": "NORMAL", # Safe when using WAL https://www.sqlite.org/pragma.html#pragma_synchronous - }, - timeout=max(60, 10 * len([c for c in config.cameras.values() if c.enabled])), - ) - models = [ReviewSegment, Recordings] - db.bind(models) - - maintainer = RecordingMaintainer( - config, - stop_event, - ) - maintainer.start() + maintainer = RecordingMaintainer( + self.config, + self.stop_event, + ) + maintainer.start() diff --git a/frigate/review/review.py b/frigate/review/review.py index dafa6c802..00910e439 100644 --- a/frigate/review/review.py +++ b/frigate/review/review.py @@ -1,36 +1,23 @@ """Run recording maintainer and cleanup.""" import logging -import multiprocessing as mp -import signal -import threading -from types import FrameType -from typing import Optional - -from setproctitle import setproctitle +import frigate.util as util from frigate.config import FrigateConfig from frigate.review.maintainer import ReviewSegmentMaintainer -from frigate.util.services import listen logger = logging.getLogger(__name__) -def manage_review_segments(config: FrigateConfig) -> None: - stop_event = mp.Event() +class ReviewProcess(util.Process): + def __init__(self, config: FrigateConfig) -> None: + super().__init__(name="frigate.review_segment_manager", daemon=True) + self.config = config - def receiveSignal(signalNumber: int, frame: Optional[FrameType]) -> None: - stop_event.set() - - signal.signal(signal.SIGTERM, receiveSignal) - signal.signal(signal.SIGINT, 
receiveSignal) - - threading.current_thread().name = "process:review_segment_manager" - setproctitle("frigate.review_segment_manager") - listen() - - maintainer = ReviewSegmentMaintainer( - config, - stop_event, - ) - maintainer.start() + def run(self) -> None: + self.pre_run_setup() + maintainer = ReviewSegmentMaintainer( + self.config, + self.stop_event, + ) + maintainer.start() diff --git a/frigate/stats/util.py b/frigate/stats/util.py index f5807e1e6..3c41ca3b1 100644 --- a/frigate/stats/util.py +++ b/frigate/stats/util.py @@ -5,13 +5,13 @@ import os import shutil import time from json import JSONDecodeError +from multiprocessing.managers import DictProxy from typing import Any, Optional import psutil import requests from requests.exceptions import RequestException -from frigate.camera import CameraMetrics from frigate.config import FrigateConfig from frigate.const import CACHE_DIR, CLIPS_DIR, RECORD_DIR from frigate.data_processing.types import DataProcessorMetrics @@ -53,7 +53,7 @@ def get_latest_version(config: FrigateConfig) -> str: def stats_init( config: FrigateConfig, - camera_metrics: dict[str, CameraMetrics], + camera_metrics: DictProxy, embeddings_metrics: DataProcessorMetrics | None, detectors: dict[str, ObjectDetectProcess], processes: dict[str, int], @@ -273,10 +273,12 @@ def stats_snapshot( stats["cameras"] = {} for name, camera_stats in camera_metrics.items(): total_detection_fps += camera_stats.detection_fps.value - pid = camera_stats.process.pid if camera_stats.process else None + pid = camera_stats.process_pid.value if camera_stats.process_pid.value else None ffmpeg_pid = camera_stats.ffmpeg_pid.value if camera_stats.ffmpeg_pid else None capture_pid = ( - camera_stats.capture_process.pid if camera_stats.capture_process else None + camera_stats.capture_process_pid.value + if camera_stats.capture_process_pid.value + else None ) stats["cameras"][name] = { "camera_fps": round(camera_stats.camera_fps.value, 2), diff --git a/frigate/util/builtin.py 
b/frigate/util/builtin.py index 0433af18e..90c0f9227 100644 --- a/frigate/util/builtin.py +++ b/frigate/util/builtin.py @@ -341,11 +341,14 @@ def clear_and_unlink(file: Path, missing_ok: bool = True) -> None: def empty_and_close_queue(q: mp.Queue): while True: try: - q.get(block=True, timeout=0.5) - except queue.Empty: - q.close() - q.join_thread() - return + try: + q.get(block=True, timeout=0.5) + except (queue.Empty, EOFError): + q.close() + q.join_thread() + return + except AttributeError: + pass def generate_color_palette(n): diff --git a/frigate/util/process.py b/frigate/util/process.py index ac15539fe..3501e585e 100644 --- a/frigate/util/process.py +++ b/frigate/util/process.py @@ -4,9 +4,8 @@ import multiprocessing as mp import signal import sys import threading -from functools import wraps from logging.handlers import QueueHandler -from typing import Any, Callable, Optional +from typing import Callable, Optional import frigate.log @@ -30,34 +29,12 @@ class BaseProcess(mp.Process): super().start(*args, **kwargs) self.after_start() - def __getattribute__(self, name: str) -> Any: - if name == "run": - run = super().__getattribute__("run") - - @wraps(run) - def run_wrapper(*args, **kwargs): - try: - self.before_run() - return run(*args, **kwargs) - finally: - self.after_run() - - return run_wrapper - - return super().__getattribute__(name) - def before_start(self) -> None: pass def after_start(self) -> None: pass - def before_run(self) -> None: - pass - - def after_run(self) -> None: - pass - class Process(BaseProcess): logger: logging.Logger @@ -73,7 +50,7 @@ class Process(BaseProcess): def before_start(self) -> None: self.__log_queue = frigate.log.log_listener.queue - def before_run(self) -> None: + def pre_run_setup(self) -> None: faulthandler.enable() def receiveSignal(signalNumber, frame): @@ -88,8 +65,6 @@ class Process(BaseProcess): signal.signal(signal.SIGTERM, receiveSignal) signal.signal(signal.SIGINT, receiveSignal) - self.logger = 
logging.getLogger(self.name) - logging.basicConfig(handlers=[], force=True) logging.getLogger().addHandler(QueueHandler(self.__log_queue)) diff --git a/frigate/video.py b/frigate/video.py index 9710dbd81..2869c2bc2 100755 --- a/frigate/video.py +++ b/frigate/video.py @@ -1,9 +1,7 @@ import datetime import logging -import multiprocessing as mp import os import queue -import signal import subprocess as sp import threading import time @@ -12,8 +10,8 @@ from multiprocessing.synchronize import Event as MpEvent from typing import Any import cv2 -from setproctitle import setproctitle +import frigate.util as util from frigate.camera import CameraMetrics, PTZMetrics from frigate.comms.inter_process import InterProcessRequestor from frigate.config import CameraConfig, DetectConfig, ModelConfig @@ -53,7 +51,6 @@ from frigate.util.object import ( is_object_filtered, reduce_detections, ) -from frigate.util.services import listen logger = logging.getLogger(__name__) @@ -328,7 +325,7 @@ class CameraWatchdog(threading.Thread): ffmpeg_cmd, self.logger, self.logpipe, self.frame_size ) self.ffmpeg_pid.value = self.ffmpeg_detect_process.pid - self.capture_thread = CameraCapture( + self.capture_thread = CameraCaptureRunner( self.config, self.shm_frame_count, self.frame_index, @@ -406,7 +403,7 @@ class CameraWatchdog(threading.Thread): return newest_segment_time -class CameraCapture(threading.Thread): +class CameraCaptureRunner(threading.Thread): def __init__( self, config: CameraConfig, @@ -450,103 +447,103 @@ class CameraCapture(threading.Thread): ) -def capture_camera( - config: CameraConfig, shm_frame_count: int, camera_metrics: CameraMetrics -): - stop_event = mp.Event() +class CameraCapture(util.Process): + def __init__( + self, config: CameraConfig, shm_frame_count: int, camera_metrics: CameraMetrics + ) -> None: + super().__init__(name=f"camera_capture:{config.name}", daemon=True) + self.config = config + self.shm_frame_count = shm_frame_count + self.camera_metrics = 
camera_metrics - def receiveSignal(signalNumber, frame): - stop_event.set() - - signal.signal(signal.SIGTERM, receiveSignal) - signal.signal(signal.SIGINT, receiveSignal) - - threading.current_thread().name = f"capture:{config.name}" - setproctitle(f"frigate.capture:{config.name}") - - camera_watchdog = CameraWatchdog( - config, - shm_frame_count, - camera_metrics.frame_queue, - camera_metrics.camera_fps, - camera_metrics.skipped_fps, - camera_metrics.ffmpeg_pid, - stop_event, - ) - camera_watchdog.start() - camera_watchdog.join() + def run(self) -> None: + self.pre_run_setup() + camera_watchdog = CameraWatchdog( + self.config, + self.shm_frame_count, + self.camera_metrics.frame_queue, + self.camera_metrics.camera_fps, + self.camera_metrics.skipped_fps, + self.camera_metrics.ffmpeg_pid, + self.stop_event, + ) + camera_watchdog.start() + camera_watchdog.join() -def track_camera( - name, - config: CameraConfig, - model_config: ModelConfig, - labelmap: dict[int, str], - detection_queue: Queue, - detected_objects_queue, - camera_metrics: CameraMetrics, - ptz_metrics: PTZMetrics, - region_grid: list[list[dict[str, Any]]], -): - stop_event = mp.Event() - - def receiveSignal(signalNumber, frame): - stop_event.set() - - signal.signal(signal.SIGTERM, receiveSignal) - signal.signal(signal.SIGINT, receiveSignal) - - threading.current_thread().name = f"process:{name}" - setproctitle(f"frigate.process:{name}") - listen() - - frame_queue = camera_metrics.frame_queue - - frame_shape = config.frame_shape - - motion_detector = ImprovedMotionDetector( - frame_shape, - config.motion, - config.detect.fps, - name=config.name, - ptz_metrics=ptz_metrics, - ) - object_detector = RemoteObjectDetector( - name, labelmap, detection_queue, model_config, stop_event - ) - - object_tracker = NorfairTracker(config, ptz_metrics) - - frame_manager = SharedMemoryFrameManager() - - # create communication for region grid updates - requestor = InterProcessRequestor() - - process_frames( - name, - 
requestor, - frame_queue, - frame_shape, - model_config, - config, - frame_manager, - motion_detector, - object_detector, - object_tracker, +class CameraTracker(util.Process): + def __init__( + self, + config: CameraConfig, + model_config: ModelConfig, + labelmap: dict[int, str], + detection_queue: Queue, detected_objects_queue, - camera_metrics, - stop_event, - ptz_metrics, - region_grid, - ) + camera_metrics: CameraMetrics, + ptz_metrics: PTZMetrics, + region_grid: list[list[dict[str, Any]]], + ) -> None: + super().__init__(name=f"camera_processor:{config.name}", daemon=True) + self.config = config + self.model_config = model_config + self.labelmap = labelmap + self.detection_queue = detection_queue + self.detected_objects_queue = detected_objects_queue + self.camera_metrics = camera_metrics + self.ptz_metrics = ptz_metrics + self.region_grid = region_grid - # empty the frame queue - logger.info(f"{name}: emptying frame queue") - while not frame_queue.empty(): - (frame_name, _) = frame_queue.get(False) - frame_manager.delete(frame_name) + def run(self) -> None: + self.pre_run_setup() + frame_queue = self.camera_metrics.frame_queue + frame_shape = self.config.frame_shape - logger.info(f"{name}: exiting subprocess") + motion_detector = ImprovedMotionDetector( + frame_shape, + self.config.motion, + self.config.detect.fps, + name=self.config.name, + ptz_metrics=self.ptz_metrics, + ) + object_detector = RemoteObjectDetector( + self.config.name, + self.labelmap, + self.detection_queue, + self.model_config, + self.stop_event, + ) + + object_tracker = NorfairTracker(self.config, self.ptz_metrics) + + frame_manager = SharedMemoryFrameManager() + + # create communication for region grid updates + requestor = InterProcessRequestor() + + process_frames( + requestor, + frame_queue, + frame_shape, + self.model_config, + self.config, + frame_manager, + motion_detector, + object_detector, + object_tracker, + self.detected_objects_queue, + self.camera_metrics, + self.stop_event, 
+ self.ptz_metrics, + self.region_grid, + ) + + # empty the frame queue + logger.info(f"{self.config.name}: emptying frame queue") + while not frame_queue.empty(): + (frame_name, _) = frame_queue.get(False) + frame_manager.delete(frame_name) + + logger.info(f"{self.config.name}: exiting subprocess") def detect( @@ -587,7 +584,6 @@ def detect( def process_frames( - camera_name: str, requestor: InterProcessRequestor, frame_queue: Queue, frame_shape: tuple[int, int], @@ -607,7 +603,7 @@ def process_frames( next_region_update = get_tomorrow_at_time(2) config_subscriber = CameraConfigUpdateSubscriber( None, - {camera_name: camera_config}, + {camera_config.name: camera_config}, [ CameraConfigUpdateEnum.detect, CameraConfigUpdateEnum.enabled, @@ -663,7 +659,9 @@ def process_frames( and prev_enabled != camera_enabled and camera_metrics.frame_queue.empty() ): - logger.debug(f"Camera {camera_name} disabled, clearing tracked objects") + logger.debug( + f"Camera {camera_config.name} disabled, clearing tracked objects" + ) prev_enabled = camera_enabled # Clear norfair's dictionaries @@ -688,7 +686,7 @@ def process_frames( datetime.datetime.now().astimezone(datetime.timezone.utc) > next_region_update ): - region_grid = requestor.send_data(REQUEST_REGION_GRID, camera_name) + region_grid = requestor.send_data(REQUEST_REGION_GRID, camera_config.name) next_region_update = get_tomorrow_at_time(2) try: @@ -708,7 +706,9 @@ def process_frames( frame = frame_manager.get(frame_name, (frame_shape[0] * 3 // 2, frame_shape[1])) if frame is None: - logger.debug(f"{camera_name}: frame {frame_time} is not in memory store.") + logger.debug( + f"{camera_config.name}: frame {frame_time} is not in memory store." 
+ ) continue # look for motion if enabled @@ -947,7 +947,7 @@ def process_frames( ) cv2.imwrite( - f"debug/frames/{camera_name}-{'{:.6f}'.format(frame_time)}.jpg", + f"debug/frames/{camera_config.name}-{'{:.6f}'.format(frame_time)}.jpg", bgr_frame, ) # add to the queue if not full @@ -959,7 +959,7 @@ def process_frames( camera_metrics.process_fps.value = fps_tracker.eps() detected_objects_queue.put( ( - camera_name, + camera_config.name, frame_name, frame_time, detections, diff --git a/web/src/views/system/CameraMetrics.tsx b/web/src/views/system/CameraMetrics.tsx index ba2701926..3f5891265 100644 --- a/web/src/views/system/CameraMetrics.tsx +++ b/web/src/views/system/CameraMetrics.tsx @@ -173,7 +173,7 @@ export default function CameraMetrics({ }); series[key]["detect"].data.push({ x: statsIdx, - y: stats.cpu_usages[camStats.pid.toString()].cpu, + y: stats.cpu_usages[camStats.pid?.toString()]?.cpu, }); }); }); From a6b80c0f9ca9f8a14b749b9157aac9b13b138e9f Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Thu, 12 Jun 2025 14:34:45 -0500 Subject: [PATCH 057/144] Add basic camera settings to UI for testing (#18690) * add basic camera add/edit pane to the UI for testing * only init model runner if transcription is enabled globally * fix role checkboxes --- frigate/events/audio.py | 7 +- web/public/locales/en/views/settings.json | 29 + .../components/settings/CameraEditForm.tsx | 439 ++++++++++ web/src/views/settings/CameraSettingsView.tsx | 782 ++++++++++-------- 4 files changed, 907 insertions(+), 350 deletions(-) create mode 100644 web/src/components/settings/CameraEditForm.tsx diff --git a/frigate/events/audio.py b/frigate/events/audio.py index 9152428fa..791ba80e4 100644 --- a/frigate/events/audio.py +++ b/frigate/events/audio.py @@ -91,12 +91,7 @@ class AudioProcessor(util.Process): self.cameras = cameras self.config = config - if any( - [ - conf.audio_transcription.enabled_in_config == True - for conf in 
config.cameras.values() - ] - ): + if self.config.audio_transcription.enabled: self.transcription_model_runner = AudioTranscriptionModelRunner( self.config.audio_transcription.device, self.config.audio_transcription.model_size, diff --git a/web/public/locales/en/views/settings.json b/web/public/locales/en/views/settings.json index 2b92e81cd..14dc809bc 100644 --- a/web/public/locales/en/views/settings.json +++ b/web/public/locales/en/views/settings.json @@ -176,6 +176,35 @@ "toast": { "success": "Review Classification configuration has been saved. Restart Frigate to apply changes." } + }, + "addCamera": "Add New Camera", + "editCamera": "Edit Camera:", + "selectCamera": "Select a Camera", + "backToSettings": "Back to Camera Settings", + "cameraConfig": { + "add": "Add Camera", + "edit": "Edit Camera", + "description": "Configure camera settings including stream inputs and roles.", + "name": "Camera Name", + "nameRequired": "Camera name is required", + "nameInvalid": "Camera name must contain only letters, numbers, underscores, or hyphens", + "namePlaceholder": "e.g., front_door", + "enabled": "Enabled", + "ffmpeg": { + "inputs": "Input Streams", + "path": "Stream Path", + "pathRequired": "Stream path is required", + "pathPlaceholder": "rtsp://...", + "roles": "Roles", + "rolesRequired": "At least one role is required", + "rolesUnique": "Each role (audio, detect, record) can only be assigned to one stream", + "addInput": "Add Input Stream", + "removeInput": "Remove Input Stream", + "inputsRequired": "At least one input stream is required" + }, + "toast": { + "success": "Camera {{cameraName}} saved successfully" + } } }, "masksAndZones": { diff --git a/web/src/components/settings/CameraEditForm.tsx b/web/src/components/settings/CameraEditForm.tsx new file mode 100644 index 000000000..eb731b2b3 --- /dev/null +++ b/web/src/components/settings/CameraEditForm.tsx @@ -0,0 +1,439 @@ +import { Button } from "@/components/ui/button"; +import { + Form, + FormControl, + 
FormField, + FormItem, + FormLabel, + FormMessage, +} from "@/components/ui/form"; +import { Input } from "@/components/ui/input"; +import { Switch } from "@/components/ui/switch"; +import Heading from "@/components/ui/heading"; +import { Separator } from "@/components/ui/separator"; +import { zodResolver } from "@hookform/resolvers/zod"; +import { useForm, useFieldArray } from "react-hook-form"; +import { z } from "zod"; +import axios from "axios"; +import { toast, Toaster } from "sonner"; +import { useTranslation } from "react-i18next"; +import { useState, useMemo } from "react"; +import { LuTrash2, LuPlus } from "react-icons/lu"; +import ActivityIndicator from "@/components/indicators/activity-indicator"; +import { FrigateConfig } from "@/types/frigateConfig"; +import useSWR from "swr"; + +type ConfigSetBody = { + requires_restart: number; + // TODO: type this better + // eslint-disable-next-line @typescript-eslint/no-explicit-any + config_data: any; + update_topic?: string; +}; + +const RoleEnum = z.enum(["audio", "detect", "record"]); +type Role = z.infer; + +type CameraEditFormProps = { + cameraName?: string; + onSave?: () => void; + onCancel?: () => void; +}; + +export default function CameraEditForm({ + cameraName, + onSave, + onCancel, +}: CameraEditFormProps) { + const { t } = useTranslation(["views/settings"]); + const { data: config } = useSWR("config"); + const [isLoading, setIsLoading] = useState(false); + + const formSchema = useMemo( + () => + z.object({ + cameraName: z + .string() + .min(1, { message: t("camera.cameraConfig.nameRequired") }) + .regex(/^[a-zA-Z0-9_-]+$/, { + message: t("camera.cameraConfig.nameInvalid"), + }), + enabled: z.boolean(), + ffmpeg: z.object({ + inputs: z + .array( + z.object({ + path: z.string().min(1, { + message: t("camera.cameraConfig.ffmpeg.pathRequired"), + }), + roles: z.array(RoleEnum).min(1, { + message: t("camera.cameraConfig.ffmpeg.rolesRequired"), + }), + }), + ) + .min(1, { + message: 
t("camera.cameraConfig.ffmpeg.inputsRequired"), + }) + .refine( + (inputs) => { + const roleOccurrences = new Map(); + inputs.forEach((input) => { + input.roles.forEach((role) => { + roleOccurrences.set( + role, + (roleOccurrences.get(role) || 0) + 1, + ); + }); + }); + return Array.from(roleOccurrences.values()).every( + (count) => count <= 1, + ); + }, + { + message: t("camera.cameraConfig.ffmpeg.rolesUnique"), + path: ["inputs"], + }, + ), + }), + }), + [t], + ); + + type FormValues = z.infer; + + // Determine available roles for default values + const usedRoles = useMemo(() => { + const roles = new Set(); + if (cameraName && config?.cameras[cameraName]) { + const camera = config.cameras[cameraName]; + camera.ffmpeg?.inputs?.forEach((input) => { + input.roles.forEach((role) => roles.add(role as Role)); + }); + } + return roles; + }, [cameraName, config]); + + const defaultValues: FormValues = { + cameraName: cameraName || "", + enabled: true, + ffmpeg: { + inputs: [ + { + path: "", + roles: usedRoles.has("detect") ? [] : ["detect"], + }, + ], + }, + }; + + // Load existing camera config if editing + if (cameraName && config?.cameras[cameraName]) { + const camera = config.cameras[cameraName]; + defaultValues.enabled = camera.enabled ?? true; + defaultValues.ffmpeg.inputs = camera.ffmpeg?.inputs?.length + ? 
camera.ffmpeg.inputs.map((input) => ({ + path: input.path, + roles: input.roles as Role[], + })) + : defaultValues.ffmpeg.inputs; + } + + const form = useForm({ + resolver: zodResolver(formSchema), + defaultValues, + mode: "onChange", + }); + + const { fields, append, remove } = useFieldArray({ + control: form.control, + name: "ffmpeg.inputs", + }); + + // Watch ffmpeg.inputs to track used roles + const watchedInputs = form.watch("ffmpeg.inputs"); + + const saveCameraConfig = (values: FormValues) => { + setIsLoading(true); + const configData: ConfigSetBody["config_data"] = { + cameras: { + [values.cameraName]: { + enabled: values.enabled, + ffmpeg: { + inputs: values.ffmpeg.inputs.map((input) => ({ + path: input.path, + roles: input.roles, + })), + }, + }, + }, + }; + + const requestBody: ConfigSetBody = { + requires_restart: 1, + config_data: configData, + }; + + // Add update_topic for new cameras + if (!cameraName) { + requestBody.update_topic = `config/cameras/${values.cameraName}/add`; + } + + axios + .put("config/set", requestBody) + .then((res) => { + if (res.status === 200) { + toast.success( + t("camera.cameraConfig.toast.success", { + cameraName: values.cameraName, + }), + { position: "top-center" }, + ); + if (onSave) onSave(); + } else { + throw new Error(res.statusText); + } + }) + .catch((error) => { + const errorMessage = + error.response?.data?.message || + error.response?.data?.detail || + "Unknown error"; + toast.error( + t("toast.save.error.title", { errorMessage, ns: "common" }), + { position: "top-center" }, + ); + }) + .finally(() => { + setIsLoading(false); + }); + }; + + const onSubmit = (values: FormValues) => { + if (cameraName && values.cameraName !== cameraName) { + // If camera name changed, delete old camera config + const deleteRequestBody: ConfigSetBody = { + requires_restart: 1, + config_data: { + cameras: { + [cameraName]: "", + }, + }, + update_topic: `config/cameras/${cameraName}/remove`, + }; + + axios + .put("config/set", 
deleteRequestBody) + .then(() => saveCameraConfig(values)) + .catch((error) => { + const errorMessage = + error.response?.data?.message || + error.response?.data?.detail || + "Unknown error"; + toast.error( + t("toast.save.error.title", { errorMessage, ns: "common" }), + { position: "top-center" }, + ); + }) + .finally(() => { + setIsLoading(false); + }); + } else { + saveCameraConfig(values); + } + }; + + // Determine available roles for new streams + const getAvailableRoles = (): Role[] => { + const used = new Set(); + watchedInputs.forEach((input) => { + input.roles.forEach((role) => used.add(role)); + }); + return used.has("detect") ? [] : ["detect"]; + }; + + const getUsedRolesExcludingIndex = (excludeIndex: number) => { + const roles = new Set(); + watchedInputs.forEach((input, idx) => { + if (idx !== excludeIndex) { + input.roles.forEach((role) => roles.add(role)); + } + }); + return roles; + }; + + return ( + <> + + + {cameraName + ? t("camera.cameraConfig.edit") + : t("camera.cameraConfig.add")} + +
    + {t("camera.cameraConfig.description")} +
    + + +
    + + ( + + {t("camera.cameraConfig.name")} + + + + + + )} + /> + + ( + + + + + {t("camera.cameraConfig.enabled")} + + + )} + /> + +
    + {t("camera.cameraConfig.ffmpeg.inputs")} + {fields.map((field, index) => ( +
    + ( + + + {t("camera.cameraConfig.ffmpeg.path")} + + + + + + + )} + /> + + ( + + + {t("camera.cameraConfig.ffmpeg.roles")} + + +
    + {(["audio", "detect", "record"] as const).map( + (role) => ( + + ), + )} +
    +
    + +
    + )} + /> + + +
    + ))} + + {form.formState.errors.ffmpeg?.inputs?.root && + form.formState.errors.ffmpeg.inputs.root.message} + + +
    + +
    + + +
    + + + + ); +} diff --git a/web/src/views/settings/CameraSettingsView.tsx b/web/src/views/settings/CameraSettingsView.tsx index 994936b8f..6d5527c82 100644 --- a/web/src/views/settings/CameraSettingsView.tsx +++ b/web/src/views/settings/CameraSettingsView.tsx @@ -1,7 +1,6 @@ import Heading from "@/components/ui/heading"; import { useCallback, useContext, useEffect, useMemo, useState } from "react"; -import { Toaster } from "sonner"; -import { toast } from "sonner"; +import { Toaster, toast } from "sonner"; import { Form, FormControl, @@ -14,8 +13,8 @@ import { import { zodResolver } from "@hookform/resolvers/zod"; import { useForm } from "react-hook-form"; import { z } from "zod"; -import { Separator } from "../../components/ui/separator"; -import { Button } from "../../components/ui/button"; +import { Separator } from "@/components/ui/separator"; +import { Button } from "@/components/ui/button"; import useSWR from "swr"; import { FrigateConfig } from "@/types/frigateConfig"; import { Checkbox } from "@/components/ui/checkbox"; @@ -33,6 +32,17 @@ import { Label } from "@/components/ui/label"; import { useAlertsState, useDetectionsState, useEnabledState } from "@/api/ws"; import { useDocDomain } from "@/hooks/use-doc-domain"; import { getTranslatedLabel } from "@/utils/i18n"; +import CameraEditForm from "@/components/settings/CameraEditForm"; +import { LuPlus } from "react-icons/lu"; +import { + Select, + SelectContent, + SelectItem, + SelectTrigger, + SelectValue, +} from "@/components/ui/select"; +import { IoMdArrowRoundBack } from "react-icons/io"; +import { isDesktop } from "react-device-detect"; type CameraSettingsViewProps = { selectedCamera: string; @@ -63,9 +73,23 @@ export default function CameraSettingsView({ const [changedValue, setChangedValue] = useState(false); const [isLoading, setIsLoading] = useState(false); const [selectDetections, setSelectDetections] = useState(false); + const [viewMode, setViewMode] = useState<"settings" | "add" | "edit">( + 
"settings", + ); // Control view state + const [editCameraName, setEditCameraName] = useState( + undefined, + ); // Track camera being edited const { addMessage, removeMessage } = useContext(StatusBarMessagesContext)!; + // List of cameras for dropdown + const cameras = useMemo(() => { + if (config) { + return Object.keys(config.cameras).sort(); + } + return []; + }, [config]); + // zones and labels const zones = useMemo(() => { @@ -259,7 +283,14 @@ export default function CameraSettingsView({ document.title = t("documentTitle.camera"); }, [t]); - if (!cameraConfig && !selectedCamera) { + // Handle back navigation from add/edit form + const handleBack = useCallback(() => { + setViewMode("settings"); + setEditCameraName(undefined); + updateConfig(); + }, [updateConfig]); + + if (!cameraConfig && !selectedCamera && viewMode === "settings") { return ; } @@ -268,254 +299,184 @@ export default function CameraSettingsView({
    - - camera.title - - - - - - camera.streams.title - - -
    - { - sendEnabled(isChecked ? "ON" : "OFF"); - }} - /> -
    - -
    -
    -
    - camera.streams.desc -
    - - - - camera.review.title - - -
    -
    - { - sendAlerts(isChecked ? "ON" : "OFF"); - }} - /> -
    - + {viewMode === "settings" ? ( + <> + + {t("camera.title")} + +
    + + {cameras.length > 0 && ( +
    + + +
    + )}
    -
    -
    + + + + camera.streams.title + +
    { - sendDetections(isChecked ? "ON" : "OFF"); + sendEnabled(isChecked ? "ON" : "OFF"); }} />
    -
    - camera.review.desc + camera.streams.desc
    -
    -
    + - + + camera.review.title + - - camera.reviewClassification.title - +
    +
    + { + sendAlerts(isChecked ? "ON" : "OFF"); + }} + /> +
    + +
    +
    +
    +
    + { + sendDetections(isChecked ? "ON" : "OFF"); + }} + /> +
    + +
    +
    +
    + camera.review.desc +
    +
    +
    -
    -
    -

    + + + - camera.reviewClassification.desc + camera.reviewClassification.title -

    -
    - - - camera.reviewClassification.readTheDocumentation - {" "} - - + + +
    +
    +

    + + camera.reviewClassification.desc + +

    +
    + + + camera.reviewClassification.readTheDocumentation + {" "} + + +
    +
    -
    -
    -
    - -
    0 && - "grid items-start gap-5 md:grid-cols-2", - )} - > - ( - - {zones && zones?.length > 0 ? ( - <> -
    - - - camera.review.alerts - - - - - - camera.reviewClassification.selectAlertsZones - - -
    -
    - {zones?.map((zone) => ( - { - return ( - - - { - setChangedValue(true); - return checked - ? field.onChange([ - ...field.value, - zone.name, - ]) - : field.onChange( - field.value?.filter( - (value) => - value !== zone.name, - ), - ); - }} - /> - - - {zone.name.replaceAll("_", " ")} - - - ); - }} - /> - ))} -
    - - ) : ( -
    - - camera.reviewClassification.noDefinedZones - -
    - )} - -
    - {watchedAlertsZones && watchedAlertsZones.length > 0 - ? t( - "camera.reviewClassification.zoneObjectAlertsTips", - { - alertsLabels, - zone: watchedAlertsZones - .map((zone) => - capitalizeFirstLetter(zone).replaceAll( - "_", - " ", - ), - ) - .join(", "), - cameraName: capitalizeFirstLetter( - cameraConfig?.name ?? "", - ).replaceAll("_", " "), - }, - ) - : t("camera.reviewClassification.objectAlertsTips", { - alertsLabels, - cameraName: capitalizeFirstLetter( - cameraConfig?.name ?? "", - ).replaceAll("_", " "), - })} -
    -
    - )} - /> - - ( - - {zones && zones?.length > 0 && ( - <> -
    - - - camera.review.detections - - - - {selectDetections && ( - - - camera.reviewClassification.selectDetectionsZones - - - )} -
    - - {selectDetections && ( -
    - {zones?.map((zone) => ( - { - return ( + + +
    0 && + "grid items-start gap-5 md:grid-cols-2", + )} + > + ( + + {zones && zones?.length > 0 ? ( + <> +
    + + + camera.review.alerts + + + + + + camera.reviewClassification.selectAlertsZones + + +
    +
    + {zones?.map((zone) => ( + ( { + setChangedValue(true); return checked ? field.onChange([ ...field.value, @@ -545,126 +507,258 @@ export default function CameraSettingsView({ {zone.name.replaceAll("_", " ")} - ); - }} - /> - ))} + )} + /> + ))} +
    + + ) : ( +
    + + camera.reviewClassification.noDefinedZones +
    )} - -
    - -
    - -
    +
    + {watchedAlertsZones && watchedAlertsZones.length > 0 + ? t( + "camera.reviewClassification.zoneObjectAlertsTips", + { + alertsLabels, + zone: watchedAlertsZones + .map((zone) => + capitalizeFirstLetter(zone).replaceAll( + "_", + " ", + ), + ) + .join(", "), + cameraName: capitalizeFirstLetter( + cameraConfig?.name ?? "", + ).replaceAll("_", " "), + }, + ) + : t( + "camera.reviewClassification.objectAlertsTips", + { + alertsLabels, + cameraName: capitalizeFirstLetter( + cameraConfig?.name ?? "", + ).replaceAll("_", " "), + }, + )}
    - + )} + /> -
    - {watchedDetectionsZones && - watchedDetectionsZones.length > 0 ? ( - !selectDetections ? ( - - capitalizeFirstLetter(zone).replaceAll( - "_", - " ", - ), - ) - .join(", "), - cameraName: capitalizeFirstLetter( - cameraConfig?.name ?? "", - ).replaceAll("_", " "), - }} - ns="views/settings" - > - ) : ( - - capitalizeFirstLetter(zone).replaceAll( - "_", - " ", - ), - ) - .join(", "), - cameraName: capitalizeFirstLetter( - cameraConfig?.name ?? "", - ).replaceAll("_", " "), - }} - ns="views/settings" - /> - ) - ) : ( - - )} -
    - + ( + + {zones && zones?.length > 0 && ( + <> +
    + + + camera.review.detections + + + + {selectDetections && ( + + + camera.reviewClassification.selectDetectionsZones + + + )} +
    + + {selectDetections && ( +
    + {zones?.map((zone) => ( + ( + + + { + return checked + ? field.onChange([ + ...field.value, + zone.name, + ]) + : field.onChange( + field.value?.filter( + (value) => + value !== zone.name, + ), + ); + }} + /> + + + {zone.name.replaceAll("_", " ")} + + + )} + /> + ))} +
    + )} + + +
    + +
    + +
    +
    + + )} + +
    + {watchedDetectionsZones && + watchedDetectionsZones.length > 0 ? ( + !selectDetections ? ( + + capitalizeFirstLetter(zone).replaceAll( + "_", + " ", + ), + ) + .join(", "), + cameraName: capitalizeFirstLetter( + cameraConfig?.name ?? "", + ).replaceAll("_", " "), + }} + ns="views/settings" + /> + ) : ( + + capitalizeFirstLetter(zone).replaceAll( + "_", + " ", + ), + ) + .join(", "), + cameraName: capitalizeFirstLetter( + cameraConfig?.name ?? "", + ).replaceAll("_", " "), + }} + ns="views/settings" + /> + ) + ) : ( + + )} +
    +
    + )} + /> +
    + + +
    + + +
    + + + + ) : ( + <> +
    + +
    +
    +
    - - -
    - - -
    - - + + )}
    From 4deccf08a1051b9cca4852cf2a3cc11dd0108afe Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Fri, 13 Jun 2025 09:43:38 -0500 Subject: [PATCH 058/144] Ensure logging config is propagated to forked processes (#18704) * Move log level initialization to log * Use logger config * Formatting * Fix config order * Set process names --------- Co-authored-by: Nicolas Mowen --- frigate/app.py | 1 + frigate/config/logger.py | 29 ++++------------------------- frigate/embeddings/__init__.py | 2 +- frigate/events/audio.py | 2 +- frigate/log.py | 26 ++++++++++++++++++++++++++ frigate/object_detection/base.py | 8 +++++++- frigate/output/output.py | 2 +- frigate/record/record.py | 2 +- frigate/review/review.py | 2 +- frigate/util/process.py | 12 +++++++++++- 10 files changed, 54 insertions(+), 32 deletions(-) diff --git a/frigate/app.py b/frigate/app.py index 010f311b9..687a06be4 100644 --- a/frigate/app.py +++ b/frigate/app.py @@ -387,6 +387,7 @@ class FrigateApp: name, self.detection_queue, list(self.config.cameras.keys()), + self.config, detector_config, ) diff --git a/frigate/config/logger.py b/frigate/config/logger.py index a3eed23d0..0ba3e6972 100644 --- a/frigate/config/logger.py +++ b/frigate/config/logger.py @@ -1,20 +1,11 @@ -import logging -from enum import Enum - from pydantic import Field, ValidationInfo, model_validator from typing_extensions import Self +from frigate.log import LogLevel, apply_log_levels + from .base import FrigateBaseModel -__all__ = ["LoggerConfig", "LogLevel"] - - -class LogLevel(str, Enum): - debug = "debug" - info = "info" - warning = "warning" - error = "error" - critical = "critical" +__all__ = ["LoggerConfig"] class LoggerConfig(FrigateBaseModel): @@ -26,18 +17,6 @@ class LoggerConfig(FrigateBaseModel): @model_validator(mode="after") def post_validation(self, info: ValidationInfo) -> Self: if isinstance(info.context, dict) and info.context.get("install", False): - 
logging.getLogger().setLevel(self.default.value.upper()) - - log_levels = { - "absl": LogLevel.error, - "httpx": LogLevel.error, - "tensorflow": LogLevel.error, - "werkzeug": LogLevel.error, - "ws4py": LogLevel.error, - **self.logs, - } - - for log, level in log_levels.items(): - logging.getLogger(log).setLevel(level.value.upper()) + apply_log_levels(self.default.value.upper(), self.logs) return self diff --git a/frigate/embeddings/__init__.py b/frigate/embeddings/__init__.py index 054f2c334..9c72bcd03 100644 --- a/frigate/embeddings/__init__.py +++ b/frigate/embeddings/__init__.py @@ -36,7 +36,7 @@ class EmbeddingProcess(FrigateProcess): self.metrics = metrics def run(self) -> None: - self.pre_run_setup() + self.pre_run_setup(self.config.logger) maintainer = EmbeddingMaintainer( self.config, self.metrics, diff --git a/frigate/events/audio.py b/frigate/events/audio.py index 791ba80e4..d7242cf2b 100644 --- a/frigate/events/audio.py +++ b/frigate/events/audio.py @@ -100,7 +100,7 @@ class AudioProcessor(util.Process): self.transcription_model_runner = None def run(self) -> None: - self.pre_run_setup() + self.pre_run_setup(self.config.logger) audio_threads: list[AudioEventMaintainer] = [] threading.current_thread().name = "process:audio_manager" diff --git a/frigate/log.py b/frigate/log.py index f535a278c..2e9c781f1 100644 --- a/frigate/log.py +++ b/frigate/log.py @@ -5,6 +5,7 @@ import os import sys import threading from collections import deque +from enum import Enum from logging.handlers import QueueHandler, QueueListener from multiprocessing.managers import SyncManager from queue import Queue @@ -33,6 +34,15 @@ LOG_HANDLER.addFilter( not in record.getMessage() ) + +class LogLevel(str, Enum): + debug = "debug" + info = "info" + warning = "warning" + error = "error" + critical = "critical" + + log_listener: Optional[QueueListener] = None log_queue: Optional[Queue] = None @@ -61,6 +71,22 @@ def _stop_logging() -> None: log_listener = None +def 
apply_log_levels(default: str, log_levels: dict[str, LogLevel]) -> None: + logging.getLogger().setLevel(default) + + log_levels = { + "absl": LogLevel.error, + "httpx": LogLevel.error, + "tensorflow": LogLevel.error, + "werkzeug": LogLevel.error, + "ws4py": LogLevel.error, + **log_levels, + } + + for log, level in log_levels.items(): + logging.getLogger(log).setLevel(level.value.upper()) + + # When a multiprocessing.Process exits, python tries to flush stdout and stderr. However, if the # process is created after a thread (for example a logging thread) is created and the process fork # happens while an internal lock is held, the stdout/err flush can cause a deadlock. diff --git a/frigate/object_detection/base.py b/frigate/object_detection/base.py index d203e8574..e86b1b036 100644 --- a/frigate/object_detection/base.py +++ b/frigate/object_detection/base.py @@ -12,6 +12,7 @@ from frigate.comms.object_detector_signaler import ( ObjectDetectorPublisher, ObjectDetectorSubscriber, ) +from frigate.config import FrigateConfig from frigate.detectors import create_detector from frigate.detectors.detector_config import ( BaseDetectorConfig, @@ -92,6 +93,7 @@ class DetectorRunner(util.Process): cameras: list[str], avg_speed: Value, start_time: Value, + config: FrigateConfig, detector_config: BaseDetectorConfig, ) -> None: super().__init__(name=name, daemon=True) @@ -99,6 +101,7 @@ class DetectorRunner(util.Process): self.cameras = cameras self.avg_speed = avg_speed self.start_time = start_time + self.config = config self.detector_config = detector_config self.outputs: dict = {} @@ -108,7 +111,7 @@ class DetectorRunner(util.Process): self.outputs[name] = {"shm": out_shm, "np": out_np} def run(self) -> None: - self.pre_run_setup() + self.pre_run_setup(self.config.logger) frame_manager = SharedMemoryFrameManager() object_detector = LocalObjectDetector(detector_config=self.detector_config) @@ -161,6 +164,7 @@ class ObjectDetectProcess: name: str, detection_queue: Queue, cameras: 
list[str], + config: FrigateConfig, detector_config: BaseDetectorConfig, ): self.name = name @@ -169,6 +173,7 @@ class ObjectDetectProcess: self.avg_inference_speed = Value("d", 0.01) self.detection_start = Value("d", 0.0) self.detect_process: util.Process | None = None + self.config = config self.detector_config = detector_config self.start_or_restart() @@ -195,6 +200,7 @@ class ObjectDetectProcess: self.cameras, self.avg_inference_speed, self.detection_start, + self.config, self.detector_config, ) self.detect_process.start() diff --git a/frigate/output/output.py b/frigate/output/output.py index 8c60e51c7..0cb8a649f 100644 --- a/frigate/output/output.py +++ b/frigate/output/output.py @@ -77,7 +77,7 @@ class OutputProcess(util.Process): self.config = config def run(self) -> None: - self.pre_run_setup() + self.pre_run_setup(self.config.logger) frame_manager = SharedMemoryFrameManager() diff --git a/frigate/record/record.py b/frigate/record/record.py index 40a943a43..153560a11 100644 --- a/frigate/record/record.py +++ b/frigate/record/record.py @@ -18,7 +18,7 @@ class RecordProcess(FrigateProcess): self.config = config def run(self) -> None: - self.pre_run_setup() + self.pre_run_setup(self.config.logger) db = SqliteQueueDatabase( self.config.database.path, pragmas={ diff --git a/frigate/review/review.py b/frigate/review/review.py index 00910e439..e687f4f45 100644 --- a/frigate/review/review.py +++ b/frigate/review/review.py @@ -15,7 +15,7 @@ class ReviewProcess(util.Process): self.config = config def run(self) -> None: - self.pre_run_setup() + self.pre_run_setup(self.config.logger) maintainer = ReviewSegmentMaintainer( self.config, self.stop_event, diff --git a/frigate/util/process.py b/frigate/util/process.py index 3501e585e..6e3459c6b 100644 --- a/frigate/util/process.py +++ b/frigate/util/process.py @@ -7,7 +7,10 @@ import threading from logging.handlers import QueueHandler from typing import Callable, Optional +from setproctitle import setproctitle + import 
frigate.log +from frigate.config.logger import LoggerConfig class BaseProcess(mp.Process): @@ -50,7 +53,9 @@ class Process(BaseProcess): def before_start(self) -> None: self.__log_queue = frigate.log.log_listener.queue - def pre_run_setup(self) -> None: + def pre_run_setup(self, logConfig: LoggerConfig | None = None) -> None: + setproctitle(self.name) + threading.current_thread().name = f"process:{self.name}" faulthandler.enable() def receiveSignal(signalNumber, frame): @@ -68,3 +73,8 @@ class Process(BaseProcess): self.logger = logging.getLogger(self.name) logging.basicConfig(handlers=[], force=True) logging.getLogger().addHandler(QueueHandler(self.__log_queue)) + + if logConfig: + frigate.log.apply_log_levels( + logConfig.default.value.upper(), logConfig.logs + ) From e832bb4badbf7ad015d8dfba9e2242cb114b3860 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Fri, 13 Jun 2025 11:09:51 -0600 Subject: [PATCH 059/144] Fix go2rtc init (#18708) * Cleanup process handling * Adjust process name --- benchmark.py | 4 ++-- frigate/embeddings/__init__.py | 2 +- frigate/events/audio.py | 4 ++-- frigate/object_detection/base.py | 8 ++++---- frigate/output/output.py | 4 ++-- frigate/record/record.py | 2 +- frigate/review/review.py | 4 ++-- frigate/util/__init__.py | 3 --- frigate/util/classification.py | 4 ++-- frigate/util/process.py | 2 +- frigate/video.py | 6 +++--- 11 files changed, 20 insertions(+), 23 deletions(-) diff --git a/benchmark.py b/benchmark.py index 1f39302a7..46adc59df 100755 --- a/benchmark.py +++ b/benchmark.py @@ -4,13 +4,13 @@ from statistics import mean import numpy as np -import frigate.util as util from frigate.config import DetectorTypeEnum from frigate.object_detection.base import ( ObjectDetectProcess, RemoteObjectDetector, load_labels, ) +from frigate.util.process import FrigateProcess my_frame = np.expand_dims(np.full((300, 300, 3), 1, np.uint8), axis=0) labels = load_labels("/labelmap.txt") @@ -91,7 +91,7 @@ edgetpu_process_2 = 
ObjectDetectProcess( ) for x in range(0, 10): - camera_process = util.Process( + camera_process = FrigateProcess( target=start, args=(x, 300, detection_queue, events[str(x)]) ) camera_process.daemon = True diff --git a/frigate/embeddings/__init__.py b/frigate/embeddings/__init__.py index 9c72bcd03..cb897ed4b 100644 --- a/frigate/embeddings/__init__.py +++ b/frigate/embeddings/__init__.py @@ -17,9 +17,9 @@ from frigate.const import CONFIG_DIR, FACE_DIR from frigate.data_processing.types import DataProcessorMetrics from frigate.db.sqlitevecq import SqliteVecQueueDatabase from frigate.models import Event -from frigate.util import Process as FrigateProcess from frigate.util.builtin import serialize from frigate.util.classification import kickoff_model_training +from frigate.util.process import FrigateProcess from .maintainer import EmbeddingMaintainer from .util import ZScoreNormalization diff --git a/frigate/events/audio.py b/frigate/events/audio.py index d7242cf2b..7f94c2a00 100644 --- a/frigate/events/audio.py +++ b/frigate/events/audio.py @@ -11,7 +11,6 @@ from typing import Any, Tuple import numpy as np -import frigate.util as util from frigate.comms.detections_updater import DetectionPublisher, DetectionTypeEnum from frigate.comms.event_metadata_updater import ( EventMetadataPublisher, @@ -40,6 +39,7 @@ from frigate.ffmpeg_presets import parse_preset_input from frigate.log import LogPipe from frigate.object_detection.base import load_labels from frigate.util.builtin import get_ffmpeg_arg_list +from frigate.util.process import FrigateProcess from frigate.video import start_or_restart_ffmpeg, stop_ffmpeg try: @@ -76,7 +76,7 @@ def get_ffmpeg_command(ffmpeg: FfmpegConfig) -> list[str]: ) -class AudioProcessor(util.Process): +class AudioProcessor(FrigateProcess): name = "frigate.audio_manager" def __init__( diff --git a/frigate/object_detection/base.py b/frigate/object_detection/base.py index e86b1b036..2953f86eb 100644 --- a/frigate/object_detection/base.py +++ 
b/frigate/object_detection/base.py @@ -7,7 +7,6 @@ from multiprocessing.synchronize import Event as MpEvent import numpy as np -import frigate.util as util from frigate.comms.object_detector_signaler import ( ObjectDetectorPublisher, ObjectDetectorSubscriber, @@ -21,6 +20,7 @@ from frigate.detectors.detector_config import ( ) from frigate.util.builtin import EventsPerSecond, load_labels from frigate.util.image import SharedMemoryFrameManager, UntrackedSharedMemory +from frigate.util.process import FrigateProcess from .util import tensor_transform @@ -85,7 +85,7 @@ class LocalObjectDetector(ObjectDetector): return self.detect_api.detect_raw(tensor_input=tensor_input) -class DetectorRunner(util.Process): +class DetectorRunner(FrigateProcess): def __init__( self, name, @@ -172,7 +172,7 @@ class ObjectDetectProcess: self.detection_queue = detection_queue self.avg_inference_speed = Value("d", 0.01) self.detection_start = Value("d", 0.0) - self.detect_process: util.Process | None = None + self.detect_process: FrigateProcess | None = None self.config = config self.detector_config = detector_config self.start_or_restart() @@ -195,7 +195,7 @@ class ObjectDetectProcess: if (self.detect_process is not None) and self.detect_process.is_alive(): self.stop() self.detect_process = DetectorRunner( - f"detector:{self.name}", + f"frigate.detector:{self.name}", self.detection_queue, self.cameras, self.avg_inference_speed, diff --git a/frigate/output/output.py b/frigate/output/output.py index 0cb8a649f..da5906e78 100644 --- a/frigate/output/output.py +++ b/frigate/output/output.py @@ -14,7 +14,6 @@ from ws4py.server.wsgirefserver import ( ) from ws4py.server.wsgiutils import WebSocketWSGIApplication -import frigate.util as util from frigate.comms.detections_updater import DetectionSubscriber, DetectionTypeEnum from frigate.comms.ws import WebSocket from frigate.config import FrigateConfig @@ -27,6 +26,7 @@ from frigate.output.birdseye import Birdseye from frigate.output.camera import 
JsmpegCamera from frigate.output.preview import PreviewRecorder from frigate.util.image import SharedMemoryFrameManager, get_blank_yuv_frame +from frigate.util.process import FrigateProcess logger = logging.getLogger(__name__) @@ -71,7 +71,7 @@ def check_disabled_camera_update( birdseye.all_cameras_disabled() -class OutputProcess(util.Process): +class OutputProcess(FrigateProcess): def __init__(self, config: FrigateConfig) -> None: super().__init__(name="frigate.output", daemon=True) self.config = config diff --git a/frigate/record/record.py b/frigate/record/record.py index 153560a11..c52260745 100644 --- a/frigate/record/record.py +++ b/frigate/record/record.py @@ -7,7 +7,7 @@ from playhouse.sqliteq import SqliteQueueDatabase from frigate.config import FrigateConfig from frigate.models import Recordings, ReviewSegment from frigate.record.maintainer import RecordingMaintainer -from frigate.util import Process as FrigateProcess +from frigate.util.process import FrigateProcess logger = logging.getLogger(__name__) diff --git a/frigate/review/review.py b/frigate/review/review.py index e687f4f45..677d07776 100644 --- a/frigate/review/review.py +++ b/frigate/review/review.py @@ -2,14 +2,14 @@ import logging -import frigate.util as util from frigate.config import FrigateConfig from frigate.review.maintainer import ReviewSegmentMaintainer +from frigate.util.process import FrigateProcess logger = logging.getLogger(__name__) -class ReviewProcess(util.Process): +class ReviewProcess(FrigateProcess): def __init__(self, config: FrigateConfig) -> None: super().__init__(name="frigate.review_segment_manager", daemon=True) self.config = config diff --git a/frigate/util/__init__.py b/frigate/util/__init__.py index 307bf4f8b..e69de29bb 100644 --- a/frigate/util/__init__.py +++ b/frigate/util/__init__.py @@ -1,3 +0,0 @@ -from .process import Process - -__all__ = ["Process"] diff --git a/frigate/util/classification.py b/frigate/util/classification.py index 842f38fa2..c6e2b5878 100644 
--- a/frigate/util/classification.py +++ b/frigate/util/classification.py @@ -14,7 +14,7 @@ from frigate.comms.embeddings_updater import EmbeddingsRequestEnum, EmbeddingsRe from frigate.comms.inter_process import InterProcessRequestor from frigate.const import CLIPS_DIR, MODEL_CACHE_DIR, UPDATE_MODEL_STATE from frigate.types import ModelStatusTypesEnum -from frigate.util import Process +from frigate.util.process import FrigateProcess BATCH_SIZE = 16 EPOCHS = 50 @@ -144,7 +144,7 @@ def kickoff_model_training( # run training in sub process so that # tensorflow will free CPU / GPU memory # upon training completion - training_process = Process( + training_process = FrigateProcess( target=__train_classification_model, name=f"model_training:{model_name}", args=(model_name,), diff --git a/frigate/util/process.py b/frigate/util/process.py index 6e3459c6b..9234a0ea6 100644 --- a/frigate/util/process.py +++ b/frigate/util/process.py @@ -39,7 +39,7 @@ class BaseProcess(mp.Process): pass -class Process(BaseProcess): +class FrigateProcess(BaseProcess): logger: logging.Logger @property diff --git a/frigate/video.py b/frigate/video.py index 2869c2bc2..98f3c7a8b 100755 --- a/frigate/video.py +++ b/frigate/video.py @@ -11,7 +11,6 @@ from typing import Any import cv2 -import frigate.util as util from frigate.camera import CameraMetrics, PTZMetrics from frigate.comms.inter_process import InterProcessRequestor from frigate.config import CameraConfig, DetectConfig, ModelConfig @@ -51,6 +50,7 @@ from frigate.util.object import ( is_object_filtered, reduce_detections, ) +from frigate.util.process import FrigateProcess logger = logging.getLogger(__name__) @@ -447,7 +447,7 @@ class CameraCaptureRunner(threading.Thread): ) -class CameraCapture(util.Process): +class CameraCapture(FrigateProcess): def __init__( self, config: CameraConfig, shm_frame_count: int, camera_metrics: CameraMetrics ) -> None: @@ -471,7 +471,7 @@ class CameraCapture(util.Process): camera_watchdog.join() -class 
CameraTracker(util.Process): +class CameraTracker(FrigateProcess): def __init__( self, config: CameraConfig, From ef060b97ca88ef41e3e425ed9615e4651a7dbbb2 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Fri, 13 Jun 2025 18:22:13 -0600 Subject: [PATCH 060/144] Reduce tf initialization --- frigate/util/classification.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/frigate/util/classification.py b/frigate/util/classification.py index c6e2b5878..2f5cc89f2 100644 --- a/frigate/util/classification.py +++ b/frigate/util/classification.py @@ -5,10 +5,6 @@ import sys import cv2 import numpy as np -import tensorflow as tf -from tensorflow.keras import layers, models, optimizers -from tensorflow.keras.applications import MobileNetV2 -from tensorflow.keras.preprocessing.image import ImageDataGenerator from frigate.comms.embeddings_updater import EmbeddingsRequestEnum, EmbeddingsRequestor from frigate.comms.inter_process import InterProcessRequestor @@ -44,6 +40,13 @@ def __generate_representative_dataset_factory(dataset_dir: str): @staticmethod def __train_classification_model(model_name: str) -> bool: """Train a classification model.""" + + # import in the function so that tensorflow is not initialized multiple times + import tensorflow as tf + from tensorflow.keras import layers, models, optimizers + from tensorflow.keras.applications import MobileNetV2 + from tensorflow.keras.preprocessing.image import ImageDataGenerator + dataset_dir = os.path.join(CLIPS_DIR, model_name, "dataset") model_dir = os.path.join(MODEL_CACHE_DIR, model_name) num_classes = len( From 2f4d7353f4125cb5dc8b25d1a362a420735b73d4 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Sat, 14 Jun 2025 09:27:50 -0600 Subject: [PATCH 061/144] Don't use staticmethod --- frigate/util/classification.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/frigate/util/classification.py b/frigate/util/classification.py index 2f5cc89f2..a2ba1bf26 100644 --- 
a/frigate/util/classification.py +++ b/frigate/util/classification.py @@ -17,7 +17,6 @@ EPOCHS = 50 LEARNING_RATE = 0.001 -@staticmethod def __generate_representative_dataset_factory(dataset_dir: str): def generate_representative_dataset(): image_paths = [] @@ -37,7 +36,6 @@ def __generate_representative_dataset_factory(dataset_dir: str): return generate_representative_dataset -@staticmethod def __train_classification_model(model_name: str) -> bool: """Train a classification model.""" From 0d5a49ab82b64208cabdc2033b2f2d3f0d5e7212 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Tue, 17 Jun 2025 12:11:11 -0600 Subject: [PATCH 062/144] Don't fail on unicode debug for config updates --- frigate/comms/config_updater.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frigate/comms/config_updater.py b/frigate/comms/config_updater.py index 06b870c62..0486cbdd1 100644 --- a/frigate/comms/config_updater.py +++ b/frigate/comms/config_updater.py @@ -50,7 +50,7 @@ class ConfigSubscriber: return (topic, obj) else: return (None, None) - except zmq.ZMQError: + except (zmq.ZMQError, UnicodeDecodeError): return (None, None) def stop(self) -> None: From 847b03e71bb75b490c38a5307c6e838c8b5ad28a Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Fri, 20 Jun 2025 06:44:48 -0600 Subject: [PATCH 063/144] Catch unpickling error --- frigate/comms/config_updater.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/frigate/comms/config_updater.py b/frigate/comms/config_updater.py index 0486cbdd1..866315d95 100644 --- a/frigate/comms/config_updater.py +++ b/frigate/comms/config_updater.py @@ -1,6 +1,7 @@ """Facilitates communication between processes.""" import multiprocessing as mp +from _pickle import UnpicklingError from multiprocessing.synchronize import Event as MpEvent from typing import Any, Optional @@ -50,7 +51,7 @@ class ConfigSubscriber: return (topic, obj) else: return (None, None) - except (zmq.ZMQError, UnicodeDecodeError): + except 
(zmq.ZMQError, UnicodeDecodeError, UnpicklingError): return (None, None) def stop(self) -> None: From 7c8164aa993302123fde92501b70a5a4ed62f6d1 Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Sat, 21 Jun 2025 15:38:34 -0500 Subject: [PATCH 064/144] Fix birdseye crash when dynamically adding a camera (#18821) --- frigate/app.py | 1 + frigate/camera/maintainer.py | 6 ++- frigate/output/birdseye.py | 73 ++++++++++++++++++++++++------------ frigate/output/output.py | 8 +++- 4 files changed, 60 insertions(+), 28 deletions(-) diff --git a/frigate/app.py b/frigate/app.py index 687a06be4..48d36988f 100644 --- a/frigate/app.py +++ b/frigate/app.py @@ -425,6 +425,7 @@ class FrigateApp: self.camera_metrics, self.ptz_metrics, self.stop_event, + self.metrics_manager, ) self.camera_maintainer.start() diff --git a/frigate/camera/maintainer.py b/frigate/camera/maintainer.py index dd978bbfc..dd122d4fd 100644 --- a/frigate/camera/maintainer.py +++ b/frigate/camera/maintainer.py @@ -6,7 +6,7 @@ import os import shutil import threading from multiprocessing import Queue -from multiprocessing.managers import DictProxy +from multiprocessing.managers import DictProxy, SyncManager from multiprocessing.synchronize import Event as MpEvent from frigate.camera import CameraMetrics, PTZMetrics @@ -35,6 +35,7 @@ class CameraMaintainer(threading.Thread): camera_metrics: DictProxy, ptz_metrics: dict[str, PTZMetrics], stop_event: MpEvent, + metrics_manager: SyncManager, ): super().__init__(name="camera_processor") self.config = config @@ -56,6 +57,7 @@ class CameraMaintainer(threading.Thread): self.shm_count = self.__calculate_shm_frame_count() self.camera_processes: dict[str, mp.Process] = {} self.capture_processes: dict[str, mp.Process] = {} + self.metrics_manager = metrics_manager def __init_historical_regions(self) -> None: # delete region grids for removed or renamed cameras @@ -128,7 +130,7 @@ class CameraMaintainer(threading.Thread): return if 
runtime: - self.camera_metrics[name] = CameraMetrics() + self.camera_metrics[name] = CameraMetrics(self.metrics_manager) self.ptz_metrics[name] = PTZMetrics(autotracker_enabled=False) self.region_grids[name] = get_camera_regions_grid( name, diff --git a/frigate/output/birdseye.py b/frigate/output/birdseye.py index a19436d5e..0939b5ce4 100644 --- a/frigate/output/birdseye.py +++ b/frigate/output/birdseye.py @@ -319,35 +319,48 @@ class BirdsEyeFrameManager: self.frame[:] = self.blank_frame self.cameras = {} - for camera, settings in self.config.cameras.items(): - # precalculate the coordinates for all the channels - y, u1, u2, v1, v2 = get_yuv_crop( - settings.frame_shape_yuv, - ( - 0, - 0, - settings.frame_shape[1], - settings.frame_shape[0], - ), - ) - self.cameras[camera] = { - "dimensions": [settings.detect.width, settings.detect.height], - "last_active_frame": 0.0, - "current_frame": 0.0, - "layout_frame": 0.0, - "channel_dims": { - "y": y, - "u1": u1, - "u2": u2, - "v1": v1, - "v2": v2, - }, - } + for camera in self.config.cameras.keys(): + self.add_camera(camera) self.camera_layout = [] self.active_cameras = set() self.last_output_time = 0.0 + def add_camera(self, cam: str): + """Add a camera to self.cameras with the correct structure.""" + settings = self.config.cameras[cam] + # precalculate the coordinates for all the channels + y, u1, u2, v1, v2 = get_yuv_crop( + settings.frame_shape_yuv, + ( + 0, + 0, + settings.frame_shape[1], + settings.frame_shape[0], + ), + ) + self.cameras[cam] = { + "dimensions": [ + settings.detect.width, + settings.detect.height, + ], + "last_active_frame": 0.0, + "current_frame": 0.0, + "layout_frame": 0.0, + "channel_dims": { + "y": y, + "u1": u1, + "u2": u2, + "v1": v1, + "v2": v2, + }, + } + + def remove_camera(self, cam: str): + """Remove a camera from self.cameras.""" + if cam in self.cameras: + del self.cameras[cam] + def clear_frame(self): logger.debug("Clearing the birdseye frame") self.frame[:] = self.blank_frame @@ 
-774,7 +787,7 @@ class Birdseye: self.broadcaster = BroadcastThread( "birdseye", self.converter, websocket_server, stop_event ) - self.birdseye_manager = BirdsEyeFrameManager(config, stop_event) + self.birdseye_manager = BirdsEyeFrameManager(self.config, stop_event) self.frame_manager = SharedMemoryFrameManager() self.stop_event = stop_event self.requestor = InterProcessRequestor() @@ -804,6 +817,16 @@ class Birdseye: self.birdseye_manager.clear_frame() self.__send_new_frame() + def add_camera(self, camera: str) -> None: + """Add a camera to the birdseye manager.""" + self.birdseye_manager.add_camera(camera) + logger.debug(f"Added camera {camera} to birdseye") + + def remove_camera(self, camera: str) -> None: + """Remove a camera from the birdseye manager.""" + self.birdseye_manager.remove_camera(camera) + logger.debug(f"Removed camera {camera} from birdseye") + def write_data( self, camera: str, diff --git a/frigate/output/output.py b/frigate/output/output.py index da5906e78..f176b2e4c 100644 --- a/frigate/output/output.py +++ b/frigate/output/output.py @@ -133,7 +133,7 @@ class OutputProcess(FrigateProcess): # check if there is an updated config updates = config_subscriber.check_for_updates() - if "add" in updates: + if CameraConfigUpdateEnum.add in updates: for camera in updates["add"]: jsmpeg_cameras[camera] = JsmpegCamera( cam_config, self.stop_event, websocket_server @@ -141,6 +141,12 @@ class OutputProcess(FrigateProcess): preview_recorders[camera] = PreviewRecorder(cam_config) preview_write_times[camera] = 0 + if ( + self.config.birdseye.enabled + and self.config.cameras[camera].birdseye.enabled + ): + birdseye.add_camera(camera) + (topic, data) = detection_subscriber.check_for_update(timeout=1) now = datetime.datetime.now().timestamp() From 9c2ba152e1f4b7ef40853f2452cb289ad2181684 Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Sat, 21 Jun 2025 21:44:37 -0500 Subject: [PATCH 065/144] Catch invalid character 
index in lpr CTC decoder (#18825) --- frigate/data_processing/common/license_plate/mixin.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/frigate/data_processing/common/license_plate/mixin.py b/frigate/data_processing/common/license_plate/mixin.py index 2d63c1c69..7f6a27c62 100644 --- a/frigate/data_processing/common/license_plate/mixin.py +++ b/frigate/data_processing/common/license_plate/mixin.py @@ -1610,9 +1610,9 @@ class CTCDecoder: self.characters = [] if character_dict_path and os.path.exists(character_dict_path): with open(character_dict_path, "r", encoding="utf-8") as f: - self.characters = ["blank"] + [ - line.strip() for line in f if line.strip() - ] + self.characters = ( + ["blank"] + [line.strip() for line in f if line.strip()] + [" "] + ) else: self.characters = [ "blank", @@ -1747,7 +1747,7 @@ class CTCDecoder: merged_path.append(char_index) merged_probs.append(seq_log_probs[t, char_index]) - result = "".join(self.char_map[idx] for idx in merged_path) + result = "".join(self.char_map.get(idx, "") for idx in merged_path) results.append(result) confidence = np.exp(merged_probs).tolist() From 3327be05ea98a587e7faceeb09a3244cb2a140ef Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Mon, 23 Jun 2025 08:40:28 -0600 Subject: [PATCH 066/144] Classification model cover images (#18843) * Move to separate component * Add cover images for clssification models --- .../classification/ModelSelectionView.tsx | 74 ++++++++++++++----- 1 file changed, 54 insertions(+), 20 deletions(-) diff --git a/web/src/views/classification/ModelSelectionView.tsx b/web/src/views/classification/ModelSelectionView.tsx index 63133842a..aa2f94c6a 100644 --- a/web/src/views/classification/ModelSelectionView.tsx +++ b/web/src/views/classification/ModelSelectionView.tsx @@ -1,3 +1,4 @@ +import { baseUrl } from "@/api/baseUrl"; import ActivityIndicator from "@/components/indicators/activity-indicator"; import { cn } from "@/lib/utils"; import { @@ -37,27 +38,60 
@@ export default function ModelSelectionView({ return (
    {classificationConfigs.map((config) => ( -
    onClick(config)} - onContextMenu={() => { - // e.stopPropagation(); - // e.preventDefault(); - // handleClickEvent(true); - }} - > -
    -
    - {config.name} ({config.state_config != null ? "State" : "Object"}{" "} - Classification) -
    -
    + onClick(config)} /> ))}
    ); } + +type ModelCardProps = { + config: CustomClassificationModelConfig; + onClick: () => void; +}; +function ModelCard({ config, onClick }: ModelCardProps) { + const { data: dataset } = useSWR<{ + [id: string]: string[]; + }>(`classification/${config.name}/dataset`, { revalidateOnFocus: false }); + + const coverImages = useMemo(() => { + if (!dataset) { + return {}; + } + + const imageMap: { [key: string]: string } = {}; + + for (const [key, imageList] of Object.entries(dataset)) { + if (imageList.length > 0) { + imageMap[key] = imageList[0]; + } + } + + return imageMap; + }, [dataset]); + + return ( +
    onClick()} + > +
    + {Object.entries(coverImages).map(([key, image]) => ( + + ))} +
    +
    + {config.name} ({config.state_config != null ? "State" : "Object"}{" "} + Classification) +
    +
    + ); +} From e1ee6f010fdcad2d3277eb9782f885f17d41bf1d Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Mon, 23 Jun 2025 15:55:57 -0600 Subject: [PATCH 067/144] Fix process name --- frigate/video.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/frigate/video.py b/frigate/video.py index 98f3c7a8b..e82faf268 100755 --- a/frigate/video.py +++ b/frigate/video.py @@ -55,7 +55,7 @@ from frigate.util.process import FrigateProcess logger = logging.getLogger(__name__) -def stop_ffmpeg(ffmpeg_process, logger): +def stop_ffmpeg(ffmpeg_process: sp.Popen[Any], logger: logging.Logger): logger.info("Terminating the existing ffmpeg process...") ffmpeg_process.terminate() try: @@ -451,7 +451,7 @@ class CameraCapture(FrigateProcess): def __init__( self, config: CameraConfig, shm_frame_count: int, camera_metrics: CameraMetrics ) -> None: - super().__init__(name=f"camera_capture:{config.name}", daemon=True) + super().__init__(name=f"frigate.capture:{config.name}", daemon=True) self.config = config self.shm_frame_count = shm_frame_count self.camera_metrics = camera_metrics @@ -483,7 +483,7 @@ class CameraTracker(FrigateProcess): ptz_metrics: PTZMetrics, region_grid: list[list[dict[str, Any]]], ) -> None: - super().__init__(name=f"camera_processor:{config.name}", daemon=True) + super().__init__(name=f"frigate.process:{config.name}", daemon=True) self.config = config self.model_config = model_config self.labelmap = labelmap From 542bf05bb8fa05b22df89785b8b5312a3fe79845 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Tue, 24 Jun 2025 11:41:11 -0600 Subject: [PATCH 068/144] Handle SIGINT with forkserver (#18860) * Pass stopevent from main start * Share stop event across processes * preload modules * remove explicit os._exit call --------- Co-authored-by: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> --- frigate/__main__.py | 20 +++++++++++++++++++- frigate/app.py | 19 ++++++++++--------- frigate/camera/maintainer.py | 5 ++++- 
frigate/embeddings/__init__.py | 8 ++++++-- frigate/events/audio.py | 4 +++- frigate/object_detection/base.py | 6 +++++- frigate/output/output.py | 5 +++-- frigate/record/record.py | 5 +++-- frigate/review/review.py | 5 +++-- frigate/util/process.py | 26 ++++---------------------- frigate/video.py | 11 ++++++++--- 11 files changed, 68 insertions(+), 46 deletions(-) diff --git a/frigate/__main__.py b/frigate/__main__.py index 6dd5d130e..f3181e494 100644 --- a/frigate/__main__.py +++ b/frigate/__main__.py @@ -23,6 +23,10 @@ def main() -> None: setup_logging(manager) threading.current_thread().name = "frigate" + stop_event = mp.Event() + + # send stop event on SIGINT + signal.signal(signal.SIGINT, lambda sig, frame: stop_event.set()) # Make sure we exit cleanly on SIGTERM. signal.signal(signal.SIGTERM, lambda sig, frame: sys.exit()) @@ -110,9 +114,23 @@ def main() -> None: sys.exit(0) # Run the main application. - FrigateApp(config, manager).start() + FrigateApp(config, manager, stop_event).start() if __name__ == "__main__": + mp.set_forkserver_preload( + [ + # Standard library and core dependencies + "sqlite3", + # Third-party libraries commonly used in Frigate + "numpy", + "cv2", + "peewee", + "zmq", + "ruamel.yaml", + # Frigate core modules + "frigate.camera.maintainer", + ] + ) mp.set_start_method("forkserver", force=True) main() diff --git a/frigate/app.py b/frigate/app.py index 48d36988f..9a662dd18 100644 --- a/frigate/app.py +++ b/frigate/app.py @@ -81,10 +81,12 @@ logger = logging.getLogger(__name__) class FrigateApp: - def __init__(self, config: FrigateConfig, manager: SyncManager) -> None: + def __init__( + self, config: FrigateConfig, manager: SyncManager, stop_event: MpEvent + ) -> None: self.metrics_manager = manager self.audio_process: Optional[mp.Process] = None - self.stop_event: MpEvent = mp.Event() + self.stop_event = stop_event self.detection_queue: Queue = mp.Queue() self.detectors: dict[str, ObjectDetectProcess] = {} self.detection_shms: 
list[mp.shared_memory.SharedMemory] = [] @@ -225,14 +227,14 @@ class FrigateApp: self.processes["go2rtc"] = proc.info["pid"] def init_recording_manager(self) -> None: - recording_process = RecordProcess(self.config) + recording_process = RecordProcess(self.config, self.stop_event) self.recording_process = recording_process recording_process.start() self.processes["recording"] = recording_process.pid or 0 logger.info(f"Recording process started: {recording_process.pid}") def init_review_segment_manager(self) -> None: - review_segment_process = ReviewProcess(self.config) + review_segment_process = ReviewProcess(self.config, self.stop_event) self.review_segment_process = review_segment_process review_segment_process.start() self.processes["review_segment"] = review_segment_process.pid or 0 @@ -252,8 +254,7 @@ class FrigateApp: return embedding_process = EmbeddingProcess( - self.config, - self.embeddings_metrics, + self.config, self.embeddings_metrics, self.stop_event ) self.embedding_process = embedding_process embedding_process.start() @@ -389,6 +390,7 @@ class FrigateApp: list(self.config.cameras.keys()), self.config, detector_config, + self.stop_event, ) def start_ptz_autotracker(self) -> None: @@ -412,7 +414,7 @@ class FrigateApp: self.detected_frames_processor.start() def start_video_output_processor(self) -> None: - output_processor = OutputProcess(self.config) + output_processor = OutputProcess(self.config, self.stop_event) self.output_processor = output_processor output_processor.start() logger.info(f"Output process started: {output_processor.pid}") @@ -438,7 +440,7 @@ class FrigateApp: if audio_cameras: self.audio_process = AudioProcessor( - self.config, audio_cameras, self.camera_metrics + self.config, audio_cameras, self.camera_metrics, self.stop_event ) self.audio_process.start() self.processes["audio_detector"] = self.audio_process.pid or 0 @@ -666,4 +668,3 @@ class FrigateApp: _stop_logging() self.metrics_manager.shutdown() - os._exit(os.EX_OK) diff 
--git a/frigate/camera/maintainer.py b/frigate/camera/maintainer.py index dd122d4fd..5bd97136c 100644 --- a/frigate/camera/maintainer.py +++ b/frigate/camera/maintainer.py @@ -165,6 +165,7 @@ class CameraMaintainer(threading.Thread): self.camera_metrics[name], self.ptz_metrics[name], self.region_grids[name], + self.stop_event, ) self.camera_processes[config.name] = camera_process camera_process.start() @@ -184,7 +185,9 @@ class CameraMaintainer(threading.Thread): frame_size = config.frame_shape_yuv[0] * config.frame_shape_yuv[1] self.frame_manager.create(f"{config.name}_frame{i}", frame_size) - capture_process = CameraCapture(config, count, self.camera_metrics[name]) + capture_process = CameraCapture( + config, count, self.camera_metrics[name], self.stop_event + ) capture_process.daemon = True self.capture_processes[name] = capture_process capture_process.start() diff --git a/frigate/embeddings/__init__.py b/frigate/embeddings/__init__.py index cb897ed4b..d4887e0d2 100644 --- a/frigate/embeddings/__init__.py +++ b/frigate/embeddings/__init__.py @@ -6,6 +6,7 @@ import logging import os import threading from json.decoder import JSONDecodeError +from multiprocessing.synchronize import Event as MpEvent from typing import Any, Union import regex @@ -29,9 +30,12 @@ logger = logging.getLogger(__name__) class EmbeddingProcess(FrigateProcess): def __init__( - self, config: FrigateConfig, metrics: DataProcessorMetrics | None + self, + config: FrigateConfig, + metrics: DataProcessorMetrics | None, + stop_event: MpEvent, ) -> None: - super().__init__(name="frigate.embeddings_manager", daemon=True) + super().__init__(stop_event, name="frigate.embeddings_manager", daemon=True) self.config = config self.metrics = metrics diff --git a/frigate/events/audio.py b/frigate/events/audio.py index 7f94c2a00..03c750a06 100644 --- a/frigate/events/audio.py +++ b/frigate/events/audio.py @@ -7,6 +7,7 @@ import string import threading import time from multiprocessing.managers import DictProxy 
+from multiprocessing.synchronize import Event as MpEvent from typing import Any, Tuple import numpy as np @@ -84,8 +85,9 @@ class AudioProcessor(FrigateProcess): config: FrigateConfig, cameras: list[CameraConfig], camera_metrics: DictProxy, + stop_event: MpEvent, ): - super().__init__(name="frigate.audio_manager", daemon=True) + super().__init__(stop_event, name="frigate.audio_manager", daemon=True) self.camera_metrics = camera_metrics self.cameras = cameras diff --git a/frigate/object_detection/base.py b/frigate/object_detection/base.py index 2953f86eb..32f33ffa5 100644 --- a/frigate/object_detection/base.py +++ b/frigate/object_detection/base.py @@ -95,8 +95,9 @@ class DetectorRunner(FrigateProcess): start_time: Value, config: FrigateConfig, detector_config: BaseDetectorConfig, + stop_event: MpEvent, ) -> None: - super().__init__(name=name, daemon=True) + super().__init__(stop_event, name=name, daemon=True) self.detection_queue = detection_queue self.cameras = cameras self.avg_speed = avg_speed @@ -166,6 +167,7 @@ class ObjectDetectProcess: cameras: list[str], config: FrigateConfig, detector_config: BaseDetectorConfig, + stop_event: MpEvent, ): self.name = name self.cameras = cameras @@ -175,6 +177,7 @@ class ObjectDetectProcess: self.detect_process: FrigateProcess | None = None self.config = config self.detector_config = detector_config + self.stop_event = stop_event self.start_or_restart() def stop(self): @@ -202,6 +205,7 @@ class ObjectDetectProcess: self.detection_start, self.config, self.detector_config, + self.stop_event, ) self.detect_process.start() diff --git a/frigate/output/output.py b/frigate/output/output.py index f176b2e4c..34c9e33c0 100644 --- a/frigate/output/output.py +++ b/frigate/output/output.py @@ -5,6 +5,7 @@ import logging import os import shutil import threading +from multiprocessing.synchronize import Event as MpEvent from wsgiref.simple_server import make_server from ws4py.server.wsgirefserver import ( @@ -72,8 +73,8 @@ def 
check_disabled_camera_update( class OutputProcess(FrigateProcess): - def __init__(self, config: FrigateConfig) -> None: - super().__init__(name="frigate.output", daemon=True) + def __init__(self, config: FrigateConfig, stop_event: MpEvent) -> None: + super().__init__(stop_event, name="frigate.output", daemon=True) self.config = config def run(self) -> None: diff --git a/frigate/record/record.py b/frigate/record/record.py index c52260745..b04a68e8c 100644 --- a/frigate/record/record.py +++ b/frigate/record/record.py @@ -1,6 +1,7 @@ """Run recording maintainer and cleanup.""" import logging +from multiprocessing.synchronize import Event as MpEvent from playhouse.sqliteq import SqliteQueueDatabase @@ -13,8 +14,8 @@ logger = logging.getLogger(__name__) class RecordProcess(FrigateProcess): - def __init__(self, config: FrigateConfig) -> None: - super().__init__(name="frigate.recording_manager", daemon=True) + def __init__(self, config: FrigateConfig, stop_event: MpEvent) -> None: + super().__init__(stop_event, name="frigate.recording_manager", daemon=True) self.config = config def run(self) -> None: diff --git a/frigate/review/review.py b/frigate/review/review.py index 677d07776..917a53a4b 100644 --- a/frigate/review/review.py +++ b/frigate/review/review.py @@ -1,6 +1,7 @@ """Run recording maintainer and cleanup.""" import logging +from multiprocessing.synchronize import Event as MpEvent from frigate.config import FrigateConfig from frigate.review.maintainer import ReviewSegmentMaintainer @@ -10,8 +11,8 @@ logger = logging.getLogger(__name__) class ReviewProcess(FrigateProcess): - def __init__(self, config: FrigateConfig) -> None: - super().__init__(name="frigate.review_segment_manager", daemon=True) + def __init__(self, config: FrigateConfig, stop_event: MpEvent) -> None: + super().__init__(stop_event, name="frigate.review_segment_manager", daemon=True) self.config = config def run(self) -> None: diff --git a/frigate/util/process.py b/frigate/util/process.py index 
9234a0ea6..830818d4d 100644 --- a/frigate/util/process.py +++ b/frigate/util/process.py @@ -1,10 +1,9 @@ import faulthandler import logging import multiprocessing as mp -import signal -import sys import threading from logging.handlers import QueueHandler +from multiprocessing.synchronize import Event as MpEvent from typing import Callable, Optional from setproctitle import setproctitle @@ -16,6 +15,7 @@ from frigate.config.logger import LoggerConfig class BaseProcess(mp.Process): def __init__( self, + stop_event: MpEvent, *, name: Optional[str] = None, target: Optional[Callable] = None, @@ -23,6 +23,7 @@ class BaseProcess(mp.Process): kwargs: dict = {}, daemon: Optional[bool] = None, ): + self.stop_event = stop_event super().__init__( name=name, target=target, args=args, kwargs=kwargs, daemon=daemon ) @@ -42,14 +43,6 @@ class BaseProcess(mp.Process): class FrigateProcess(BaseProcess): logger: logging.Logger - @property - def stop_event(self) -> threading.Event: - # Lazily create the stop_event. This allows the signal handler to tell if anyone is - # monitoring the stop event, and to raise a SystemExit if not. - if "stop_event" not in self.__dict__: - self.__dict__["stop_event"] = threading.Event() - return self.__dict__["stop_event"] - def before_start(self) -> None: self.__log_queue = frigate.log.log_listener.queue @@ -58,18 +51,7 @@ class FrigateProcess(BaseProcess): threading.current_thread().name = f"process:{self.name}" faulthandler.enable() - def receiveSignal(signalNumber, frame): - # Get the stop_event through the dict to bypass lazy initialization. - stop_event = self.__dict__.get("stop_event") - if stop_event is not None: - # Someone is monitoring stop_event. We should set it. - stop_event.set() - else: - # Nobody is monitoring stop_event. We should raise SystemExit. 
- sys.exit() - - signal.signal(signal.SIGTERM, receiveSignal) - signal.signal(signal.SIGINT, receiveSignal) + # setup logging self.logger = logging.getLogger(self.name) logging.basicConfig(handlers=[], force=True) logging.getLogger().addHandler(QueueHandler(self.__log_queue)) diff --git a/frigate/video.py b/frigate/video.py index e82faf268..3bc2702a5 100755 --- a/frigate/video.py +++ b/frigate/video.py @@ -449,9 +449,13 @@ class CameraCaptureRunner(threading.Thread): class CameraCapture(FrigateProcess): def __init__( - self, config: CameraConfig, shm_frame_count: int, camera_metrics: CameraMetrics + self, + config: CameraConfig, + shm_frame_count: int, + camera_metrics: CameraMetrics, + stop_event: MpEvent, ) -> None: - super().__init__(name=f"frigate.capture:{config.name}", daemon=True) + super().__init__(stop_event, name=f"frigate.capture:{config.name}", daemon=True) self.config = config self.shm_frame_count = shm_frame_count self.camera_metrics = camera_metrics @@ -482,8 +486,9 @@ class CameraTracker(FrigateProcess): camera_metrics: CameraMetrics, ptz_metrics: PTZMetrics, region_grid: list[list[dict[str, Any]]], + stop_event: MpEvent, ) -> None: - super().__init__(name=f"frigate.process:{config.name}", daemon=True) + super().__init__(stop_event, name=f"frigate.process:{config.name}", daemon=True) self.config = config self.model_config = model_config self.labelmap = labelmap From da0248db15b4d539d606564260ddd30f4566a61a Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Tue, 24 Jun 2025 16:19:09 -0500 Subject: [PATCH 069/144] Don't try to close or join mp manager queues (#18866) Multiprocessing Manager queues don't have a close() or join_thread() method, and the Manager will clean it up appropriately after we empty it. This prevents an infinite loop when an AttributeError exception fires for Manager AutoProxy queue objects. 
--- frigate/util/builtin.py | 25 ++++++++++++++++--------- 1 file changed, 16 insertions(+), 9 deletions(-) diff --git a/frigate/util/builtin.py b/frigate/util/builtin.py index 90c0f9227..d4f8d7e37 100644 --- a/frigate/util/builtin.py +++ b/frigate/util/builtin.py @@ -5,7 +5,7 @@ import copy import datetime import logging import math -import multiprocessing as mp +import multiprocessing.queues import queue import re import shlex @@ -338,16 +338,23 @@ def clear_and_unlink(file: Path, missing_ok: bool = True) -> None: file.unlink(missing_ok=missing_ok) -def empty_and_close_queue(q: mp.Queue): +def empty_and_close_queue(q): while True: try: - try: - q.get(block=True, timeout=0.5) - except (queue.Empty, EOFError): - q.close() - q.join_thread() - return - except AttributeError: + q.get(block=True, timeout=0.5) + except (queue.Empty, EOFError): + break + except Exception as e: + logger.debug(f"Error while emptying queue: {e}") + break + + # close the queue if it is a multiprocessing queue + # manager proxy queues do not have close or join_thread method + if isinstance(q, multiprocessing.queues.Queue): + try: + q.close() + q.join_thread() + except Exception: pass From ec6c04e49aaff4c1913c8b7fd3fdccace5978940 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Wed, 25 Jun 2025 07:24:45 -0600 Subject: [PATCH 070/144] Improve logging (#18867) * Ignore numpy get limits warning * Add function wrapper to redirect stdout and stderr to logpipe * Save stderr too * Add more to catch * run logpipe * Use other logging redirect class * Use other logging redirect class * add decorator for redirecting c/c++ level output to logger * fix typing --------- Co-authored-by: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> --- docker/main/Dockerfile | 3 + frigate/data_processing/common/face/model.py | 2 + frigate/detectors/plugins/cpu_tfl.py | 2 + frigate/embeddings/onnx/face_embedding.py | 2 + frigate/events/audio.py | 6 +- frigate/log.py | 190 ++++++++++++++++++- 
frigate/util/classification.py | 19 +- 7 files changed, 206 insertions(+), 18 deletions(-) diff --git a/docker/main/Dockerfile b/docker/main/Dockerfile index 90e174d10..2f5db433b 100644 --- a/docker/main/Dockerfile +++ b/docker/main/Dockerfile @@ -224,6 +224,9 @@ ENV TRANSFORMERS_NO_ADVISORY_WARNINGS=1 # Set OpenCV ffmpeg loglevel to fatal: https://ffmpeg.org/doxygen/trunk/log_8h.html ENV OPENCV_FFMPEG_LOGLEVEL=8 +# Set NumPy to ignore getlimits warning +ENV PYTHONWARNINGS="ignore:::numpy.core.getlimits" + # Set HailoRT to disable logging ENV HAILORT_LOGGER_PATH=NONE diff --git a/frigate/data_processing/common/face/model.py b/frigate/data_processing/common/face/model.py index aea6751a0..f230a1b2c 100644 --- a/frigate/data_processing/common/face/model.py +++ b/frigate/data_processing/common/face/model.py @@ -11,6 +11,7 @@ from scipy import stats from frigate.config import FrigateConfig from frigate.const import MODEL_CACHE_DIR from frigate.embeddings.onnx.face_embedding import ArcfaceEmbedding, FaceNetEmbedding +from frigate.log import redirect_output_to_logger logger = logging.getLogger(__name__) @@ -37,6 +38,7 @@ class FaceRecognizer(ABC): def classify(self, face_image: np.ndarray) -> tuple[str, float] | None: pass + @redirect_output_to_logger(logger, logging.DEBUG) def init_landmark_detector(self) -> None: landmark_model = os.path.join(MODEL_CACHE_DIR, "facedet/landmarkdet.yaml") diff --git a/frigate/detectors/plugins/cpu_tfl.py b/frigate/detectors/plugins/cpu_tfl.py index fc8db0f4b..37cc10777 100644 --- a/frigate/detectors/plugins/cpu_tfl.py +++ b/frigate/detectors/plugins/cpu_tfl.py @@ -5,6 +5,7 @@ from typing_extensions import Literal from frigate.detectors.detection_api import DetectionApi from frigate.detectors.detector_config import BaseDetectorConfig +from frigate.log import redirect_output_to_logger from ..detector_utils import tflite_detect_raw, tflite_init @@ -27,6 +28,7 @@ class CpuDetectorConfig(BaseDetectorConfig): class CpuTfl(DetectionApi): 
type_key = DETECTOR_KEY + @redirect_output_to_logger(logger, logging.DEBUG) def __init__(self, detector_config: CpuDetectorConfig): interpreter = Interpreter( model_path=detector_config.model.path, diff --git a/frigate/embeddings/onnx/face_embedding.py b/frigate/embeddings/onnx/face_embedding.py index c0f35a581..acb4507a2 100644 --- a/frigate/embeddings/onnx/face_embedding.py +++ b/frigate/embeddings/onnx/face_embedding.py @@ -6,6 +6,7 @@ import os import numpy as np from frigate.const import MODEL_CACHE_DIR +from frigate.log import redirect_output_to_logger from frigate.util.downloader import ModelDownloader from .base_embedding import BaseEmbedding @@ -53,6 +54,7 @@ class FaceNetEmbedding(BaseEmbedding): self._load_model_and_utils() logger.debug(f"models are already downloaded for {self.model_name}") + @redirect_output_to_logger(logger, logging.DEBUG) def _load_model_and_utils(self): if self.runner is None: if self.downloader: diff --git a/frigate/events/audio.py b/frigate/events/audio.py index 03c750a06..f99e6fe41 100644 --- a/frigate/events/audio.py +++ b/frigate/events/audio.py @@ -37,7 +37,7 @@ from frigate.data_processing.real_time.audio_transcription import ( AudioTranscriptionRealTimeProcessor, ) from frigate.ffmpeg_presets import parse_preset_input -from frigate.log import LogPipe +from frigate.log import LogPipe, redirect_output_to_logger from frigate.object_detection.base import load_labels from frigate.util.builtin import get_ffmpeg_arg_list from frigate.util.process import FrigateProcess @@ -49,6 +49,9 @@ except ModuleNotFoundError: from tensorflow.lite.python.interpreter import Interpreter +logger = logging.getLogger(__name__) + + def get_ffmpeg_command(ffmpeg: FfmpegConfig) -> list[str]: ffmpeg_input: CameraInput = [i for i in ffmpeg.inputs if "audio" in i.roles][0] input_args = get_ffmpeg_arg_list(ffmpeg.global_args) + ( @@ -423,6 +426,7 @@ class AudioEventMaintainer(threading.Thread): class AudioTfl: + @redirect_output_to_logger(logger, 
logging.DEBUG) def __init__(self, stop_event: threading.Event, num_threads=2): self.stop_event = stop_event self.num_threads = num_threads diff --git a/frigate/log.py b/frigate/log.py index 2e9c781f1..11f2da254 100644 --- a/frigate/log.py +++ b/frigate/log.py @@ -1,15 +1,18 @@ # In log.py import atexit +import io import logging import os import sys import threading from collections import deque +from contextlib import contextmanager from enum import Enum +from functools import wraps from logging.handlers import QueueHandler, QueueListener from multiprocessing.managers import SyncManager -from queue import Queue -from typing import Deque, Optional +from queue import Empty, Queue +from typing import Any, Callable, Deque, Generator, Optional from frigate.util.builtin import clean_camera_user_pass @@ -102,11 +105,11 @@ os.register_at_fork(after_in_child=reopen_std_streams) # based on https://codereview.stackexchange.com/a/17959 class LogPipe(threading.Thread): - def __init__(self, log_name: str): + def __init__(self, log_name: str, level: int = logging.ERROR): """Setup the object with a logger and start the thread""" super().__init__(daemon=False) self.logger = logging.getLogger(log_name) - self.level = logging.ERROR + self.level = level self.deque: Deque[str] = deque(maxlen=100) self.fdRead, self.fdWrite = os.pipe() self.pipeReader = os.fdopen(self.fdRead) @@ -135,3 +138,182 @@ class LogPipe(threading.Thread): def close(self) -> None: """Close the write end of the pipe.""" os.close(self.fdWrite) + + +class LogRedirect(io.StringIO): + """ + A custom file-like object to capture stdout and process it. + It extends io.StringIO to capture output and then processes it + line by line. 
+ """ + + def __init__(self, logger_instance: logging.Logger, level: int): + super().__init__() + self.logger = logger_instance + self.log_level = level + self._line_buffer: list[str] = [] + + def write(self, s: Any) -> int: + if not isinstance(s, str): + s = str(s) + + self._line_buffer.append(s) + + # Process output line by line if a newline is present + if "\n" in s: + full_output = "".join(self._line_buffer) + lines = full_output.splitlines(keepends=True) + self._line_buffer = [] + + for line in lines: + if line.endswith("\n"): + self._process_line(line.rstrip("\n")) + else: + self._line_buffer.append(line) + + return len(s) + + def _process_line(self, line: str) -> None: + self.logger.log(self.log_level, line) + + def flush(self) -> None: + if self._line_buffer: + full_output = "".join(self._line_buffer) + self._line_buffer = [] + if full_output: # Only process if there's content + self._process_line(full_output) + + def __enter__(self) -> "LogRedirect": + """Context manager entry point.""" + return self + + def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: + """Context manager exit point. 
Ensures buffered content is flushed.""" + self.flush() + + +@contextmanager +def redirect_fd_to_queue(queue: Queue[str]) -> Generator[None, None, None]: + """Redirect file descriptor 1 (stdout) to a pipe and capture output in a queue.""" + stdout_fd = os.dup(1) + read_fd, write_fd = os.pipe() + os.dup2(write_fd, 1) + os.close(write_fd) + + stop_event = threading.Event() + + def reader() -> None: + """Read from pipe and put lines in queue until stop_event is set.""" + try: + with os.fdopen(read_fd, "r") as pipe: + while not stop_event.is_set(): + line = pipe.readline() + if not line: # EOF + break + queue.put(line.strip()) + except OSError as e: + queue.put(f"Reader error: {e}") + finally: + if not stop_event.is_set(): + stop_event.set() + + reader_thread = threading.Thread(target=reader, daemon=False) + reader_thread.start() + + try: + yield + finally: + os.dup2(stdout_fd, 1) + os.close(stdout_fd) + stop_event.set() + reader_thread.join(timeout=1.0) + try: + os.close(read_fd) + except OSError: + pass + + +def redirect_output_to_logger(logger: logging.Logger, level: int) -> Any: + """Decorator to redirect both Python sys.stdout/stderr and C-level stdout to logger.""" + + def decorator(func: Callable) -> Callable: + @wraps(func) + def wrapper(*args: Any, **kwargs: Any) -> Any: + queue: Queue[str] = Queue() + + log_redirect = LogRedirect(logger, level) + old_stdout = sys.stdout + old_stderr = sys.stderr + sys.stdout = log_redirect + sys.stderr = log_redirect + + try: + # Redirect C-level stdout + with redirect_fd_to_queue(queue): + result = func(*args, **kwargs) + finally: + # Restore Python stdout/stderr + sys.stdout = old_stdout + sys.stderr = old_stderr + log_redirect.flush() + + # Log C-level output from queue + while True: + try: + logger.log(level, queue.get_nowait()) + except Empty: + break + + return result + + return wrapper + + return decorator + + +def suppress_os_output(func: Callable) -> Callable: + """ + A decorator that suppresses all output (stdout and 
stderr) + at the operating system file descriptor level for the decorated function. + This is useful for silencing noisy C/C++ libraries. + Note: This is a Unix-specific solution using os.dup2 and os.pipe. + It temporarily redirects file descriptors 1 (stdout) and 2 (stderr) + to a non-read pipe, effectively discarding their output. + """ + + @wraps(func) + def wrapper(*args: tuple, **kwargs: dict[str, Any]) -> Any: + # Save the original file descriptors for stdout (1) and stderr (2) + original_stdout_fd = os.dup(1) + original_stderr_fd = os.dup(2) + + # Create dummy pipes. We only need the write ends to redirect to. + # The data written to these pipes will be discarded as nothing + # will read from the read ends. + devnull_read_fd, devnull_write_fd = os.pipe() + + try: + # Redirect stdout (FD 1) and stderr (FD 2) to the write end of our dummy pipe + os.dup2(devnull_write_fd, 1) # Redirect stdout to devnull pipe + os.dup2(devnull_write_fd, 2) # Redirect stderr to devnull pipe + + # Execute the original function + result = func(*args, **kwargs) + + finally: + # Restore original stdout and stderr file descriptors (1 and 2) + # This is crucial to ensure normal printing resumes after the decorated function. + os.dup2(original_stdout_fd, 1) + os.dup2(original_stderr_fd, 2) + + # Close all duplicated and pipe file descriptors to prevent resource leaks. + # It's important to close the read end of the dummy pipe too, + # as nothing is explicitly reading from it. 
+ os.close(original_stdout_fd) + os.close(original_stderr_fd) + os.close(devnull_read_fd) + os.close(devnull_write_fd) + + return result + + return wrapper diff --git a/frigate/util/classification.py b/frigate/util/classification.py index a2ba1bf26..3c030a986 100644 --- a/frigate/util/classification.py +++ b/frigate/util/classification.py @@ -1,7 +1,7 @@ """Util for classification models.""" +import logging import os -import sys import cv2 import numpy as np @@ -9,6 +9,7 @@ import numpy as np from frigate.comms.embeddings_updater import EmbeddingsRequestEnum, EmbeddingsRequestor from frigate.comms.inter_process import InterProcessRequestor from frigate.const import CLIPS_DIR, MODEL_CACHE_DIR, UPDATE_MODEL_STATE +from frigate.log import redirect_output_to_logger from frigate.types import ModelStatusTypesEnum from frigate.util.process import FrigateProcess @@ -16,6 +17,8 @@ BATCH_SIZE = 16 EPOCHS = 50 LEARNING_RATE = 0.001 +logger = logging.getLogger(__name__) + def __generate_representative_dataset_factory(dataset_dir: str): def generate_representative_dataset(): @@ -36,6 +39,7 @@ def __generate_representative_dataset_factory(dataset_dir: str): return generate_representative_dataset +@redirect_output_to_logger(logger, logging.DEBUG) def __train_classification_model(model_name: str) -> bool: """Train a classification model.""" @@ -55,14 +59,6 @@ def __train_classification_model(model_name: str) -> bool: ] ) - # TF and Keras are very loud with logging - # we want to avoid these logs so we - # temporarily redirect stdout / stderr - original_stdout = sys.stdout - original_stderr = sys.stderr - sys.stdout = open(os.devnull, "w") - sys.stderr = open(os.devnull, "w") - # Start with imagenet base model with 35% of channels in each layer base_model = MobileNetV2( input_shape=(224, 224, 3), @@ -124,10 +120,6 @@ def __train_classification_model(model_name: str) -> bool: with open(os.path.join(model_dir, "model.tflite"), "wb") as f: f.write(tflite_model) - # restore original 
stdout / stderr - sys.stdout = original_stdout - sys.stderr = original_stderr - @staticmethod def kickoff_model_training( @@ -146,6 +138,7 @@ def kickoff_model_training( # tensorflow will free CPU / GPU memory # upon training completion training_process = FrigateProcess( + None, target=__train_classification_model, name=f"model_training:{model_name}", args=(model_name,), From cf62bee1703495d324b30525acdcd8204179ff58 Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Wed, 25 Jun 2025 16:45:36 -0500 Subject: [PATCH 071/144] Add ONVIF focus support (#18883) * backend * frontend and i18n --- frigate/ptz/onvif.py | 75 ++++++++++++++++++++++++--- web/public/locales/en/views/live.json | 8 +++ web/src/types/ptz.ts | 9 +++- web/src/views/live/LiveCameraView.tsx | 40 +++++++++++++- 4 files changed, 123 insertions(+), 9 deletions(-) diff --git a/frigate/ptz/onvif.py b/frigate/ptz/onvif.py index 81c8b9852..bd5bef0b0 100644 --- a/frigate/ptz/onvif.py +++ b/frigate/ptz/onvif.py @@ -33,6 +33,8 @@ class OnvifCommandEnum(str, Enum): stop = "stop" zoom_in = "zoom_in" zoom_out = "zoom_out" + focus_in = "focus_in" + focus_out = "focus_out" class OnvifController: @@ -185,6 +187,16 @@ class OnvifController: ptz: ONVIFService = await onvif.create_ptz_service() self.cams[camera_name]["ptz"] = ptz + imaging: ONVIFService = await onvif.create_imaging_service() + self.cams[camera_name]["imaging"] = imaging + try: + video_sources = await media.GetVideoSources() + if video_sources and len(video_sources) > 0: + self.cams[camera_name]["video_source_token"] = video_sources[0].token + except (Fault, ONVIFError, TransportError, Exception) as e: + logger.debug(f"Unable to get video sources for {camera_name}: {e}") + self.cams[camera_name]["video_source_token"] = None + # setup continuous moving request move_request = ptz.create_type("ContinuousMove") move_request.ProfileToken = profile.token @@ -366,7 +378,19 @@ class OnvifController: f"Disabling 
autotracking zooming for {camera_name}: Absolute zoom not supported. Exception: {e}" ) - # set relative pan/tilt space for autotracker + if self.cams[camera_name]["video_source_token"] is not None: + try: + imaging_capabilities = await imaging.GetImagingSettings( + {"VideoSourceToken": self.cams[camera_name]["video_source_token"]} + ) + if ( + hasattr(imaging_capabilities, "Focus") + and imaging_capabilities.Focus + ): + supported_features.append("focus") + except (Fault, ONVIFError, TransportError, Exception) as e: + logger.debug(f"Focus not supported for {camera_name}: {e}") + if ( self.config.cameras[camera_name].onvif.autotracking.enabled_in_config and self.config.cameras[camera_name].onvif.autotracking.enabled @@ -391,6 +415,18 @@ class OnvifController: "Zoom": True, } ) + if ( + "focus" in self.cams[camera_name]["features"] + and self.cams[camera_name]["video_source_token"] + ): + try: + stop_request = self.cams[camera_name]["imaging"].create_type("Stop") + stop_request.VideoSourceToken = self.cams[camera_name][ + "video_source_token" + ] + await self.cams[camera_name]["imaging"].Stop(stop_request) + except (Fault, ONVIFError, TransportError, Exception) as e: + logger.warning(f"Failed to stop focus for {camera_name}: {e}") self.cams[camera_name]["active"] = False async def _move(self, camera_name: str, command: OnvifCommandEnum) -> None: @@ -599,6 +635,35 @@ class OnvifController: self.cams[camera_name]["active"] = False + async def _focus(self, camera_name: str, command: OnvifCommandEnum) -> None: + if self.cams[camera_name]["active"]: + logger.warning( + f"{camera_name} is already performing an action, not moving..." 
+ ) + await self._stop(camera_name) + + if ( + "focus" not in self.cams[camera_name]["features"] + or not self.cams[camera_name]["video_source_token"] + ): + logger.error(f"{camera_name} does not support ONVIF continuous focus.") + return + + self.cams[camera_name]["active"] = True + move_request = self.cams[camera_name]["imaging"].create_type("Move") + move_request.VideoSourceToken = self.cams[camera_name]["video_source_token"] + move_request.Focus = { + "Continuous": { + "Speed": 0.5 if command == OnvifCommandEnum.focus_in else -0.5 + } + } + + try: + await self.cams[camera_name]["imaging"].Move(move_request) + except (Fault, ONVIFError, TransportError, Exception) as e: + logger.warning(f"Onvif sending focus request to {camera_name} failed: {e}") + self.cams[camera_name]["active"] = False + async def handle_command_async( self, camera_name: str, command: OnvifCommandEnum, param: str = "" ) -> None: @@ -622,11 +687,10 @@ class OnvifController: elif command == OnvifCommandEnum.move_relative: _, pan, tilt = param.split("_") await self._move_relative(camera_name, float(pan), float(tilt), 0, 1) - elif ( - command == OnvifCommandEnum.zoom_in - or command == OnvifCommandEnum.zoom_out - ): + elif command in (OnvifCommandEnum.zoom_in, OnvifCommandEnum.zoom_out): await self._zoom(camera_name, command) + elif command in (OnvifCommandEnum.focus_in, OnvifCommandEnum.focus_out): + await self._focus(camera_name, command) else: await self._move(camera_name, command) except (Fault, ONVIFError, TransportError, Exception) as e: @@ -637,7 +701,6 @@ class OnvifController: ) -> None: """ Handle ONVIF commands by scheduling them in the event loop. - This is the synchronous interface that schedules async work. 
""" future = asyncio.run_coroutine_threadsafe( self.handle_command_async(camera_name, command, param), self.loop diff --git a/web/public/locales/en/views/live.json b/web/public/locales/en/views/live.json index fea120601..2af399296 100644 --- a/web/public/locales/en/views/live.json +++ b/web/public/locales/en/views/live.json @@ -38,6 +38,14 @@ "label": "Zoom PTZ camera out" } }, + "focus": { + "in": { + "label": "Focus PTZ camera in" + }, + "out": { + "label": "Focus PTZ camera out" + } + }, "frame": { "center": { "label": "Click in the frame to center the PTZ camera" diff --git a/web/src/types/ptz.ts b/web/src/types/ptz.ts index 1a626972e..21a300b3d 100644 --- a/web/src/types/ptz.ts +++ b/web/src/types/ptz.ts @@ -1,4 +1,11 @@ -type PtzFeature = "pt" | "zoom" | "pt-r" | "zoom-r" | "zoom-a" | "pt-r-fov"; +type PtzFeature = + | "pt" + | "zoom" + | "pt-r" + | "zoom-r" + | "zoom-a" + | "pt-r-fov" + | "focus"; export type CameraPtzInfo = { name: string; diff --git a/web/src/views/live/LiveCameraView.tsx b/web/src/views/live/LiveCameraView.tsx index 9e9e0e974..69d4a26f4 100644 --- a/web/src/views/live/LiveCameraView.tsx +++ b/web/src/views/live/LiveCameraView.tsx @@ -92,6 +92,8 @@ import { LuX, } from "react-icons/lu"; import { + MdCenterFocusStrong, + MdCenterFocusWeak, MdClosedCaption, MdClosedCaptionDisabled, MdNoPhotography, @@ -809,10 +811,10 @@ function PtzControlPanel({ sendPtz("MOVE_DOWN"); break; case "+": - sendPtz("ZOOM_IN"); + sendPtz(modifiers.shift ? "FOCUS_IN" : "ZOOM_IN"); break; case "-": - sendPtz("ZOOM_OUT"); + sendPtz(modifiers.shift ? 
"FOCUS_OUT" : "ZOOM_OUT"); break; } }, @@ -923,6 +925,40 @@ function PtzControlPanel({ )} + {ptz?.features?.includes("focus") && ( + <> + { + e.preventDefault(); + sendPtz("FOCUS_IN"); + }} + onTouchStart={(e) => { + e.preventDefault(); + sendPtz("FOCUS_IN"); + }} + onMouseUp={onStop} + onTouchEnd={onStop} + > + + + { + e.preventDefault(); + sendPtz("FOCUS_OUT"); + }} + onTouchStart={(e) => { + e.preventDefault(); + sendPtz("FOCUS_OUT"); + }} + onMouseUp={onStop} + onTouchEnd={onStop} + > + + + + )} {ptz?.features?.includes("pt-r-fov") && ( From ceeb6543f53d735a8c890ba58e3e7a47ca3d054e Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Thu, 26 Jun 2025 07:32:48 -0600 Subject: [PATCH 072/144] 0.17 tweaks (#18892) * Set version * Cleanup more logs * Don't log matplotlib --- Makefile | 2 +- frigate/data_processing/real_time/bird.py | 2 ++ frigate/log.py | 5 +++-- 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/Makefile b/Makefile index e414ed65c..1c4e137a1 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ default_target: local COMMIT_HASH := $(shell git log -1 --pretty=format:"%h"|tail -1) -VERSION = 0.16.1 +VERSION = 0.17.0 IMAGE_REPO ?= ghcr.io/blakeblackshear/frigate GITHUB_REF_NAME ?= $(shell git rev-parse --abbrev-ref HEAD) BOARDS= #Initialized empty diff --git a/frigate/data_processing/real_time/bird.py b/frigate/data_processing/real_time/bird.py index 8d2c598fc..a51c7a7e8 100644 --- a/frigate/data_processing/real_time/bird.py +++ b/frigate/data_processing/real_time/bird.py @@ -13,6 +13,7 @@ from frigate.comms.event_metadata_updater import ( ) from frigate.config import FrigateConfig from frigate.const import MODEL_CACHE_DIR +from frigate.log import redirect_output_to_logger from frigate.util.object import calculate_region from ..types import DataProcessorMetrics @@ -76,6 +77,7 @@ class BirdRealTimeProcessor(RealTimeProcessorApi): except Exception as e: logger.error(f"Failed to download {path}: {e}") + @redirect_output_to_logger(logger, 
logging.DEBUG) def __build_detector(self) -> None: self.interpreter = Interpreter( model_path=os.path.join(MODEL_CACHE_DIR, "bird/bird.tflite"), diff --git a/frigate/log.py b/frigate/log.py index 11f2da254..f2171ffe0 100644 --- a/frigate/log.py +++ b/frigate/log.py @@ -80,6 +80,7 @@ def apply_log_levels(default: str, log_levels: dict[str, LogLevel]) -> None: log_levels = { "absl": LogLevel.error, "httpx": LogLevel.error, + "matplotlib": LogLevel.error, "tensorflow": LogLevel.error, "werkzeug": LogLevel.error, "ws4py": LogLevel.error, @@ -193,7 +194,7 @@ class LogRedirect(io.StringIO): @contextmanager -def redirect_fd_to_queue(queue: Queue[str]) -> Generator[None, None, None]: +def __redirect_fd_to_queue(queue: Queue[str]) -> Generator[None, None, None]: """Redirect file descriptor 1 (stdout) to a pipe and capture output in a queue.""" stdout_fd = os.dup(1) read_fd, write_fd = os.pipe() @@ -249,7 +250,7 @@ def redirect_output_to_logger(logger: logging.Logger, level: int) -> Any: try: # Redirect C-level stdout - with redirect_fd_to_queue(queue): + with __redirect_fd_to_queue(queue): result = func(*args, **kwargs) finally: # Restore Python stdout/stderr From 13fb7bc26092ddc726a64944f24dc662f3423d71 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Fri, 27 Jun 2025 06:28:40 -0600 Subject: [PATCH 073/144] Improve object classification (#18908) * Ui improvements * Improve image cropping and model saving * Improve naming * Add logs for training * Improve model labeling * Don't set sub label for none object classification * Cleanup --- .../real_time/custom_classification.py | 28 +++++++++++-------- frigate/util/classification.py | 1 + .../overlay/ClassificationSelectionDialog.tsx | 10 +++---- .../classification/ModelTrainingView.tsx | 22 ++++++++------- 4 files changed, 35 insertions(+), 26 deletions(-) diff --git a/frigate/data_processing/real_time/custom_classification.py b/frigate/data_processing/real_time/custom_classification.py index f153b5b92..fb1d31e89 100644 --- 
a/frigate/data_processing/real_time/custom_classification.py +++ b/frigate/data_processing/real_time/custom_classification.py @@ -187,7 +187,7 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi): super().__init__(config, metrics) self.model_config = model_config self.model_dir = os.path.join(MODEL_CACHE_DIR, self.model_config.name) - self.train_dir = os.path.join(self.model_dir, "train") + self.train_dir = os.path.join(CLIPS_DIR, self.model_config.name, "train") self.interpreter: Interpreter = None self.sub_label_publisher = sub_label_publisher self.tensor_input_details: dict[str, Any] = None @@ -232,20 +232,23 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi): obj_data["box"][1], obj_data["box"][2], obj_data["box"][3], - 224, + max( + obj_data["box"][1] - obj_data["box"][0], + obj_data["box"][3] - obj_data["box"][2], + ), 1.0, ) rgb = cv2.cvtColor(frame, cv2.COLOR_YUV2RGB_I420) - input = rgb[ + crop = rgb[ y:y2, x:x2, ] - if input.shape != (224, 224): - input = cv2.resize(input, (224, 224)) + if crop.shape != (224, 224): + crop = cv2.resize(crop, (224, 224)) - input = np.expand_dims(input, axis=0) + input = np.expand_dims(crop, axis=0) self.interpreter.set_tensor(self.tensor_input_details[0]["index"], input) self.interpreter.invoke() res: np.ndarray = self.interpreter.get_tensor( @@ -259,7 +262,7 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi): write_classification_attempt( self.train_dir, - cv2.cvtColor(frame, cv2.COLOR_RGB2BGR), + cv2.cvtColor(crop, cv2.COLOR_RGB2BGR), now, self.labelmap[best_id], score, @@ -269,12 +272,15 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi): logger.debug(f"Score {score} is worse than previous score {previous_score}") return - self.sub_label_publisher.publish( - EventMetadataTypeEnum.sub_label, - (obj_data["id"], self.labelmap[best_id], score), - ) + sub_label = self.labelmap[best_id] self.detected_objects[obj_data["id"]] = score + if sub_label != "none": + 
self.sub_label_publisher.publish( + EventMetadataTypeEnum.sub_label, + (obj_data["id"], sub_label, score), + ) + def handle_request(self, topic, request_data): if topic == EmbeddingsRequestEnum.reload_classification_model.value: if request_data.get("model_name") == self.model_config.name: diff --git a/frigate/util/classification.py b/frigate/util/classification.py index 3c030a986..6eab829f2 100644 --- a/frigate/util/classification.py +++ b/frigate/util/classification.py @@ -49,6 +49,7 @@ def __train_classification_model(model_name: str) -> bool: from tensorflow.keras.applications import MobileNetV2 from tensorflow.keras.preprocessing.image import ImageDataGenerator + logger.info(f"Kicking off classification training for {model_name}.") dataset_dir = os.path.join(CLIPS_DIR, model_name, "dataset") model_dir = os.path.join(MODEL_CACHE_DIR, model_name) num_classes = len( diff --git a/web/src/components/overlay/ClassificationSelectionDialog.tsx b/web/src/components/overlay/ClassificationSelectionDialog.tsx index 7cb8ca156..f86ced19a 100644 --- a/web/src/components/overlay/ClassificationSelectionDialog.tsx +++ b/web/src/components/overlay/ClassificationSelectionDialog.tsx @@ -82,7 +82,7 @@ export default function ClassificationSelectionDialog({ ); // control - const [newFace, setNewFace] = useState(false); + const [newClass, setNewClass] = useState(false); // components const Selector = isDesktop ? DropdownMenu : Drawer; @@ -98,10 +98,10 @@ export default function ClassificationSelectionDialog({ return (
    - {newFace && ( + {newClass && ( onCategorizeImage(newCat)} /> @@ -130,7 +130,7 @@ export default function ClassificationSelectionDialog({ > setNewFace(true)} + onClick={() => setNewClass(true)} > {t("createCategory.new")} @@ -142,7 +142,7 @@ export default function ClassificationSelectionDialog({ onClick={() => onCategorizeImage(category)} > - {category} + {category.replaceAll("_", " ")} ))}
    diff --git a/web/src/views/classification/ModelTrainingView.tsx b/web/src/views/classification/ModelTrainingView.tsx index 1f62a4f53..ea265bd51 100644 --- a/web/src/views/classification/ModelTrainingView.tsx +++ b/web/src/views/classification/ModelTrainingView.tsx @@ -375,7 +375,7 @@ function LibrarySelector({ }: LibrarySelectorProps) { const { t } = useTranslation(["views/classificationModel"]); const [confirmDelete, setConfirmDelete] = useState(null); - const [renameFace, setRenameFace] = useState(null); + const [renameClass, setRenameFace] = useState(null); const handleDeleteFace = useCallback( (name: string) => { @@ -390,9 +390,9 @@ function LibrarySelector({ const handleSetOpen = useCallback( (open: boolean) => { - setRenameFace(open ? renameFace : null); + setRenameFace(open ? renameClass : null); }, - [renameFace], + [renameClass], ); return ( @@ -428,15 +428,15 @@ function LibrarySelector({ { - onRename(renameFace!, newName); + onRename(renameClass!, newName); setRenameFace(null); }} - defaultValue={renameFace || ""} + defaultValue={renameClass || ""} regexPattern={/^[\p{L}\p{N}\s'_-]{1,50}$/u} regexErrorMessage={t("description.invalidName")} /> @@ -484,10 +484,10 @@ function LibrarySelector({ className="group flex items-center justify-between" >
    setPageToggle(id)} > - {id} + {id.replaceAll("_", " ")} ({dataset?.[id].length}) @@ -681,7 +681,9 @@ function TrainGrid({
    -
    {data.label}
    +
    + {data.label.replaceAll("_", " ")} +
    {data.score}%
    From f925154b8a63a7fda1d828594c7db0e79e9f8e89 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Fri, 27 Jun 2025 06:54:02 -0600 Subject: [PATCH 074/144] Remove TFLite init logs --- frigate/data_processing/real_time/custom_classification.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/frigate/data_processing/real_time/custom_classification.py b/frigate/data_processing/real_time/custom_classification.py index fb1d31e89..1e2b91a2d 100644 --- a/frigate/data_processing/real_time/custom_classification.py +++ b/frigate/data_processing/real_time/custom_classification.py @@ -17,6 +17,7 @@ from frigate.comms.inter_process import InterProcessRequestor from frigate.config import FrigateConfig from frigate.config.classification import CustomClassificationConfig from frigate.const import CLIPS_DIR, MODEL_CACHE_DIR +from frigate.log import redirect_output_to_logger from frigate.util.builtin import EventsPerSecond, InferenceSpeed, load_labels from frigate.util.object import box_overlaps, calculate_region @@ -55,6 +56,7 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi): self.last_run = datetime.datetime.now().timestamp() self.__build_detector() + @redirect_output_to_logger(logger, logging.DEBUG) def __build_detector(self) -> None: self.interpreter = Interpreter( model_path=os.path.join(self.model_dir, "model.tflite"), @@ -200,6 +202,7 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi): ) self.__build_detector() + @redirect_output_to_logger(logger, logging.DEBUG) def __build_detector(self) -> None: self.interpreter = Interpreter( model_path=os.path.join(self.model_dir, "model.tflite"), From 528f0d2b1f61a80700537a56e739b11a8506ac25 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Fri, 27 Jun 2025 08:35:02 -0600 Subject: [PATCH 075/144] Improve classification UI (#18910) * Move threhsold to base model config * Improve score handling * Add back button --- frigate/config/classification.py | 6 +-- .../real_time/custom_classification.py | 6 
++- web/src/types/frigateConfig.ts | 2 +- .../classification/ModelTrainingView.tsx | 48 ++++++++++++++----- 4 files changed, 46 insertions(+), 16 deletions(-) diff --git a/frigate/config/classification.py b/frigate/config/classification.py index 40a1183cd..c48ca489c 100644 --- a/frigate/config/classification.py +++ b/frigate/config/classification.py @@ -59,9 +59,6 @@ class CustomClassificationStateCameraConfig(FrigateBaseModel): crop: list[int, int, int, int] = Field( title="Crop of image frame on this camera to run classification on." ) - threshold: float = Field( - default=0.8, title="Classification score threshold to change the state." - ) class CustomClassificationStateConfig(FrigateBaseModel): @@ -86,6 +83,9 @@ class CustomClassificationObjectConfig(FrigateBaseModel): class CustomClassificationConfig(FrigateBaseModel): enabled: bool = Field(default=True, title="Enable running the model.") name: str | None = Field(default=None, title="Name of classification model.") + threshold: float = Field( + default=0.8, title="Classification score threshold to change the state." 
+ ) object_config: CustomClassificationObjectConfig | None = Field(default=None) state_config: CustomClassificationStateConfig | None = Field(default=None) diff --git a/frigate/data_processing/real_time/custom_classification.py b/frigate/data_processing/real_time/custom_classification.py index 1e2b91a2d..05a555701 100644 --- a/frigate/data_processing/real_time/custom_classification.py +++ b/frigate/data_processing/real_time/custom_classification.py @@ -152,7 +152,7 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi): score, ) - if score >= camera_config.threshold: + if score >= self.model_config.threshold: self.requestor.send_data( f"{camera}/classification/{self.model_config.name}", self.labelmap[best_id], @@ -271,6 +271,10 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi): score, ) + if score < self.model_config.threshold: + logger.debug(f"Score {score} is less than threshold.") + return + if score <= previous_score: logger.debug(f"Score {score} is worse than previous score {previous_score}") return diff --git a/web/src/types/frigateConfig.ts b/web/src/types/frigateConfig.ts index 3ccc5b06d..7d4c27794 100644 --- a/web/src/types/frigateConfig.ts +++ b/web/src/types/frigateConfig.ts @@ -282,6 +282,7 @@ export type CameraStreamingSettings = { export type CustomClassificationModelConfig = { enabled: boolean; name: string; + threshold: number; object_config: null | { objects: string[]; }; @@ -289,7 +290,6 @@ export type CustomClassificationModelConfig = { cameras: { [cameraName: string]: { crop: [number, number, number, number]; - threshold: number; }; }; motion: boolean; diff --git a/web/src/views/classification/ModelTrainingView.tsx b/web/src/views/classification/ModelTrainingView.tsx index ea265bd51..14de1a118 100644 --- a/web/src/views/classification/ModelTrainingView.tsx +++ b/web/src/views/classification/ModelTrainingView.tsx @@ -48,12 +48,15 @@ import { TbCategoryPlus } from "react-icons/tb"; import { useModelState } from 
"@/api/ws"; import { ModelState } from "@/types/ws"; import ActivityIndicator from "@/components/indicators/activity-indicator"; +import { useNavigate } from "react-router-dom"; +import { IoMdArrowRoundBack } from "react-icons/io"; type ModelTrainingViewProps = { model: CustomClassificationModelConfig; }; export default function ModelTrainingView({ model }: ModelTrainingViewProps) { const { t } = useTranslation(["views/classificationModel"]); + const navigate = useNavigate(); const [page, setPage] = useState("train"); const [pageToggle, setPageToggle] = useOptimisticState(page, setPage, 100); @@ -294,14 +297,28 @@ export default function ModelTrainingView({ model }: ModelTrainingViewProps) {
    - {}} - /> +
    + + {}} + /> +
    {selectedImages?.length > 0 ? (
    @@ -640,15 +657,17 @@ function TrainGrid({ trainImages .map((raw) => { const parts = raw.replaceAll(".webp", "").split("-"); + const rawScore = Number.parseFloat(parts[2]); return { raw, timestamp: parts[0], label: parts[1], - score: Number.parseFloat(parts[2]) * 100, + score: rawScore * 100, + truePositive: rawScore >= model.threshold, }; }) .sort((a, b) => b.timestamp.localeCompare(a.timestamp)), - [trainImages], + [model, trainImages], ); return ( @@ -684,7 +703,14 @@ function TrainGrid({
    {data.label.replaceAll("_", " ")}
    -
    {data.score}%
    +
    + {data.score}% +
    Date: Mon, 7 Jul 2025 07:36:06 -0600 Subject: [PATCH 076/144] Classification improvements (#19020) * Move classification training to full process * Sort class images --- frigate/util/classification.py | 190 +++++++++--------- .../classification/ModelTrainingView.tsx | 7 +- 2 files changed, 104 insertions(+), 93 deletions(-) diff --git a/frigate/util/classification.py b/frigate/util/classification.py index 6eab829f2..533c1345a 100644 --- a/frigate/util/classification.py +++ b/frigate/util/classification.py @@ -20,106 +20,117 @@ LEARNING_RATE = 0.001 logger = logging.getLogger(__name__) -def __generate_representative_dataset_factory(dataset_dir: str): - def generate_representative_dataset(): - image_paths = [] - for root, dirs, files in os.walk(dataset_dir): - for file in files: - if file.lower().endswith((".jpg", ".jpeg", ".png")): - image_paths.append(os.path.join(root, file)) +class ClassificationTrainingProcess(FrigateProcess): + def __init__(self, model_name: str) -> None: + super().__init__( + stop_event=None, + name=f"model_training:{model_name}", + ) + self.model_name = model_name - for path in image_paths[:300]: - img = cv2.imread(path) - img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) - img = cv2.resize(img, (224, 224)) - img_array = np.array(img, dtype=np.float32) / 255.0 - img_array = img_array[None, ...] 
- yield [img_array] + def run(self) -> None: + self.pre_run_setup() + self.__train_classification_model() - return generate_representative_dataset + def __generate_representative_dataset_factory(self, dataset_dir: str): + def generate_representative_dataset(): + image_paths = [] + for root, dirs, files in os.walk(dataset_dir): + for file in files: + if file.lower().endswith((".jpg", ".jpeg", ".png")): + image_paths.append(os.path.join(root, file)) + for path in image_paths[:300]: + img = cv2.imread(path) + img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) + img = cv2.resize(img, (224, 224)) + img_array = np.array(img, dtype=np.float32) / 255.0 + img_array = img_array[None, ...] + yield [img_array] -@redirect_output_to_logger(logger, logging.DEBUG) -def __train_classification_model(model_name: str) -> bool: - """Train a classification model.""" + return generate_representative_dataset - # import in the function so that tensorflow is not initialized multiple times - import tensorflow as tf - from tensorflow.keras import layers, models, optimizers - from tensorflow.keras.applications import MobileNetV2 - from tensorflow.keras.preprocessing.image import ImageDataGenerator + @redirect_output_to_logger(logger, logging.DEBUG) + def __train_classification_model(self) -> bool: + """Train a classification model.""" - logger.info(f"Kicking off classification training for {model_name}.") - dataset_dir = os.path.join(CLIPS_DIR, model_name, "dataset") - model_dir = os.path.join(MODEL_CACHE_DIR, model_name) - num_classes = len( - [ - d - for d in os.listdir(dataset_dir) - if os.path.isdir(os.path.join(dataset_dir, d)) - ] - ) + # import in the function so that tensorflow is not initialized multiple times + import tensorflow as tf + from tensorflow.keras import layers, models, optimizers + from tensorflow.keras.applications import MobileNetV2 + from tensorflow.keras.preprocessing.image import ImageDataGenerator - # Start with imagenet base model with 35% of channels in each layer - 
base_model = MobileNetV2( - input_shape=(224, 224, 3), - include_top=False, - weights="imagenet", - alpha=0.35, - ) - base_model.trainable = False # Freeze pre-trained layers + logger.info(f"Kicking off classification training for {self.model_name}.") + dataset_dir = os.path.join(CLIPS_DIR, self.model_name, "dataset") + model_dir = os.path.join(MODEL_CACHE_DIR, self.model_name) + num_classes = len( + [ + d + for d in os.listdir(dataset_dir) + if os.path.isdir(os.path.join(dataset_dir, d)) + ] + ) - model = models.Sequential( - [ - base_model, - layers.GlobalAveragePooling2D(), - layers.Dense(128, activation="relu"), - layers.Dropout(0.3), - layers.Dense(num_classes, activation="softmax"), - ] - ) + # Start with imagenet base model with 35% of channels in each layer + base_model = MobileNetV2( + input_shape=(224, 224, 3), + include_top=False, + weights="imagenet", + alpha=0.35, + ) + base_model.trainable = False # Freeze pre-trained layers - model.compile( - optimizer=optimizers.Adam(learning_rate=LEARNING_RATE), - loss="categorical_crossentropy", - metrics=["accuracy"], - ) + model = models.Sequential( + [ + base_model, + layers.GlobalAveragePooling2D(), + layers.Dense(128, activation="relu"), + layers.Dropout(0.3), + layers.Dense(num_classes, activation="softmax"), + ] + ) - # create training set - datagen = ImageDataGenerator(rescale=1.0 / 255, validation_split=0.2) - train_gen = datagen.flow_from_directory( - dataset_dir, - target_size=(224, 224), - batch_size=BATCH_SIZE, - class_mode="categorical", - subset="training", - ) + model.compile( + optimizer=optimizers.Adam(learning_rate=LEARNING_RATE), + loss="categorical_crossentropy", + metrics=["accuracy"], + ) - # write labelmap - class_indices = train_gen.class_indices - index_to_class = {v: k for k, v in class_indices.items()} - sorted_classes = [index_to_class[i] for i in range(len(index_to_class))] - with open(os.path.join(model_dir, "labelmap.txt"), "w") as f: - for class_name in sorted_classes: - 
f.write(f"{class_name}\n") + # create training set + datagen = ImageDataGenerator(rescale=1.0 / 255, validation_split=0.2) + train_gen = datagen.flow_from_directory( + dataset_dir, + target_size=(224, 224), + batch_size=BATCH_SIZE, + class_mode="categorical", + subset="training", + ) - # train the model - model.fit(train_gen, epochs=EPOCHS, verbose=0) + # write labelmap + class_indices = train_gen.class_indices + index_to_class = {v: k for k, v in class_indices.items()} + sorted_classes = [index_to_class[i] for i in range(len(index_to_class))] + with open(os.path.join(model_dir, "labelmap.txt"), "w") as f: + for class_name in sorted_classes: + f.write(f"{class_name}\n") - # convert model to tflite - converter = tf.lite.TFLiteConverter.from_keras_model(model) - converter.optimizations = [tf.lite.Optimize.DEFAULT] - converter.representative_dataset = __generate_representative_dataset_factory( - dataset_dir - ) - converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8] - converter.inference_input_type = tf.uint8 - converter.inference_output_type = tf.uint8 - tflite_model = converter.convert() + # train the model + model.fit(train_gen, epochs=EPOCHS, verbose=0) - # write model - with open(os.path.join(model_dir, "model.tflite"), "wb") as f: - f.write(tflite_model) + # convert model to tflite + converter = tf.lite.TFLiteConverter.from_keras_model(model) + converter.optimizations = [tf.lite.Optimize.DEFAULT] + converter.representative_dataset = ( + self.__generate_representative_dataset_factory(dataset_dir) + ) + converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8] + converter.inference_input_type = tf.uint8 + converter.inference_output_type = tf.uint8 + tflite_model = converter.convert() + + # write model + with open(os.path.join(model_dir, "model.tflite"), "wb") as f: + f.write(tflite_model) @staticmethod @@ -138,12 +149,7 @@ def kickoff_model_training( # run training in sub process so that # tensorflow will free CPU / GPU 
memory # upon training completion - training_process = FrigateProcess( - None, - target=__train_classification_model, - name=f"model_training:{model_name}", - args=(model_name,), - ) + training_process = ClassificationTrainingProcess(model_name) training_process.start() training_process.join() diff --git a/web/src/views/classification/ModelTrainingView.tsx b/web/src/views/classification/ModelTrainingView.tsx index 14de1a118..145004ec3 100644 --- a/web/src/views/classification/ModelTrainingView.tsx +++ b/web/src/views/classification/ModelTrainingView.tsx @@ -577,9 +577,14 @@ function DatasetGrid({ }: DatasetGridProps) { const { t } = useTranslation(["views/classificationModel"]); + const classData = useMemo( + () => images.sort((a, b) => a.localeCompare(b)), + [images], + ); + return (
    - {images.map((image) => ( + {classData.map((image) => (
    Date: Mon, 7 Jul 2025 09:03:57 -0500 Subject: [PATCH 077/144] Semantic Search Triggers (#18969) * semantic trigger test * database and model * config * embeddings maintainer and trigger post-processor * api to create, edit, delete triggers * frontend and i18n keys * use thumbnail and description for trigger types * image picker tweaks * initial sync * thumbnail file management * clean up logs and use saved thumbnail on frontend * publish mqtt messages * webpush changes to enable trigger notifications * add enabled switch * add triggers from explore * renaming and deletion fixes * fix typing * UI updates and add last triggering event time and link * log exception instead of return in endpoint * highlight entry in UI when triggered * save and delete thumbnails directly * remove alert action for now and add descriptions * tweaks * clean up * fix types * docs * docs tweaks * docs * reuse enum --- docs/docs/configuration/reference.md | 17 + docs/docs/configuration/semantic_search.md | 38 ++ docs/docs/integrations/mqtt.md | 14 + frigate/api/defs/request/events_body.py | 8 + frigate/api/event.py | 434 ++++++++++++- frigate/app.py | 6 + frigate/comms/webpush.py | 100 ++- frigate/config/camera/camera.py | 5 + frigate/config/camera/updater.py | 3 + frigate/config/classification.py | 36 ++ frigate/const.py | 1 + .../data_processing/post/semantic_trigger.py | 233 +++++++ frigate/embeddings/__init__.py | 12 + frigate/embeddings/embeddings.py | 238 ++++++- frigate/embeddings/maintainer.py | 102 ++- frigate/models.py | 17 + frigate/util/builtin.py | 16 + migrations/031_create_trigger_table.py | 50 ++ web/public/locales/en/components/dialog.json | 7 + web/public/locales/en/views/explore.json | 4 + web/public/locales/en/views/settings.json | 95 +++ web/src/api/ws.tsx | 11 + .../components/card/SearchThumbnailFooter.tsx | 3 + .../components/menu/SearchResultActions.tsx | 13 + .../overlay/CreateTriggerDialog.tsx | 416 ++++++++++++ .../overlay/DeleteTriggerDialog.tsx | 80 +++ 
.../components/overlay/DeleteUserDialog.tsx | 2 +- web/src/components/overlay/ImagePicker.tsx | 172 +++++ web/src/pages/FaceLibrary.tsx | 1 + web/src/pages/Settings.tsx | 17 +- web/src/types/frigateConfig.ts | 12 + web/src/types/trigger.ts | 11 + web/src/types/ws.ts | 8 + .../classification/ModelTrainingView.tsx | 1 + web/src/views/explore/ExploreView.tsx | 8 + web/src/views/search/SearchView.tsx | 12 + web/src/views/settings/TriggerView.tsx | 595 ++++++++++++++++++ 37 files changed, 2736 insertions(+), 62 deletions(-) create mode 100644 frigate/data_processing/post/semantic_trigger.py create mode 100644 migrations/031_create_trigger_table.py create mode 100644 web/src/components/overlay/CreateTriggerDialog.tsx create mode 100644 web/src/components/overlay/DeleteTriggerDialog.tsx create mode 100644 web/src/components/overlay/ImagePicker.tsx create mode 100644 web/src/types/trigger.ts create mode 100644 web/src/views/settings/TriggerView.tsx diff --git a/docs/docs/configuration/reference.md b/docs/docs/configuration/reference.md index 4be10000d..43084db4a 100644 --- a/docs/docs/configuration/reference.md +++ b/docs/docs/configuration/reference.md @@ -840,6 +840,23 @@ cameras: # By default the cameras are sorted alphabetically. order: 0 + # Optional: Configuration for triggers to automate actions based on semantic search results. + triggers: + # Required: Unique identifier for the trigger (generated automatically from nickname if not specified). + trigger_name: + # Required: Enable or disable the trigger. (default: shown below) + enabled: true + # Type of trigger, either `thumbnail` for image-based matching or `description` for text-based matching. (default: none) + type: thumbnail + # Reference data for matching, either an event ID for `thumbnail` or a text string for `description`. (default: none) + data: 1751565549.853251-b69j73 + # Similarity threshold for triggering. (default: none) + threshold: 0.7 + # List of actions to perform when the trigger fires. 
(default: none) + # Available options: `notification` (send a webpush notification) + actions: + - notification + # Optional: Configuration for AI generated tracked object descriptions genai: # Optional: Enable AI description generation (default: shown below) diff --git a/docs/docs/configuration/semantic_search.md b/docs/docs/configuration/semantic_search.md index d9fcb5006..4d2a849c4 100644 --- a/docs/docs/configuration/semantic_search.md +++ b/docs/docs/configuration/semantic_search.md @@ -102,3 +102,41 @@ See the [Hardware Accelerated Enrichments](/configuration/hardware_acceleration_ 4. Make your search language and tone closely match exactly what you're looking for. If you are using thumbnail search, **phrase your query as an image caption**. Searching for "red car" may not work as well as "red sedan driving down a residential street on a sunny day". 5. Semantic search on thumbnails tends to return better results when matching large subjects that take up most of the frame. Small things like "cat" tend to not work well. 6. Experiment! Find a tracked object you want to test and start typing keywords and phrases to see what works for you. + +## Triggers + +Triggers utilize semantic search to automate actions when a tracked object matches a specified image or description. Triggers can be configured so that Frigate executes a specific actions when a tracked object's image or description matches a predefined image or text, based on a similarity threshold. Triggers are managed per camera and can be configured via the Frigate UI in the Settings page under the Triggers tab. + +### Configuration + +Triggers are defined within the `semantic_search` configuration for each camera in your Frigate configuration file or through the UI. Each trigger consists of a `type` (either `thumbnail` or `description`), a `data` field (the reference image event ID or text), a `threshold` for similarity matching, and a list of `actions` to perform when the trigger fires. 
+ +#### Managing Triggers in the UI + +1. Navigate to the **Settings** page and select the **Triggers** tab. +2. Choose a camera from the dropdown menu to view or manage its triggers. +3. Click **Add Trigger** to create a new trigger or use the pencil icon to edit an existing one. +4. In the **Create Trigger** dialog: + - Enter a **Name** for the trigger (e.g., "red_car_alert"). + - Select the **Type** (`Thumbnail` or `Description`). + - For `Thumbnail`, select an image to trigger this action when a similar thumbnail image is detected, based on the threshold. + - For `Description`, enter text to trigger this action when a similar tracked object description is detected. + - Set the **Threshold** for similarity matching. + - Select **Actions** to perform when the trigger fires. +5. Save the trigger to update the configuration and store the embedding in the database. + +When a trigger fires, the UI highlights the trigger with a blue outline for 3 seconds for easy identification. + +### Usage and Best Practices + +1. **Thumbnail Triggers**: Select a representative image (event ID) from the Explore page that closely matches the object you want to detect. For best results, choose images where the object is prominent and fills most of the frame. +2. **Description Triggers**: Write concise, specific text descriptions (e.g., "Person in a red jacket") that align with the tracked object’s description. Avoid vague terms to improve matching accuracy. +3. **Threshold Tuning**: Adjust the threshold to balance sensitivity and specificity. A higher threshold (e.g., 0.8) requires closer matches, reducing false positives but potentially missing similar objects. A lower threshold (e.g., 0.6) is more inclusive but may trigger more often. +4. **Using Explore**: Use the context menu or right-click / long-press on a tracked object in the Grid View in Explore to quickly add a trigger based on the tracked object's thumbnail. +5. 
**Editing triggers**: For the best experience, triggers should be edited via the UI. However, Frigate will ensure triggers edited in the config will be synced with triggers created and edited in the UI. + +### Notes + +- Triggers rely on the same Jina AI CLIP models (V1 or V2) used for semantic search. Ensure `semantic_search` is enabled and properly configured. +- Reindexing embeddings (via the UI or `reindex: True`) does not affect trigger configurations but may update the embeddings used for matching. +- For optimal performance, use a system with sufficient RAM (8GB minimum, 16GB recommended) and a GPU for `large` model configurations, as described in the Semantic Search requirements. diff --git a/docs/docs/integrations/mqtt.md b/docs/docs/integrations/mqtt.md index 2710e433d..8fa5b57f6 100644 --- a/docs/docs/integrations/mqtt.md +++ b/docs/docs/integrations/mqtt.md @@ -206,6 +206,20 @@ Message published for each changed review item. The first message is published w } ``` +### `frigate/triggers` + +Message published when a trigger defined in a camera's `semantic_search` configuration fires. + +```json +{ + "name": "car_trigger", + "camera": "driveway", + "event_id": "1751565549.853251-b69j73", + "type": "thumbnail", + "score": 0.85 +} +``` + ### `frigate/stats` Same data available at `/api/stats` published at a configurable interval. 
diff --git a/frigate/api/defs/request/events_body.py b/frigate/api/defs/request/events_body.py index 0883d066f..dd18ff8f7 100644 --- a/frigate/api/defs/request/events_body.py +++ b/frigate/api/defs/request/events_body.py @@ -2,6 +2,8 @@ from typing import List, Optional, Union from pydantic import BaseModel, Field +from frigate.config.classification import TriggerType + class EventsSubLabelBody(BaseModel): subLabel: str = Field(title="Sub label", max_length=100) @@ -45,3 +47,9 @@ class EventsDeleteBody(BaseModel): class SubmitPlusBody(BaseModel): include_annotation: int = Field(default=1) + + +class TriggerEmbeddingBody(BaseModel): + type: TriggerType + data: str + threshold: float = Field(default=0.5, ge=0.0, le=1.0) diff --git a/frigate/api/event.py b/frigate/api/event.py index 24a6c6f4a..1fe34caec 100644 --- a/frigate/api/event.py +++ b/frigate/api/event.py @@ -1,5 +1,6 @@ """Event apis.""" +import base64 import datetime import logging import os @@ -10,6 +11,7 @@ from pathlib import Path from urllib.parse import unquote import cv2 +import numpy as np from fastapi import APIRouter, Request from fastapi.params import Depends from fastapi.responses import JSONResponse @@ -34,6 +36,7 @@ from frigate.api.defs.request.events_body import ( EventsLPRBody, EventsSubLabelBody, SubmitPlusBody, + TriggerEmbeddingBody, ) from frigate.api.defs.response.event_response import ( EventCreateResponse, @@ -44,11 +47,12 @@ from frigate.api.defs.response.event_response import ( from frigate.api.defs.response.generic_response import GenericResponse from frigate.api.defs.tags import Tags from frigate.comms.event_metadata_updater import EventMetadataTypeEnum -from frigate.const import CLIPS_DIR +from frigate.const import CLIPS_DIR, TRIGGER_DIR from frigate.embeddings import EmbeddingsContext -from frigate.models import Event, ReviewSegment, Timeline +from frigate.models import Event, ReviewSegment, Timeline, Trigger from frigate.track.object_processing import TrackedObject from 
frigate.util.builtin import get_tz_modifiers +from frigate.util.path import get_event_thumbnail_bytes logger = logging.getLogger(__name__) @@ -1264,6 +1268,38 @@ def regenerate_description( ) +@router.post( + "/description/generate", + response_model=GenericResponse, + # dependencies=[Depends(require_role(["admin"]))], +) +def generate_description_embedding( + request: Request, + body: EventsDescriptionBody, +): + new_description = body.description + + # If semantic search is enabled, update the index + if request.app.frigate_config.semantic_search.enabled: + context: EmbeddingsContext = request.app.embeddings + if len(new_description) > 0: + result = context.generate_description_embedding( + new_description, + ) + + return JSONResponse( + content=( + { + "success": True, + "message": f"Embedding for description is {result}" + if result + else "Failed to generate embedding", + } + ), + status_code=200, + ) + + def delete_single_event(event_id: str, request: Request) -> dict: try: event = Event.get(Event.id == event_id) @@ -1412,3 +1448,397 @@ def end_event(request: Request, event_id: str, body: EventsEndBody): content=({"success": True, "message": "Event successfully ended."}), status_code=200, ) + + +@router.post( + "/trigger/embedding", + response_model=dict, + dependencies=[Depends(require_role(["admin"]))], +) +def create_trigger_embedding( + request: Request, + body: TriggerEmbeddingBody, + camera: str, + name: str, +): + try: + if not request.app.frigate_config.semantic_search.enabled: + return JSONResponse( + content={ + "success": False, + "message": "Semantic search is not enabled", + }, + status_code=400, + ) + + # Check if trigger already exists + if ( + Trigger.select() + .where(Trigger.camera == camera, Trigger.name == name) + .exists() + ): + return JSONResponse( + content={ + "success": False, + "message": f"Trigger {camera}:{name} already exists", + }, + status_code=400, + ) + + context: EmbeddingsContext = request.app.embeddings + # Generate 
embedding based on type + embedding = None + if body.type == "description": + embedding = context.generate_description_embedding(body.data) + elif body.type == "thumbnail": + try: + event: Event = Event.get(Event.id == body.data) + except DoesNotExist: + # TODO: check triggers directory for image + return JSONResponse( + content={ + "success": False, + "message": f"Failed to fetch event for {body.type} trigger", + }, + status_code=400, + ) + + # Skip the event if not an object + if event.data.get("type") != "object": + return + + if thumbnail := get_event_thumbnail_bytes(event): + cursor = context.db.execute_sql( + """ + SELECT thumbnail_embedding FROM vec_thumbnails WHERE id = ? + """, + [body.data], + ) + + row = cursor.fetchone() if cursor else None + + if row: + query_embedding = row[0] + embedding = np.frombuffer(query_embedding, dtype=np.float32) + else: + # Extract valid thumbnail + thumbnail = get_event_thumbnail_bytes(event) + + if thumbnail is None: + return JSONResponse( + content={ + "success": False, + "message": f"Failed to get thumbnail for {body.data} for {body.type} trigger", + }, + status_code=400, + ) + + embedding = context.generate_image_embedding( + body.data, (base64.b64encode(thumbnail).decode("ASCII")) + ) + + if embedding is None: + return JSONResponse( + content={ + "success": False, + "message": f"Failed to generate embedding for {body.type} trigger", + }, + status_code=400, + ) + + if body.type == "thumbnail": + # Save image to the triggers directory + try: + os.makedirs(os.path.join(TRIGGER_DIR, camera), exist_ok=True) + with open( + os.path.join(TRIGGER_DIR, camera, f"{body.data}.webp"), "wb" + ) as f: + f.write(thumbnail) + logger.debug( + f"Writing thumbnail for trigger with data {body.data} in {camera}." 
@router.put(
    "/trigger/embedding/{camera}/{name}",
    response_model=dict,
    dependencies=[Depends(require_role(["admin"]))],
)
def update_trigger_embedding(
    request: Request,
    camera: str,
    name: str,
    body: TriggerEmbeddingBody,
):
    """Update (or create, on rename) a semantic-search trigger for a camera.

    For "description" triggers the embedding is generated from the text in
    ``body.data``. For "thumbnail" triggers it is generated from the event
    thumbnail identified by ``body.data``, falling back to a previously
    saved image in the triggers directory when the event no longer exists.

    Returns a JSONResponse: 200 on success, 400 on validation/lookup
    failures, 500 on unexpected errors.
    """
    try:
        if not request.app.frigate_config.semantic_search.enabled:
            return JSONResponse(
                content={
                    "success": False,
                    "message": "Semantic search is not enabled",
                },
                status_code=400,
            )

        context: EmbeddingsContext = request.app.embeddings

        # Generate embedding based on trigger type
        embedding = None

        if body.type == "description":
            embedding = context.generate_description_embedding(body.data)
        elif body.type == "thumbnail":
            webp_path = os.path.join(TRIGGER_DIR, camera, f"{body.data}.webp")

            # Ensure the per-camera triggers directory exists BEFORE any
            # write; previously the first write could fail with ENOENT for a
            # camera that had never had a trigger thumbnail saved.
            os.makedirs(os.path.join(TRIGGER_DIR, camera), exist_ok=True)

            try:
                event: Event = Event.get(Event.id == body.data)

                # Only tracked objects have usable thumbnails
                if event.data.get("type") != "object":
                    return JSONResponse(
                        content={
                            "success": False,
                            "message": f"Event {body.data} is not a tracked object for {body.type} trigger",
                        },
                        status_code=400,
                    )

                # Persist the event thumbnail for later trigger syncs
                thumbnail = get_event_thumbnail_bytes(event)

                with open(webp_path, "wb") as f:
                    f.write(thumbnail)
            except DoesNotExist:
                # Event is gone; fall back to a previously saved image in the
                # triggers directory, if one exists.
                if not os.path.exists(webp_path):
                    return JSONResponse(
                        content={
                            "success": False,
                            "message": f"Failed to fetch event for {body.type} trigger",
                        },
                        status_code=400,
                    )

                with open(webp_path, "rb") as f:
                    thumbnail = f.read()

            embedding = context.generate_image_embedding(
                body.data, (base64.b64encode(thumbnail).decode("ASCII"))
            )

        if embedding is None:
            return JSONResponse(
                content={
                    "success": False,
                    "message": f"Failed to generate embedding for {body.type} trigger",
                },
                status_code=400,
            )

        # Upsert: update the row when it exists, otherwise create it
        trigger = Trigger.get_or_none(Trigger.camera == camera, Trigger.name == name)

        if trigger:
            # Delete the old thumbnail only when the source data changed
            if trigger.data != body.data:
                try:
                    os.remove(
                        os.path.join(TRIGGER_DIR, camera, f"{trigger.data}.webp")
                    )
                    logger.debug(
                        f"Deleted thumbnail for trigger with data {trigger.data} in {camera}."
                    )
                except Exception as e:
                    logger.error(
                        f"Failed to delete thumbnail for trigger with data {trigger.data} in {camera}: {e}"
                    )

            Trigger.update(
                data=body.data,
                model=request.app.frigate_config.semantic_search.model,
                embedding=np.array(embedding, dtype=np.float32).tobytes(),
                threshold=body.threshold,
                triggering_event_id="",
                last_triggered=None,
            ).where(Trigger.camera == camera, Trigger.name == name).execute()
        else:
            # No existing row (rename case): create a new trigger
            Trigger.create(
                camera=camera,
                name=name,
                type=body.type,
                data=body.data,
                threshold=body.threshold,
                model=request.app.frigate_config.semantic_search.model,
                embedding=np.array(embedding, dtype=np.float32).tobytes(),
                triggering_event_id="",
                last_triggered=None,
            )

        # NOTE: the thumbnail (when applicable) was already written above, so
        # no second write is needed here.
        return JSONResponse(
            content={
                "success": True,
                "message": f"Trigger updated successfully for {camera}:{name}",
            },
            status_code=200,
        )

    except Exception as e:
        return JSONResponse(
            content={
                "success": False,
                "message": f"Error updating trigger embedding: {str(e)}",
            },
            status_code=500,
        )
@router.delete(
    "/trigger/embedding/{camera}/{name}",
    response_model=dict,
    dependencies=[Depends(require_role(["admin"]))],
)
def delete_trigger_embedding(
    request: Request,
    camera: str,
    name: str,
):
    """Delete a trigger and its saved thumbnail for a camera.

    Returns 200 on success, 404 when the trigger does not exist, and 500 on
    server-side failures.
    """
    try:
        trigger = Trigger.get_or_none(Trigger.camera == camera, Trigger.name == name)
        if trigger is None:
            # 404 is the correct status for a missing resource (was 500)
            return JSONResponse(
                content={
                    "success": False,
                    "message": f"Trigger {camera}:{name} not found",
                },
                status_code=404,
            )

        deleted = (
            Trigger.delete()
            .where(Trigger.camera == camera, Trigger.name == name)
            .execute()
        )
        if deleted == 0:
            # A failed delete is a server-side error, not an auth failure (was 401)
            return JSONResponse(
                content={
                    "success": False,
                    "message": f"Error deleting trigger {camera}:{name}",
                },
                status_code=500,
            )

        # Best-effort cleanup of the trigger's thumbnail image; failure to
        # remove the file does not fail the request.
        try:
            os.remove(os.path.join(TRIGGER_DIR, camera, f"{trigger.data}.webp"))
            logger.debug(
                f"Deleted thumbnail for trigger with data {trigger.data} in {camera}."
            )
        except Exception as e:
            logger.error(
                f"Failed to delete thumbnail for trigger with data {trigger.data} in {camera}: {e}"
            )

        return JSONResponse(
            content={
                "success": True,
                "message": f"Trigger deleted successfully for {camera}:{name}",
            },
            status_code=200,
        )

    except Exception as e:
        return JSONResponse(
            content={
                "success": False,
                "message": f"Error deleting trigger embedding: {str(e)}",
            },
            status_code=500,
        )


@router.get(
    "/triggers/status/{camera_name}",
    response_model=dict,
    dependencies=[Depends(require_role(["admin"]))],
)
def get_triggers_status(
    camera_name: str,
):
    """Return last-triggered metadata for every trigger of a camera.

    Response maps trigger name -> {last_triggered (epoch seconds or None),
    triggering_event_id (str or None)}; 404 when the camera has no triggers.
    """
    try:
        triggers = Trigger.select().where(Trigger.camera == camera_name)

        status = {
            trigger.name: {
                "last_triggered": trigger.last_triggered.timestamp()
                if trigger.last_triggered
                else None,
                "triggering_event_id": trigger.triggering_event_id
                if trigger.triggering_event_id
                else None,
            }
            for trigger in triggers
        }

        if not status:
            return JSONResponse(
                content={
                    "success": False,
                    "message": f"No triggers found for camera {camera_name}",
                },
                status_code=404,
            )

        return {"success": True, "triggers": status}
    except Exception as ex:
        logger.exception(ex)
        return JSONResponse(
            content=({"success": False, "message": "Error fetching trigger status"}),
            status_code=400,
        )
self.config.face_recognition.enabled: dirs.append(FACE_DIR) + if self.config.semantic_search.enabled: + dirs.append(TRIGGER_DIR) + for d in dirs: if not os.path.exists(d) and not os.path.islink(d): logger.info(f"Creating directory: {d}") @@ -288,6 +293,7 @@ class FrigateApp: ReviewSegment, Timeline, User, + Trigger, ] self.db.bind(models) diff --git a/frigate/comms/webpush.py b/frigate/comms/webpush.py index 7bc66f3b7..0bc2c1457 100644 --- a/frigate/comms/webpush.py +++ b/frigate/comms/webpush.py @@ -186,6 +186,28 @@ class WebPushClient(Communicator): # type: ignore[misc] logger.debug(f"Notifications for {camera} are currently suspended.") return self.send_alert(decoded) + if topic == "triggers": + decoded = json.loads(payload) + + camera = decoded["camera"] + name = decoded["name"] + + # ensure notifications are enabled and the specific trigger has + # notification action enabled + if ( + not self.config.cameras[camera].notifications.enabled + or name not in self.config.cameras[camera].semantic_search.triggers + or "notification" + not in self.config.cameras[camera] + .semantic_search.triggers[name] + .actions + ): + return + + if self.is_camera_suspended(camera): + logger.debug(f"Notifications for {camera} are currently suspended.") + return + self.send_trigger(decoded) elif topic == "notification_test": if not self.config.notifications.enabled and not any( cam.notifications.enabled for cam in self.config.cameras.values() @@ -267,6 +289,23 @@ class WebPushClient(Communicator): # type: ignore[misc] except Exception as e: logger.error(f"Error processing notification: {str(e)}") + def _within_cooldown(self, camera: str) -> bool: + now = datetime.datetime.now().timestamp() + if now - self.last_notification_time < self.config.notifications.cooldown: + logger.debug( + f"Skipping notification for {camera} - in global cooldown period" + ) + return True + if ( + now - self.last_camera_notification_time[camera] + < self.config.cameras[camera].notifications.cooldown + ): + 
logger.debug( + f"Skipping notification for {camera} - in camera-specific cooldown period" + ) + return True + return False + def send_notification_test(self) -> None: if not self.config.notifications.email: return @@ -295,24 +334,7 @@ class WebPushClient(Communicator): # type: ignore[misc] camera: str = payload["after"]["camera"] current_time = datetime.datetime.now().timestamp() - # Check global cooldown period - if ( - current_time - self.last_notification_time - < self.config.notifications.cooldown - ): - logger.debug( - f"Skipping notification for {camera} - in global cooldown period" - ) - return - - # Check camera-specific cooldown period - if ( - current_time - self.last_camera_notification_time[camera] - < self.config.cameras[camera].notifications.cooldown - ): - logger.debug( - f"Skipping notification for {camera} - in camera-specific cooldown period" - ) + if self._within_cooldown(camera): return self.check_registrations() @@ -367,6 +389,48 @@ class WebPushClient(Communicator): # type: ignore[misc] self.cleanup_registrations() + def send_trigger(self, payload: dict[str, Any]) -> None: + if not self.config.notifications.email: + return + + camera: str = payload["camera"] + current_time = datetime.datetime.now().timestamp() + + if self._within_cooldown(camera): + return + + self.check_registrations() + + self.last_camera_notification_time[camera] = current_time + self.last_notification_time = current_time + + trigger_type = payload["type"] + event_id = payload["event_id"] + name = payload["name"] + score = payload["score"] + + title = f"{name.replace('_', ' ')} triggered on {titlecase(camera.replace('_', ' '))}" + message = f"{titlecase(trigger_type)} trigger fired for {titlecase(camera.replace('_', ' '))} with score {score:.2f}" + image = f"clips/triggers/{camera}/{event_id}.webp" + + direct_url = f"/explore?event_id={event_id}" + ttl = 0 + + logger.debug(f"Sending push notification for {camera}, trigger name {name}") + + for user in self.web_pushers: + 
self.send_push_notification( + user=user, + payload=payload, + title=title, + message=message, + direct_url=direct_url, + image=image, + ttl=ttl, + ) + + self.cleanup_registrations() + def stop(self) -> None: logger.info("Closing notification queue") self.notification_thread.join() diff --git a/frigate/config/camera/camera.py b/frigate/config/camera/camera.py index 33ad312a2..c356984f3 100644 --- a/frigate/config/camera/camera.py +++ b/frigate/config/camera/camera.py @@ -22,6 +22,7 @@ from ..classification import ( AudioTranscriptionConfig, CameraFaceRecognitionConfig, CameraLicensePlateRecognitionConfig, + CameraSemanticSearchConfig, ) from .audio import AudioConfig from .birdseye import BirdseyeCameraConfig @@ -91,6 +92,10 @@ class CameraConfig(FrigateBaseModel): review: ReviewConfig = Field( default_factory=ReviewConfig, title="Review configuration." ) + semantic_search: CameraSemanticSearchConfig = Field( + default_factory=CameraSemanticSearchConfig, + title="Semantic search configuration.", + ) snapshots: SnapshotsConfig = Field( default_factory=SnapshotsConfig, title="Snapshot configuration." 
class TriggerType(str, Enum):
    """Source used to compute a trigger's embedding."""

    THUMBNAIL = "thumbnail"
    DESCRIPTION = "description"


class TriggerAction(str, Enum):
    """Action performed when a trigger matches."""

    NOTIFICATION = "notification"


class TriggerConfig(FrigateBaseModel):
    """A single semantic-search trigger attached to a camera."""

    enabled: bool = Field(default=True, title="Enable this trigger")
    type: TriggerType = Field(default=TriggerType.DESCRIPTION, title="Type of trigger")
    data: str = Field(title="Trigger content (text phrase or image ID)")
    threshold: float = Field(
        title="Confidence score required to run the trigger",
        default=0.8,
        gt=0.0,
        le=1.0,
    )
    # default_factory avoids a shared mutable [] default across instances
    actions: Optional[List[TriggerAction]] = Field(
        default_factory=list, title="Actions to perform when trigger is matched"
    )

    model_config = ConfigDict(extra="forbid", protected_namespaces=())


class CameraSemanticSearchConfig(FrigateBaseModel):
    """Per-camera semantic search settings (currently only triggers)."""

    triggers: Optional[Dict[str, TriggerConfig]] = Field(
        default=None,
        title="Trigger actions on tracked objects that match existing thumbnails or descriptions",
    )

    model_config = ConfigDict(extra="forbid", protected_namespaces=())
class SemanticTriggerProcessor(PostProcessorApi):
    """Post-processor that compares an event's stored embedding against the
    per-camera trigger embeddings and fires configured actions on a match."""

    def __init__(
        self,
        db: SqliteVecQueueDatabase,
        config: FrigateConfig,
        requestor: InterProcessRequestor,
        metrics: DataProcessorMetrics,
        embeddings,
    ):
        super().__init__(config, metrics, None)
        self.db = db
        self.embeddings = embeddings
        self.requestor = requestor
        self.trigger_embeddings: list[np.ndarray] = []

        # z-score stats used to normalize cosine distances into comparable scores
        self.thumb_stats = ZScoreNormalization()
        self.desc_stats = ZScoreNormalization()

        # load stats from disk; absence is expected on first run
        try:
            with open(os.path.join(CONFIG_DIR, ".search_stats.json"), "r") as f:
                data = json.loads(f.read())
                self.thumb_stats.from_dict(data["thumb_stats"])
                self.desc_stats.from_dict(data["desc_stats"])
        except FileNotFoundError:
            pass

    def process_data(
        self, data: dict[str, Any], data_type: PostProcessDataEnum
    ) -> None:
        """Evaluate every enabled trigger of the event's camera against the
        event's stored embedding; update the DB and publish on a match.

        data keys: event_id, camera, type ("image" or "text").
        """
        event_id = data["event_id"]
        camera = data["camera"]
        process_type = data["type"]

        camera_triggers = self.config.cameras[camera].semantic_search.triggers
        if camera_triggers is None:
            return

        # Fetch the event's embedding ONCE; it does not depend on the trigger
        # (previously these queries ran inside the per-trigger loop).
        thumbnail_embedding = None
        description_embedding = None

        if process_type == "image":
            cursor = self.db.execute_sql(
                """
                SELECT thumbnail_embedding FROM vec_thumbnails WHERE id = ?
                """,
                [event_id],
            )
            row = cursor.fetchone() if cursor else None
            if row:
                thumbnail_embedding = np.frombuffer(row[0], dtype=np.float32)

        if process_type == "text":
            cursor = self.db.execute_sql(
                """
                SELECT description_embedding FROM vec_descriptions WHERE id = ?
                """,
                [event_id],
            )
            row = cursor.fetchone() if cursor else None
            if row:
                description_embedding = np.frombuffer(row[0], dtype=np.float32)

        # Nothing to compare against
        if thumbnail_embedding is None and description_embedding is None:
            logger.debug(f"No embeddings found for {event_id}")
            return

        triggers = (
            Trigger.select(
                Trigger.camera,
                Trigger.name,
                Trigger.data,
                Trigger.type,
                Trigger.embedding,
                Trigger.threshold,
            )
            .where(Trigger.camera == camera)
            .dicts()
            .iterator()
        )

        for trigger in triggers:
            if (
                trigger["name"] not in camera_triggers
                or not camera_triggers[trigger["name"]].enabled
            ):
                logger.debug(
                    f"Trigger {trigger['name']} is disabled for camera {camera}"
                )
                continue

            logger.debug(
                f"Processing {trigger['type']} trigger for {event_id} on {trigger['camera']}: {trigger['name']}"
            )

            trigger_embedding = np.frombuffer(trigger["embedding"], dtype=np.float32)

            # Choose which event embedding to compare based on trigger type
            if (
                trigger["type"] in ["text", "thumbnail"]
                and thumbnail_embedding is not None
            ):
                data_embedding = thumbnail_embedding
                normalized_distance = self.thumb_stats.normalize(
                    [cosine_distance(data_embedding, trigger_embedding)],
                    save_stats=False,
                )[0]
            elif trigger["type"] == "description" and description_embedding is not None:
                data_embedding = description_embedding
                normalized_distance = self.desc_stats.normalize(
                    [cosine_distance(data_embedding, trigger_embedding)],
                    save_stats=False,
                )[0]
            else:
                continue

            similarity = 1 - normalized_distance

            logger.debug(
                f"Trigger {trigger['name']} ({trigger['data'] if trigger['type'] == 'text' or trigger['type'] == 'description' else 'image'}): "
                f"normalized distance: {normalized_distance:.4f}, "
                f"similarity: {similarity:.4f}, threshold: {trigger['threshold']}"
            )

            if similarity >= trigger["threshold"]:
                logger.info(
                    f"Trigger {trigger['name']} activated with similarity {similarity:.4f}"
                )

                # Record the firing on the trigger row
                Trigger.update(
                    last_triggered=datetime.datetime.now(),
                    triggering_event_id=event_id,
                ).where(
                    Trigger.camera == camera, Trigger.name == trigger["name"]
                ).execute()

                # Always publish so other components (e.g. webpush) can react
                self.requestor.send_data(
                    "triggers",
                    json.dumps(
                        {
                            "name": trigger["name"],
                            "camera": camera,
                            "event_id": event_id,
                            "type": trigger["type"],
                            "score": similarity,
                        }
                    ),
                )

                if camera_triggers[trigger["name"]].actions:
                    # TODO: handle actions for the trigger
                    # notifications already handled by webpush
                    pass

            if WRITE_DEBUG_IMAGES:
                # Isolated so a missing event only skips the debug image,
                # instead of aborting the remaining triggers (previous
                # behavior returned from process_data entirely).
                self._write_debug_image(event_id, similarity)

    def _write_debug_image(self, event_id: str, similarity: float) -> None:
        """Write a similarity-annotated thumbnail to debug/frames (debug only)."""
        try:
            event: Event = Event.get(Event.id == event_id)
        except DoesNotExist:
            return

        # Skip the event if not an object
        if event.data.get("type") != "object":
            return

        thumbnail_bytes = get_event_thumbnail_bytes(event)

        nparr = np.frombuffer(thumbnail_bytes, np.uint8)
        thumbnail = cv2.imdecode(nparr, cv2.IMREAD_COLOR)

        cv2.putText(
            thumbnail,
            f"{similarity:.4f}",
            (10, 30),
            cv2.FONT_HERSHEY_SIMPLEX,
            fontScale=0.5,
            color=(0, 255, 0),
            thickness=2,
        )

        current_time = int(datetime.datetime.now().timestamp())
        cv2.imwrite(
            f"debug/frames/trigger-{event_id}_{current_time}.jpg",
            thumbnail,
        )

    def handle_request(self, topic, request_data):
        # This processor is push-driven; it serves no on-demand requests.
        return None

    def expire_object(self, object_id, camera):
        # No per-object state is kept, so expiry is a no-op.
        pass
str, thumbnail: bytes) -> None: + return self.requestor.send_data( + EmbeddingsRequestEnum.embed_thumbnail.value, + {"id": str(event_id), "thumbnail": str(thumbnail), "upsert": False}, + ) diff --git a/frigate/embeddings/embeddings.py b/frigate/embeddings/embeddings.py index 833ab9ab2..a0981f669 100644 --- a/frigate/embeddings/embeddings.py +++ b/frigate/embeddings/embeddings.py @@ -7,21 +7,26 @@ import os import threading import time -from numpy import ndarray +import numpy as np +from peewee import DoesNotExist, IntegrityError from PIL import Image from playhouse.shortcuts import model_to_dict +from frigate.comms.embeddings_updater import ( + EmbeddingsRequestEnum, +) from frigate.comms.inter_process import InterProcessRequestor from frigate.config import FrigateConfig from frigate.config.classification import SemanticSearchModelEnum from frigate.const import ( CONFIG_DIR, + TRIGGER_DIR, UPDATE_EMBEDDINGS_REINDEX_PROGRESS, UPDATE_MODEL_STATE, ) from frigate.data_processing.types import DataProcessorMetrics from frigate.db.sqlitevecq import SqliteVecQueueDatabase -from frigate.models import Event +from frigate.models import Event, Trigger from frigate.types import ModelStatusTypesEnum from frigate.util.builtin import EventsPerSecond, InferenceSpeed, serialize from frigate.util.path import get_event_thumbnail_bytes @@ -167,7 +172,7 @@ class Embeddings: def embed_thumbnail( self, event_id: str, thumbnail: bytes, upsert: bool = True - ) -> ndarray: + ) -> np.ndarray: """Embed thumbnail and optionally insert into DB. @param: event_id in Events DB @@ -194,7 +199,7 @@ class Embeddings: def batch_embed_thumbnail( self, event_thumbs: dict[str, bytes], upsert: bool = True - ) -> list[ndarray]: + ) -> list[np.ndarray]: """Embed thumbnails and optionally insert into DB. 
@param: event_thumbs Map of Event IDs in DB to thumbnail bytes in jpg format @@ -244,7 +249,7 @@ class Embeddings: def embed_description( self, event_id: str, description: str, upsert: bool = True - ) -> ndarray: + ) -> np.ndarray: start = datetime.datetime.now().timestamp() embedding = self.text_embedding([description])[0] @@ -264,7 +269,7 @@ class Embeddings: def batch_embed_description( self, event_descriptions: dict[str, str], upsert: bool = True - ) -> ndarray: + ) -> np.ndarray: start = datetime.datetime.now().timestamp() # upsert embeddings one by one to avoid token limit embeddings = [] @@ -417,3 +422,224 @@ class Embeddings: with self.reindex_lock: self.reindex_running = False self.reindex_thread = None + + def sync_triggers(self) -> None: + for camera in self.config.cameras.values(): + # Get all existing triggers for this camera + existing_triggers = { + trigger.name: trigger + for trigger in Trigger.select().where(Trigger.camera == camera.name) + } + + # Get all configured trigger names + configured_trigger_names = set(camera.semantic_search.triggers or {}) + + # Create or update triggers from config + for trigger_name, trigger in ( + camera.semantic_search.triggers or {} + ).items(): + if trigger_name in existing_triggers: + existing_trigger = existing_triggers[trigger_name] + needs_embedding_update = False + thumbnail_missing = False + + # Check if data has changed or thumbnail is missing for thumbnail type + if trigger.type == "thumbnail": + thumbnail_path = os.path.join( + TRIGGER_DIR, camera.name, f"{trigger.data}.webp" + ) + try: + event = Event.get(Event.id == trigger.data) + if event.data.get("type") != "object": + logger.warning( + f"Event {trigger.data} is not a tracked object for {trigger.type} trigger" + ) + continue # Skip if not an object + + # Check if thumbnail needs to be updated (data changed or missing) + if ( + existing_trigger.data != trigger.data + or not os.path.exists(thumbnail_path) + ): + thumbnail = 
get_event_thumbnail_bytes(event) + if not thumbnail: + logger.warning( + f"Unable to retrieve thumbnail for event ID {trigger.data} for {trigger_name}." + ) + continue + self.write_trigger_thumbnail( + camera.name, trigger.data, thumbnail + ) + thumbnail_missing = True + except DoesNotExist: + logger.warning( + f"Event ID {trigger.data} for trigger {trigger_name} does not exist." + ) + continue + + # Update existing trigger if data has changed + if ( + existing_trigger.type != trigger.type + or existing_trigger.data != trigger.data + or existing_trigger.threshold != trigger.threshold + ): + existing_trigger.type = trigger.type + existing_trigger.data = trigger.data + existing_trigger.threshold = trigger.threshold + needs_embedding_update = True + + # Check if embedding is missing or needs update + if ( + not existing_trigger.embedding + or needs_embedding_update + or thumbnail_missing + ): + existing_trigger.embedding = self._calculate_trigger_embedding( + trigger + ) + needs_embedding_update = True + + if needs_embedding_update: + existing_trigger.save() + else: + # Create new trigger + try: + try: + event: Event = Event.get(Event.id == trigger.data) + except DoesNotExist: + logger.warning( + f"Event ID {trigger.data} for trigger {trigger_name} does not exist." + ) + continue + + # Skip the event if not an object + if event.data.get("type") != "object": + logger.warning( + f"Event ID {trigger.data} for trigger {trigger_name} is not a tracked object." + ) + continue + + thumbnail = get_event_thumbnail_bytes(event) + + if not thumbnail: + logger.warning( + f"Unable to retrieve thumbnail for event ID {trigger.data} for {trigger_name}." 
+ ) + continue + + self.write_trigger_thumbnail( + camera.name, trigger.data, thumbnail + ) + + # Calculate embedding for new trigger + embedding = self._calculate_trigger_embedding(trigger) + + Trigger.create( + camera=camera.name, + name=trigger_name, + type=trigger.type, + data=trigger.data, + threshold=trigger.threshold, + model=self.config.semantic_search.model, + embedding=embedding, + triggering_event_id="", + last_triggered=None, + ) + + except IntegrityError: + pass # Handle duplicate creation attempts + + # Remove triggers that are no longer in config + triggers_to_remove = ( + set(existing_triggers.keys()) - configured_trigger_names + ) + if triggers_to_remove: + Trigger.delete().where( + Trigger.camera == camera.name, Trigger.name.in_(triggers_to_remove) + ).execute() + for trigger_name in triggers_to_remove: + self.remove_trigger_thumbnail(camera.name, trigger_name) + + def write_trigger_thumbnail( + self, camera: str, event_id: str, thumbnail: bytes + ) -> None: + """Write the thumbnail to the trigger directory.""" + try: + os.makedirs(os.path.join(TRIGGER_DIR, camera), exist_ok=True) + with open(os.path.join(TRIGGER_DIR, camera, f"{event_id}.webp"), "wb") as f: + f.write(thumbnail) + logger.debug( + f"Writing thumbnail for trigger with data {event_id} in {camera}." + ) + except Exception as e: + logger.error( + f"Failed to write thumbnail for trigger with data {event_id} in {camera}: {e}" + ) + + def remove_trigger_thumbnail(self, camera: str, event_id: str) -> None: + """Write the thumbnail to the trigger directory.""" + try: + os.remove(os.path.join(TRIGGER_DIR, camera, f"{event_id}.webp")) + logger.debug( + f"Deleted thumbnail for trigger with data {event_id} in {camera}." 
+ ) + except Exception as e: + logger.error( + f"Failed to delete thumbnail for trigger with data {event_id} in {camera}: {e}" + ) + + def _calculate_trigger_embedding(self, trigger) -> bytes: + """Calculate embedding for a trigger based on its type and data.""" + if trigger.type == "description": + logger.debug(f"Generating embedding for trigger description {trigger.name}") + embedding = self.requestor.send_data( + EmbeddingsRequestEnum.embed_description.value, + {"id": None, "description": trigger.data, "upsert": False}, + ) + return embedding.astype(np.float32).tobytes() + + elif trigger.type == "thumbnail": + # For image triggers, trigger.data should be an image ID + # Try to get embedding from vec_thumbnails table first + cursor = self.db.execute_sql( + "SELECT thumbnail_embedding FROM vec_thumbnails WHERE id = ?", + [trigger.data], + ) + row = cursor.fetchone() if cursor else None + if row: + return row[0] # Already in bytes format + else: + logger.debug( + f"No thumbnail embedding found for image ID: {trigger.data}, generating from saved trigger thumbnail" + ) + + try: + with open( + os.path.join( + TRIGGER_DIR, trigger.camera, f"{trigger.data}.webp" + ), + "rb", + ) as f: + thumbnail = f.read() + except Exception as e: + logger.error( + f"Failed to read thumbnail for trigger {trigger.name} with ID {trigger.data}: {e}" + ) + return b"" + + logger.debug( + f"Generating embedding for trigger thumbnail {trigger.name} with ID {trigger.data}" + ) + embedding = self.requestor.send_data( + EmbeddingsRequestEnum.embed_thumbnail.value, + { + "id": str(trigger.data), + "thumbnail": str(thumbnail), + "upsert": False, + }, + ) + return embedding.astype(np.float32).tobytes() + + else: + logger.warning(f"Unknown trigger type: {trigger.type}") + return b"" diff --git a/frigate/embeddings/maintainer.py b/frigate/embeddings/maintainer.py index c659d04fe..ec8e20a48 100644 --- a/frigate/embeddings/maintainer.py +++ b/frigate/embeddings/maintainer.py @@ -14,7 +14,10 @@ import 
numpy as np from peewee import DoesNotExist from frigate.comms.detections_updater import DetectionSubscriber, DetectionTypeEnum -from frigate.comms.embeddings_updater import EmbeddingsRequestEnum, EmbeddingsResponder +from frigate.comms.embeddings_updater import ( + EmbeddingsRequestEnum, + EmbeddingsResponder, +) from frigate.comms.event_metadata_updater import ( EventMetadataPublisher, EventMetadataSubscriber, @@ -46,6 +49,7 @@ from frigate.data_processing.post.audio_transcription import ( from frigate.data_processing.post.license_plate import ( LicensePlatePostProcessor, ) +from frigate.data_processing.post.semantic_trigger import SemanticTriggerProcessor from frigate.data_processing.real_time.api import RealTimeProcessorApi from frigate.data_processing.real_time.bird import BirdRealTimeProcessor from frigate.data_processing.real_time.custom_classification import ( @@ -60,7 +64,7 @@ from frigate.data_processing.types import DataProcessorMetrics, PostProcessDataE from frigate.db.sqlitevecq import SqliteVecQueueDatabase from frigate.events.types import EventTypeEnum, RegenerateDescriptionEnum from frigate.genai import get_genai_client -from frigate.models import Event, Recordings +from frigate.models import Event, Recordings, Trigger from frigate.types import TrackedObjectUpdateTypesEnum from frigate.util.builtin import serialize from frigate.util.image import ( @@ -93,7 +97,11 @@ class EmbeddingMaintainer(threading.Thread): self.config_updater = CameraConfigUpdateSubscriber( self.config, self.config.cameras, - [CameraConfigUpdateEnum.add, CameraConfigUpdateEnum.remove], + [ + CameraConfigUpdateEnum.add, + CameraConfigUpdateEnum.remove, + CameraConfigUpdateEnum.semantic_search, + ], ) # Configure Frigate DB @@ -109,7 +117,7 @@ class EmbeddingMaintainer(threading.Thread): ), load_vec_extension=True, ) - models = [Event, Recordings] + models = [Event, Recordings, Trigger] db.bind(models) if config.semantic_search.enabled: @@ -119,6 +127,9 @@ class 
EmbeddingMaintainer(threading.Thread): if config.semantic_search.reindex: self.embeddings.reindex() + # Sync semantic search triggers in db with config + self.embeddings.sync_triggers() + # create communication for updating event descriptions self.requestor = InterProcessRequestor() @@ -211,6 +222,17 @@ class EmbeddingMaintainer(threading.Thread): AudioTranscriptionPostProcessor(self.config, self.requestor, metrics) ) + if self.config.semantic_search.enabled: + self.post_processors.append( + SemanticTriggerProcessor( + db, + self.config, + self.requestor, + metrics, + self.embeddings, + ) + ) + self.stop_event = stop_event self.tracked_events: dict[str, list[Any]] = {} self.early_request_sent: dict[str, bool] = {} @@ -387,33 +409,6 @@ class EmbeddingMaintainer(threading.Thread): event_id, camera, updated_db = ended camera_config = self.config.cameras[camera] - # call any defined post processors - for processor in self.post_processors: - if isinstance(processor, LicensePlatePostProcessor): - recordings_available = self.recordings_available_through.get(camera) - if ( - recordings_available is not None - and event_id in self.detected_license_plates - and self.config.cameras[camera].type != "lpr" - ): - processor.process_data( - { - "event_id": event_id, - "camera": camera, - "recordings_available": self.recordings_available_through[ - camera - ], - "obj_data": self.detected_license_plates[event_id][ - "obj_data" - ], - }, - PostProcessDataEnum.recording, - ) - elif isinstance(processor, AudioTranscriptionPostProcessor): - continue - else: - processor.process_data(event_id, PostProcessDataEnum.event_id) - # expire in realtime processors for processor in self.realtime_processors: processor.expire_object(event_id, camera) @@ -450,6 +445,41 @@ class EmbeddingMaintainer(threading.Thread): ): self._process_genai_description(event, camera_config, thumbnail) + # call any defined post processors + for processor in self.post_processors: + if isinstance(processor, 
LicensePlatePostProcessor): + recordings_available = self.recordings_available_through.get(camera) + if ( + recordings_available is not None + and event_id in self.detected_license_plates + and self.config.cameras[camera].type != "lpr" + ): + processor.process_data( + { + "event_id": event_id, + "camera": camera, + "recordings_available": self.recordings_available_through[ + camera + ], + "obj_data": self.detected_license_plates[event_id][ + "obj_data" + ], + }, + PostProcessDataEnum.recording, + ) + elif isinstance(processor, AudioTranscriptionPostProcessor): + continue + elif isinstance(processor, SemanticTriggerProcessor): + processor.process_data( + {"event_id": event_id, "camera": camera, "type": "image"}, + PostProcessDataEnum.tracked_object, + ) + else: + processor.process_data( + {"event_id": event_id, "camera": camera}, + PostProcessDataEnum.tracked_object, + ) + # Delete tracked events based on the event_id if event_id in self.tracked_events: del self.tracked_events[event_id] @@ -658,6 +688,16 @@ class EmbeddingMaintainer(threading.Thread): if self.config.semantic_search.enabled: self.embeddings.embed_description(event.id, description) + # Check semantic trigger for this description + for processor in self.post_processors: + if isinstance(processor, SemanticTriggerProcessor): + processor.process_data( + {"event_id": event.id, "camera": event.camera, "type": "text"}, + PostProcessDataEnum.tracked_object, + ) + else: + continue + logger.debug( "Generated description for %s (%d images): %s", event.id, diff --git a/frigate/models.py b/frigate/models.py index 5aa0dc5b2..0ef4650b3 100644 --- a/frigate/models.py +++ b/frigate/models.py @@ -1,6 +1,8 @@ from peewee import ( + BlobField, BooleanField, CharField, + CompositeKey, DateTimeField, FloatField, ForeignKeyField, @@ -132,3 +134,18 @@ class User(Model): # type: ignore[misc] ) password_hash = CharField(null=False, max_length=120) notification_tokens = JSONField() + + +class Trigger(Model): # type: 
ignore[misc] + camera = CharField(max_length=20) + name = CharField() + type = CharField(max_length=10) + data = TextField() + threshold = FloatField() + model = CharField(max_length=30) + embedding = BlobField() + triggering_event_id = CharField(max_length=30) + last_triggered = DateTimeField() + + class Meta: + primary_key = CompositeKey("camera", "name") diff --git a/frigate/util/builtin.py b/frigate/util/builtin.py index d4f8d7e37..5ab29a6ea 100644 --- a/frigate/util/builtin.py +++ b/frigate/util/builtin.py @@ -428,3 +428,19 @@ def sanitize_float(value): if isinstance(value, (int, float)) and not math.isfinite(value): return 0.0 return value + + +def cosine_similarity(a: np.ndarray, b: np.ndarray) -> float: + return 1 - cosine_distance(a, b) + + +def cosine_distance(a: np.ndarray, b: np.ndarray) -> float: + """Returns cosine distance to match sqlite-vec's calculation.""" + dot = np.dot(a, b) + a_mag = np.dot(a, a) # ||a||^2 + b_mag = np.dot(b, b) # ||b||^2 + + if a_mag == 0 or b_mag == 0: + return 1.0 + + return 1.0 - (dot / (np.sqrt(a_mag) * np.sqrt(b_mag))) diff --git a/migrations/031_create_trigger_table.py b/migrations/031_create_trigger_table.py new file mode 100644 index 000000000..7c8c289cc --- /dev/null +++ b/migrations/031_create_trigger_table.py @@ -0,0 +1,50 @@ +"""Peewee migrations -- 031_create_trigger_table.py. + +This migration creates the Trigger table to track semantic search triggers for cameras. 
+ +Some examples (model - class or model_name):: + + > Model = migrator.orm['model_name'] # Return model in current state by name + > migrator.sql(sql) # Run custom SQL + > migrator.python(func, *args, **kwargs) # Run python code + > migrator.create_model(Model) # Create a model (could be used as decorator) + > migrator.remove_model(model, cascade=True) # Remove a model + > migrator.add_fields(model, **fields) # Add fields to a model + > migrator.change_fields(model, **fields) # Change fields + > migrator.remove_fields(model, *field_names, cascade=True) + > migrator.rename_field(model, old_field_name, new_field_name) + > migrator.rename_table(model, new_table_name) + > migrator.add_index(model, *col_names, unique=False) + > migrator.drop_index(model, *col_names) + > migrator.add_not_null(model, *field_names) + > migrator.drop_not_null(model, *field_names) + > migrator.add_default(model, field_name, default) + +""" + +import peewee as pw + +SQL = pw.SQL + + +def migrate(migrator, database, fake=False, **kwargs): + migrator.sql( + """ + CREATE TABLE IF NOT EXISTS trigger ( + camera VARCHAR(20) NOT NULL, + name VARCHAR NOT NULL, + type VARCHAR(10) NOT NULL, + model VARCHAR(30) NOT NULL, + data TEXT NOT NULL, + threshold REAL, + embedding BLOB, + triggering_event_id VARCHAR(30), + last_triggered DATETIME, + PRIMARY KEY (camera, name) + ) + """ + ) + + +def rollback(migrator, database, fake=False, **kwargs): + migrator.sql("DROP TABLE IF EXISTS trigger") diff --git a/web/public/locales/en/components/dialog.json b/web/public/locales/en/components/dialog.json index 8b2dc0b88..02ab43c4c 100644 --- a/web/public/locales/en/components/dialog.json +++ b/web/public/locales/en/components/dialog.json @@ -109,5 +109,12 @@ "markAsReviewed": "Mark as reviewed", "deleteNow": "Delete Now" } + }, + "imagePicker": { + "selectImage": "Select a tracked object's thumbnail", + "search": { + "placeholder": "Search by label or sub label..." 
+ }, + "noImages": "No thumbnails found for this camera" } } diff --git a/web/public/locales/en/views/explore.json b/web/public/locales/en/views/explore.json index 8a61dcf58..d754fee77 100644 --- a/web/public/locales/en/views/explore.json +++ b/web/public/locales/en/views/explore.json @@ -175,6 +175,10 @@ "label": "Find similar", "aria": "Find similar tracked objects" }, + "addTrigger": { + "label": "Add trigger", + "aria": "Add a trigger for this tracked object" + }, "audioTranscription": { "label": "Transcribe", "aria": "Request audio transcription" diff --git a/web/public/locales/en/views/settings.json b/web/public/locales/en/views/settings.json index 14dc809bc..b396babde 100644 --- a/web/public/locales/en/views/settings.json +++ b/web/public/locales/en/views/settings.json @@ -644,5 +644,100 @@ "success": "Frigate+ settings have been saved. Restart Frigate to apply changes.", "error": "Failed to save config changes: {{errorMessage}}" } + }, + "triggers": { + "documentTitle": "Triggers", + "management": { + "title": "Trigger Management", + "desc": "Manage triggers for {{camera}}. Use the thumbnail type to trigger on similar thumbnails to your selected tracked object, and the description type to trigger on similar descriptions to text you specify." 
+ }, + "addTrigger": "Add Trigger", + "table": { + "name": "Name", + "type": "Type", + "content": "Content", + "threshold": "Threshold", + "actions": "Actions", + "noTriggers": "No triggers configured for this camera.", + "edit": "Edit", + "deleteTrigger": "Delete Trigger", + "lastTriggered": "Last triggered" + }, + "type": { + "thumbnail": "Thumbnail", + "description": "Description" + }, + "actions": { + "alert": "Mark as Alert", + "notification": "Send Notification" + }, + "dialog": { + "createTrigger": { + "title": "Create Trigger", + "desc": "Create a trigger for camera {{camera}}" + }, + "editTrigger": { + "title": "Edit Trigger", + "desc": "Edit the settings for trigger on camera {{camera}}" + }, + "deleteTrigger": { + "title": "Delete Trigger", + "desc": "Are you sure you want to delete the trigger {{triggerName}}? This action cannot be undone." + }, + "form": { + "name": { + "title": "Name", + "placeholder": "Enter trigger name", + "error": { + "minLength": "Name must be at least 2 characters long.", + "invalidCharacters": "Name can only contain letters, numbers, underscores, and hyphens.", + "alreadyExists": "A trigger with this name already exists for this camera." + } + }, + "enabled": { + "description": "Enable or disable this trigger" + }, + "type": { + "title": "Type", + "placeholder": "Select trigger type" + }, + "content": { + "title": "Content", + "imagePlaceholder": "Select an image", + "textPlaceholder": "Enter text content", + "imageDesc": "Select an image to trigger this action when a similar image is detected.", + "textDesc": "Enter text to trigger this action when a similar tracked object description is detected.", + "error": { + "required": "Content is required." + } + }, + "threshold": { + "title": "Threshold", + "error": { + "min": "Threshold must be at least 0", + "max": "Threshold must be at most 1" + } + }, + "actions": { + "title": "Actions", + "desc": "By default, Frigate fires an MQTT message for all triggers. 
Choose an additional action to perform when this trigger fires.", + "error": { + "min": "At least one action must be selected." + } + } + } + }, + "toast": { + "success": { + "createTrigger": "Trigger {{name}} created successfully.", + "updateTrigger": "Trigger {{name}} updated successfully.", + "deleteTrigger": "Trigger {{name}} deleted successfully." + }, + "error": { + "createTriggerFailed": "Failed to create trigger: {{errorMessage}}", + "updateTriggerFailed": "Failed to update trigger: {{errorMessage}}", + "deleteTriggerFailed": "Failed to delete trigger: {{errorMessage}}" + } + } } } diff --git a/web/src/api/ws.tsx b/web/src/api/ws.tsx index 78c596e13..cc3ea05bf 100644 --- a/web/src/api/ws.tsx +++ b/web/src/api/ws.tsx @@ -9,6 +9,7 @@ import { ModelState, ToggleableSetting, TrackedObjectUpdateReturnType, + TriggerStatus, } from "@/types/ws"; import { FrigateStats } from "@/types/stats"; import { createContainer } from "react-tracked"; @@ -572,3 +573,13 @@ export function useNotificationTest(): { } = useWs("notification_test", "notification_test"); return { payload: payload as string, send }; } + +export function useTriggers(): { payload: TriggerStatus } { + const { + value: { payload }, + } = useWs("triggers", ""); + const parsed = payload + ? 
JSON.parse(payload as string) + : { name: "", camera: "", event_id: "", type: "", score: 0 }; + return { payload: useDeepMemo(parsed) }; +} diff --git a/web/src/components/card/SearchThumbnailFooter.tsx b/web/src/components/card/SearchThumbnailFooter.tsx index c86e9c3c6..e23d1c3f6 100644 --- a/web/src/components/card/SearchThumbnailFooter.tsx +++ b/web/src/components/card/SearchThumbnailFooter.tsx @@ -15,6 +15,7 @@ type SearchThumbnailProps = { refreshResults: () => void; showObjectLifecycle: () => void; showSnapshot: () => void; + addTrigger: () => void; }; export default function SearchThumbnailFooter({ @@ -24,6 +25,7 @@ export default function SearchThumbnailFooter({ refreshResults, showObjectLifecycle, showSnapshot, + addTrigger, }: SearchThumbnailProps) { const { t } = useTranslation(["views/search"]); const { data: config } = useSWR("config"); @@ -61,6 +63,7 @@ export default function SearchThumbnailFooter({ refreshResults={refreshResults} showObjectLifecycle={showObjectLifecycle} showSnapshot={showSnapshot} + addTrigger={addTrigger} />
    diff --git a/web/src/components/menu/SearchResultActions.tsx b/web/src/components/menu/SearchResultActions.tsx index 1779430f0..2c928becf 100644 --- a/web/src/components/menu/SearchResultActions.tsx +++ b/web/src/components/menu/SearchResultActions.tsx @@ -41,6 +41,7 @@ import { import useSWR from "swr"; import { Trans, useTranslation } from "react-i18next"; +import { BsFillLightningFill } from "react-icons/bs"; type SearchResultActionsProps = { searchResult: SearchResult; @@ -48,6 +49,7 @@ type SearchResultActionsProps = { refreshResults: () => void; showObjectLifecycle: () => void; showSnapshot: () => void; + addTrigger: () => void; isContextMenu?: boolean; children?: ReactNode; }; @@ -58,6 +60,7 @@ export default function SearchResultActions({ refreshResults, showObjectLifecycle, showSnapshot, + addTrigger, isContextMenu = false, children, }: SearchResultActionsProps) { @@ -138,6 +141,16 @@ export default function SearchResultActions({ {t("itemMenu.findSimilar.label")} )} + {config?.semantic_search?.enabled && + searchResult.data.type == "object" && ( + + + {t("itemMenu.addTrigger.label")} + + )} {isMobileOnly && config?.plus?.enabled && searchResult.has_snapshot && diff --git a/web/src/components/overlay/CreateTriggerDialog.tsx b/web/src/components/overlay/CreateTriggerDialog.tsx new file mode 100644 index 000000000..5672c4802 --- /dev/null +++ b/web/src/components/overlay/CreateTriggerDialog.tsx @@ -0,0 +1,416 @@ +import { useEffect, useMemo } from "react"; +import { useTranslation } from "react-i18next"; +import { useForm } from "react-hook-form"; +import { zodResolver } from "@hookform/resolvers/zod"; +import { z } from "zod"; +import useSWR from "swr"; +import { + Dialog, + DialogContent, + DialogDescription, + DialogFooter, + DialogHeader, + DialogTitle, +} from "@/components/ui/dialog"; +import { + Form, + FormControl, + FormDescription, + FormField, + FormItem, + FormLabel, + FormMessage, +} from "@/components/ui/form"; +import { Input } from 
"@/components/ui/input"; +import { + Select, + SelectContent, + SelectItem, + SelectTrigger, + SelectValue, +} from "@/components/ui/select"; +import { Checkbox } from "@/components/ui/checkbox"; +import { Button } from "@/components/ui/button"; +import ActivityIndicator from "@/components/indicators/activity-indicator"; +import { FrigateConfig } from "@/types/frigateConfig"; +import ImagePicker from "@/components/overlay/ImagePicker"; +import { Trigger, TriggerAction, TriggerType } from "@/types/trigger"; +import { Switch } from "@/components/ui/switch"; +import { Textarea } from "../ui/textarea"; + +type CreateTriggerDialogProps = { + show: boolean; + trigger: Trigger | null; + selectedCamera: string; + isLoading: boolean; + onCreate: ( + enabled: boolean, + name: string, + type: TriggerType, + data: string, + threshold: number, + actions: TriggerAction[], + ) => void; + onEdit: (trigger: Trigger) => void; + onCancel: () => void; +}; + +export default function CreateTriggerDialog({ + show, + trigger, + selectedCamera, + isLoading, + onCreate, + onEdit, + onCancel, +}: CreateTriggerDialogProps) { + const { t } = useTranslation("views/settings"); + const { data: config } = useSWR("config"); + + const existingTriggerNames = useMemo(() => { + if ( + !config || + !selectedCamera || + !config.cameras[selectedCamera]?.semantic_search?.triggers + ) { + return []; + } + return Object.keys(config.cameras[selectedCamera].semantic_search.triggers); + }, [config, selectedCamera]); + + const formSchema = z.object({ + enabled: z.boolean(), + name: z + .string() + .min(2, t("triggers.dialog.form.name.error.minLength")) + .regex( + /^[a-zA-Z0-9_-]+$/, + t("triggers.dialog.form.name.error.invalidCharacters"), + ) + .refine( + (value) => + !existingTriggerNames.includes(value) || value === trigger?.name, + t("triggers.dialog.form.name.error.alreadyExists"), + ), + type: z.enum(["thumbnail", "description"]), + data: z.string().min(1, 
t("triggers.dialog.form.content.error.required")), + threshold: z + .number() + .min(0, t("triggers.dialog.form.threshold.error.min")) + .max(1, t("triggers.dialog.form.threshold.error.max")), + actions: z.array(z.enum(["notification"])), + }); + + const form = useForm>({ + resolver: zodResolver(formSchema), + mode: "onChange", + defaultValues: { + enabled: trigger?.enabled ?? true, + name: trigger?.name ?? "", + type: trigger?.type ?? "description", + data: trigger?.data ?? "", + threshold: trigger?.threshold ?? 0.5, + actions: trigger?.actions ?? [], + }, + }); + + const onSubmit = async (values: z.infer) => { + if (trigger) { + onEdit({ ...values }); + } else { + onCreate( + values.enabled, + values.name, + values.type, + values.data, + values.threshold, + values.actions, + ); + } + }; + + useEffect(() => { + if (!show) { + form.reset({ + enabled: true, + name: "", + type: "description", + data: "", + threshold: 0.5, + actions: [], + }); + } else if (trigger) { + form.reset( + { + enabled: trigger.enabled, + name: trigger.name, + type: trigger.type, + data: trigger.data, + threshold: trigger.threshold, + actions: trigger.actions, + }, + { keepDirty: false, keepTouched: false }, // Reset validation state + ); + // Trigger validation to ensure isValid updates + // form.trigger(); + } + }, [show, trigger, form]); + + const handleCancel = () => { + form.reset(); + onCancel(); + }; + + return ( + + + + + {t( + trigger + ? "triggers.dialog.editTrigger.title" + : "triggers.dialog.createTrigger.title", + )} + + + {t( + trigger + ? "triggers.dialog.editTrigger.desc" + : "triggers.dialog.createTrigger.desc", + { camera: selectedCamera }, + )} + + + +
    + + ( + + {t("triggers.dialog.form.name.title")} + + + + + + )} + /> + + ( + +
    + + {t("enabled", { ns: "common" })} + +
    + {t("triggers.dialog.form.enabled.description")} +
    +
    + + + +
    + )} + /> + + ( + + {t("triggers.dialog.form.type.title")} + + + + )} + /> + + ( + + + {t("triggers.dialog.form.content.title")} + + {form.watch("type") === "thumbnail" ? ( + <> + + + + + {t("triggers.dialog.form.content.imageDesc")} + + + ) : ( + <> + +