From d24b96d3bb8ac43500e35fbcc96c45df460b26ff Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Thu, 26 Feb 2026 21:16:10 -0700 Subject: [PATCH 01/56] Early 0.18 work (#22138) * Update version * Create scaffolding for case management (#21293) * implement case management for export apis (#21295) * refactor vainfo to search for first GPU (#21296) use existing LibvaGpuSelector to pick appropritate libva device * Case management UI (#21299) * Refactor export cards to match existing cards in other UI pages * Show cases separately from exports * Add proper filtering and display of cases * Add ability to edit and select cases for exports * Cleanup typing * Hide if no unassigned * Cleanup hiding logic * fix scrolling * Improve layout * Camera connection quality indicator (#21297) * add camera connection quality metrics and indicator * formatting * move stall calcs to watchdog * clean up * change watchdog to 1s and separately track time for ffmpeg retry_interval * implement status caching to reduce message volume * Export filter UI (#21322) * Get started on export filters * implement basic filter * Implement filtering and adjust api * Improve filter handling * Improve navigation * Cleanup * handle scrolling * Refactor temperature reporting for detectors and implement Hailo temp reading (#21395) * Add Hailo temperature retrieval * Refactor `get_hailo_temps()` to use ctxmanager * Show Hailo temps in system UI * Move hailo_platform import to get_hailo_temps * Refactor temperatures calculations to use within detector block * Adjust webUI to handle new location --------- Co-authored-by: tigattack <10629864+tigattack@users.noreply.github.com> * Camera-specific hwaccel settings for timelapse exports (correct base) (#21386) * added hwaccel_args to camera.record.export config struct * populate camera.record.export.hwaccel_args with a cascade up to camera then global if 'auto' * use new hwaccel args in export * added documentation for camera-specific hwaccel export * fix c/p error * 
missed an import * fleshed out the docs and comments a bit * ruff lint * separated out the tips in the doc * fix documentation * fix and simplify reference config doc * Add support for GPU and NPU temperatures (#21495) * Add rockchip temps * Add support for GPU and NPU temperatures in the frontend * Add support for Nvidia temperature * Improve separation * Adjust graph scaling * Exports Improvements (#21521) * Add images to case folder view * Add ability to select case in export dialog * Add to mobile review too * Add API to handle deleting recordings (#21520) * Add recording delete API * Re-organize recordings apis * Fix import * Consolidate query types * Add media sync API endpoint (#21526) * add media cleanup functions * add endpoint * remove scheduled sync recordings from cleanup * move to utils dir * tweak import * remove sync_recordings and add config migrator * remove sync_recordings * docs * remove key * clean up docs * docs fix * docs tweak * Media sync API refactor and UI (#21542) * generic job infrastructure * types and dispatcher changes for jobs * save data in memory only for completed jobs * implement media sync job and endpoints * change logs to debug * websocket hook and types * frontend * i18n * docs tweaks * endpoint descriptions * tweak docs * use same logging pattern in sync_recordings as the other sync functions (#21625) * Fix incorrect counting in sync_recordings (#21626) * Update go2rtc to v1.9.13 (#21648) Co-authored-by: Eugeny Tulupov * Refactor Time-Lapse Export (#21668) * refactor time lapse creation to be a separate API call with ability to pass arbitrary ffmpeg args * Add CPU fallback * Optimize empty directory cleanup for recordings (#21695) The previous empty directory cleanup did a full recursive directory walk, which can be extremely slow. This new implementation only removes directories which have a chance of being empty due to a recent file deletion. 
* Implement llama.cpp GenAI Provider (#21690) * Implement llama.cpp GenAI Provider * Add docs * Update links * Fix broken mqtt links * Fix more broken anchors * Remove parents in remove_empty_directories (#21726) The original implementation did a full directory tree walk to find and remove empty directories, so this implementation should remove the parents as well, like the original did. * Implement LLM Chat API with tool calling support (#21731) * Implement initial tools definiton APIs * Add initial chat completion API with tool support * Implement other providers * Cleanup * Offline preview image (#21752) * use latest preview frame for latest image when camera is offline * remove frame extraction logic * tests * frontend * add description to api endpoint * Update to ROCm 7.2.0 (#21753) * Update to ROCm 7.2.0 * ROCm now works properly with JinaV1 * Arcface has compilation error * Add live context tool to LLM (#21754) * Add live context tool * Improve handling of images in request * Improve prompt caching * Add networking options for configuring listening ports (#21779) * feat: add X-Frame-Time when returning snapshot (#21932) Co-authored-by: Florent MORICONI <170678386+fmcloudconsulting@users.noreply.github.com> * Improve jsmpeg player websocket handling (#21943) * improve jsmpeg player websocket handling prevent websocket console messages from appearing when player is destroyed * reformat files after ruff upgrade * Allow API Events to be Detections or Alerts, depending on the Event Label (#21923) * - API created events will be alerts OR detections, depending on the event label, defaulting to alerts - Indefinite API events will extend the recording segment until those events are ended - API event start time is the actual start time, instead of having a pre-buffer of record.event_pre_capture * Instead of checking for indefinite events on a camera before deciding if we should end the segment, only update last_detection_time and last_alert_time if frame_time is 
greater, which should have the same effect * Add the ability to set a pre_capture number of seconds when creating a manual event via the API. Default behavior unchanged * Remove unnecessary _publish_segment_start() call * Formatting * handle last_alert_time or last_detection_time being None when checking them against the frame_time * comment manual_info["label"].split(": ")[0] for clarity * ffmpeg Preview Segment Optimization for "high" and "very_high" (#21996) * Introduce qmax parameter for ffmpeg preview encoding Added PREVIEW_QMAX_PARAM to control ffmpeg encoding quality. * formatting * Fix spacing in qmax parameters for preview quality * Adapt to new Gemini format * Fix frame time access * Remove exceptions * Cleanup --------- Co-authored-by: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Co-authored-by: tigattack <10629864+tigattack@users.noreply.github.com> Co-authored-by: Andrew Roberts Co-authored-by: Eugeny Tulupov Co-authored-by: Eugeny Tulupov Co-authored-by: John Shaw <1753078+johnshaw@users.noreply.github.com> Co-authored-by: Eric Work Co-authored-by: FL42 <46161216+fl42@users.noreply.github.com> Co-authored-by: Florent MORICONI <170678386+fmcloudconsulting@users.noreply.github.com> Co-authored-by: nulledy <254504350+nulledy@users.noreply.github.com> --- Makefile | 5 +- docker/main/Dockerfile | 2 +- .../etc/s6-overlay/s6-rc.d/certsync/run | 7 +- .../rootfs/etc/s6-overlay/s6-rc.d/nginx/run | 12 +- .../rootfs/usr/local/nginx/get_base_path.py | 11 - .../usr/local/nginx/get_listen_settings.py | 35 - .../usr/local/nginx/get_nginx_settings.py | 62 ++ .../local/nginx/templates/base_path.gotmpl | 2 +- .../usr/local/nginx/templates/listen.gotmpl | 61 +- docker/rocm/Dockerfile | 4 +- docker/rocm/requirements-wheels-rocm.txt | 2 +- docker/rocm/rocm.hcl | 2 +- docs/docs/configuration/advanced.md | 43 +- docs/docs/configuration/camera_specific.md | 2 +- docs/docs/configuration/genai/config.md | 42 +- docs/docs/configuration/genai/objects.md | 4 +- 
.../configuration/genai/review_summaries.md | 2 +- docs/docs/configuration/record.md | 21 +- docs/docs/configuration/reference.md | 21 +- docs/docs/configuration/restream.md | 4 +- docs/docs/guides/configuring_go2rtc.md | 6 +- docs/sidebars.ts | 2 +- docs/static/frigate-api.yaml | 60 ++ frigate/api/app.py | 111 ++- frigate/api/auth.py | 36 +- frigate/api/chat.py | 642 ++++++++++++++ .../api/defs/query/media_query_parameters.py | 15 +- .../defs/query/recordings_query_parameters.py | 21 + frigate/api/defs/request/app_body.py | 17 +- frigate/api/defs/request/chat_body.py | 41 + frigate/api/defs/request/events_body.py | 1 + frigate/api/defs/request/export_case_body.py | 35 + .../defs/request/export_recordings_body.py | 43 +- frigate/api/defs/response/chat_response.py | 37 + .../api/defs/response/export_case_response.py | 22 + frigate/api/defs/response/export_response.py | 3 + frigate/api/defs/tags.py | 12 +- frigate/api/event.py | 1 + frigate/api/export.py | 350 +++++++- frigate/api/fastapi_app.py | 4 + frigate/api/media.py | 393 +-------- frigate/api/record.py | 479 +++++++++++ frigate/camera/__init__.py | 4 + frigate/comms/dispatcher.py | 17 + frigate/config/__init__.py | 1 + frigate/config/camera/genai.py | 1 + frigate/config/camera/record.py | 11 +- frigate/config/config.py | 8 + frigate/config/network.py | 18 +- frigate/const.py | 2 +- frigate/detectors/detection_runners.py | 4 +- frigate/genai/__init__.py | 63 +- frigate/genai/azure-openai.py | 93 +- frigate/genai/gemini.py | 199 ++++- frigate/genai/llama_cpp.py | 238 ++++++ frigate/genai/ollama.py | 118 +++ frigate/genai/openai.py | 113 ++- frigate/jobs/__init__.py | 0 frigate/jobs/job.py | 21 + frigate/jobs/manager.py | 70 ++ frigate/jobs/media_sync.py | 135 +++ frigate/models.py | 14 + frigate/output/preview.py | 56 +- frigate/record/cleanup.py | 57 +- frigate/record/export.py | 128 ++- frigate/record/util.py | 147 ---- frigate/review/maintainer.py | 66 +- frigate/stats/util.py | 114 ++- 
.../test/http_api/test_http_latest_frame.py | 107 +++ frigate/test/test_preview_loader.py | 80 ++ frigate/track/object_processing.py | 11 +- frigate/track/tracked_object.py | 19 +- frigate/types.py | 9 + frigate/util/config.py | 52 +- frigate/util/media.py | 808 ++++++++++++++++++ frigate/util/services.py | 106 ++- frigate/video.py | 121 ++- migrations/033_create_export_case_table.py | 50 ++ migrations/034_add_export_case_to_exports.py | 40 + web/public/locales/en/components/dialog.json | 4 + web/public/locales/en/config/cameras.json | 5 +- web/public/locales/en/config/networking.json | 15 +- web/public/locales/en/config/record.json | 5 +- web/public/locales/en/views/exports.json | 18 +- web/public/locales/en/views/settings.json | 48 ++ web/public/locales/en/views/system.json | 13 + web/src/api/ws.tsx | 38 + web/src/components/auth/ProtectedRoute.tsx | 2 +- .../camera/ConnectionQualityIndicator.tsx | 76 ++ web/src/components/card/ExportCard.tsx | 234 ++--- .../components/filter/ExportFilterGroup.tsx | 67 ++ web/src/components/overlay/ExportDialog.tsx | 63 +- .../overlay/MobileReviewSettingsDrawer.tsx | 10 +- .../overlay/dialog/OptionAndInputDialog.tsx | 166 ++++ web/src/components/player/JSMpegPlayer.tsx | 28 +- web/src/components/player/LivePlayer.tsx | 27 + web/src/hooks/use-allowed-cameras.ts | 2 +- web/src/pages/Exports.tsx | 524 ++++++++++-- web/src/pages/Settings.tsx | 6 + web/src/types/export.ts | 19 + web/src/types/frigateConfig.ts | 2 - web/src/types/stats.ts | 8 +- web/src/types/ws.ts | 29 + .../settings/MaintenanceSettingsView.tsx | 442 ++++++++++ web/src/views/system/CameraMetrics.tsx | 34 +- web/src/views/system/GeneralMetrics.tsx | 153 +++- web/vite.config.ts | 2 +- 107 files changed, 6766 insertions(+), 1050 deletions(-) delete mode 100644 docker/main/rootfs/usr/local/nginx/get_base_path.py delete mode 100644 docker/main/rootfs/usr/local/nginx/get_listen_settings.py create mode 100644 docker/main/rootfs/usr/local/nginx/get_nginx_settings.py create 
mode 100644 frigate/api/chat.py create mode 100644 frigate/api/defs/query/recordings_query_parameters.py create mode 100644 frigate/api/defs/request/chat_body.py create mode 100644 frigate/api/defs/request/export_case_body.py create mode 100644 frigate/api/defs/response/chat_response.py create mode 100644 frigate/api/defs/response/export_case_response.py create mode 100644 frigate/api/record.py create mode 100644 frigate/genai/llama_cpp.py create mode 100644 frigate/jobs/__init__.py create mode 100644 frigate/jobs/job.py create mode 100644 frigate/jobs/manager.py create mode 100644 frigate/jobs/media_sync.py delete mode 100644 frigate/record/util.py create mode 100644 frigate/test/http_api/test_http_latest_frame.py create mode 100644 frigate/test/test_preview_loader.py create mode 100644 frigate/util/media.py create mode 100644 migrations/033_create_export_case_table.py create mode 100644 migrations/034_add_export_case_to_exports.py create mode 100644 web/src/components/camera/ConnectionQualityIndicator.tsx create mode 100644 web/src/components/filter/ExportFilterGroup.tsx create mode 100644 web/src/components/overlay/dialog/OptionAndInputDialog.tsx create mode 100644 web/src/views/settings/MaintenanceSettingsView.tsx diff --git a/Makefile b/Makefile index d1427b6df..3800399ea 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ default_target: local COMMIT_HASH := $(shell git log -1 --pretty=format:"%h"|tail -1) -VERSION = 0.17.0 +VERSION = 0.18.0 IMAGE_REPO ?= ghcr.io/blakeblackshear/frigate GITHUB_REF_NAME ?= $(shell git rev-parse --abbrev-ref HEAD) BOARDS= #Initialized empty @@ -49,7 +49,8 @@ push: push-boards --push run: local - docker run --rm --publish=5000:5000 --volume=${PWD}/config:/config frigate:latest + docker run --rm --publish=5000:5000 --publish=8971:8971 \ + --volume=${PWD}/config:/config frigate:latest run_tests: local docker run --rm --workdir=/opt/frigate --entrypoint= frigate:latest \ diff --git a/docker/main/Dockerfile b/docker/main/Dockerfile 
index 055a1458f..b14320033 100644 --- a/docker/main/Dockerfile +++ b/docker/main/Dockerfile @@ -55,7 +55,7 @@ RUN --mount=type=tmpfs,target=/tmp --mount=type=tmpfs,target=/var/cache/apt \ FROM scratch AS go2rtc ARG TARGETARCH WORKDIR /rootfs/usr/local/go2rtc/bin -ADD --link --chmod=755 "https://github.com/AlexxIT/go2rtc/releases/download/v1.9.10/go2rtc_linux_${TARGETARCH}" go2rtc +ADD --link --chmod=755 "https://github.com/AlexxIT/go2rtc/releases/download/v1.9.13/go2rtc_linux_${TARGETARCH}" go2rtc FROM wget AS tempio ARG TARGETARCH diff --git a/docker/main/rootfs/etc/s6-overlay/s6-rc.d/certsync/run b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/certsync/run index 4ce1c133f..b834c09bb 100755 --- a/docker/main/rootfs/etc/s6-overlay/s6-rc.d/certsync/run +++ b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/certsync/run @@ -10,7 +10,8 @@ echo "[INFO] Starting certsync..." lefile="/etc/letsencrypt/live/frigate/fullchain.pem" -tls_enabled=`python3 /usr/local/nginx/get_listen_settings.py | jq -r .tls.enabled` +tls_enabled=`python3 /usr/local/nginx/get_nginx_settings.py | jq -r .tls.enabled` +listen_external_port=`python3 /usr/local/nginx/get_nginx_settings.py | jq -r .listen.external_port` while true do @@ -34,7 +35,7 @@ do ;; esac - liveprint=`echo | openssl s_client -showcerts -connect 127.0.0.1:8971 2>&1 | openssl x509 -fingerprint 2>&1 | grep -i fingerprint || echo 'failed'` + liveprint=`echo | openssl s_client -showcerts -connect 127.0.0.1:$listen_external_port 2>&1 | openssl x509 -fingerprint 2>&1 | grep -i fingerprint || echo 'failed'` case "$liveprint" in *Fingerprint*) @@ -55,4 +56,4 @@ do done -exit 0 \ No newline at end of file +exit 0 diff --git a/docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx/run b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx/run index 8bd9b5250..a3c7b3248 100755 --- a/docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx/run +++ b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx/run @@ -80,14 +80,14 @@ if [ ! 
\( -f "$letsencrypt_path/privkey.pem" -a -f "$letsencrypt_path/fullchain. fi # build templates for optional FRIGATE_BASE_PATH environment variable -python3 /usr/local/nginx/get_base_path.py | \ +python3 /usr/local/nginx/get_nginx_settings.py | \ tempio -template /usr/local/nginx/templates/base_path.gotmpl \ - -out /usr/local/nginx/conf/base_path.conf + -out /usr/local/nginx/conf/base_path.conf -# build templates for optional TLS support -python3 /usr/local/nginx/get_listen_settings.py | \ - tempio -template /usr/local/nginx/templates/listen.gotmpl \ - -out /usr/local/nginx/conf/listen.conf +# build templates for additional network settings +python3 /usr/local/nginx/get_nginx_settings.py | \ + tempio -template /usr/local/nginx/templates/listen.gotmpl \ + -out /usr/local/nginx/conf/listen.conf # Replace the bash process with the NGINX process, redirecting stderr to stdout exec 2>&1 diff --git a/docker/main/rootfs/usr/local/nginx/get_base_path.py b/docker/main/rootfs/usr/local/nginx/get_base_path.py deleted file mode 100644 index 2e78a7de9..000000000 --- a/docker/main/rootfs/usr/local/nginx/get_base_path.py +++ /dev/null @@ -1,11 +0,0 @@ -"""Prints the base path as json to stdout.""" - -import json -import os -from typing import Any - -base_path = os.environ.get("FRIGATE_BASE_PATH", "") - -result: dict[str, Any] = {"base_path": base_path} - -print(json.dumps(result)) diff --git a/docker/main/rootfs/usr/local/nginx/get_listen_settings.py b/docker/main/rootfs/usr/local/nginx/get_listen_settings.py deleted file mode 100644 index d879db56e..000000000 --- a/docker/main/rootfs/usr/local/nginx/get_listen_settings.py +++ /dev/null @@ -1,35 +0,0 @@ -"""Prints the tls config as json to stdout.""" - -import json -import sys -from typing import Any - -from ruamel.yaml import YAML - -sys.path.insert(0, "/opt/frigate") -from frigate.util.config import find_config_file - -sys.path.remove("/opt/frigate") - -yaml = YAML() - -config_file = find_config_file() - -try: - with 
open(config_file) as f: - raw_config = f.read() - - if config_file.endswith((".yaml", ".yml")): - config: dict[str, Any] = yaml.load(raw_config) - elif config_file.endswith(".json"): - config: dict[str, Any] = json.loads(raw_config) -except FileNotFoundError: - config: dict[str, Any] = {} - -tls_config: dict[str, any] = config.get("tls", {"enabled": True}) -networking_config = config.get("networking", {}) -ipv6_config = networking_config.get("ipv6", {"enabled": False}) - -output = {"tls": tls_config, "ipv6": ipv6_config} - -print(json.dumps(output)) diff --git a/docker/main/rootfs/usr/local/nginx/get_nginx_settings.py b/docker/main/rootfs/usr/local/nginx/get_nginx_settings.py new file mode 100644 index 000000000..79cda3686 --- /dev/null +++ b/docker/main/rootfs/usr/local/nginx/get_nginx_settings.py @@ -0,0 +1,62 @@ +"""Prints the nginx settings as json to stdout.""" + +import json +import os +import sys +from typing import Any + +from ruamel.yaml import YAML + +sys.path.insert(0, "/opt/frigate") +from frigate.util.config import find_config_file + +sys.path.remove("/opt/frigate") + +yaml = YAML() + +config_file = find_config_file() + +try: + with open(config_file) as f: + raw_config = f.read() + + if config_file.endswith((".yaml", ".yml")): + config: dict[str, Any] = yaml.load(raw_config) + elif config_file.endswith(".json"): + config: dict[str, Any] = json.loads(raw_config) +except FileNotFoundError: + config: dict[str, Any] = {} + +tls_config: dict[str, Any] = config.get("tls", {}) +tls_config.setdefault("enabled", True) + +networking_config: dict[str, Any] = config.get("networking", {}) +ipv6_config: dict[str, Any] = networking_config.get("ipv6", {}) +ipv6_config.setdefault("enabled", False) + +listen_config: dict[str, Any] = networking_config.get("listen", {}) +listen_config.setdefault("internal", 5000) +listen_config.setdefault("external", 8971) + +# handle case where internal port is a string with ip:port +internal_port = listen_config["internal"] +if 
type(internal_port) is str: + internal_port = int(internal_port.split(":")[-1]) +listen_config["internal_port"] = internal_port + +# handle case where external port is a string with ip:port +external_port = listen_config["external"] +if type(external_port) is str: + external_port = int(external_port.split(":")[-1]) +listen_config["external_port"] = external_port + +base_path = os.environ.get("FRIGATE_BASE_PATH", "") + +result: dict[str, Any] = { + "tls": tls_config, + "ipv6": ipv6_config, + "listen": listen_config, + "base_path": base_path, +} + +print(json.dumps(result)) diff --git a/docker/main/rootfs/usr/local/nginx/templates/base_path.gotmpl b/docker/main/rootfs/usr/local/nginx/templates/base_path.gotmpl index ace4443ee..ca945ba1f 100644 --- a/docker/main/rootfs/usr/local/nginx/templates/base_path.gotmpl +++ b/docker/main/rootfs/usr/local/nginx/templates/base_path.gotmpl @@ -7,7 +7,7 @@ location ^~ {{ .base_path }}/ { # remove base_url from the path before passing upstream rewrite ^{{ .base_path }}/(.*) /$1 break; - proxy_pass $scheme://127.0.0.1:8971; + proxy_pass $scheme://127.0.0.1:{{ .listen.external_port }}; proxy_http_version 1.1; proxy_set_header Upgrade $http_upgrade; proxy_set_header Connection "upgrade"; diff --git a/docker/main/rootfs/usr/local/nginx/templates/listen.gotmpl b/docker/main/rootfs/usr/local/nginx/templates/listen.gotmpl index 066f872cb..628784b60 100644 --- a/docker/main/rootfs/usr/local/nginx/templates/listen.gotmpl +++ b/docker/main/rootfs/usr/local/nginx/templates/listen.gotmpl @@ -1,45 +1,36 @@ - # Internal (IPv4 always; IPv6 optional) -listen 5000; -{{ if .ipv6 }}{{ if .ipv6.enabled }}listen [::]:5000;{{ end }}{{ end }} - +listen {{ .listen.internal }}; +{{ if .ipv6.enabled }}listen [::]:{{ .listen.internal_port }};{{ end }} # intended for external traffic, protected by auth -{{ if .tls }} - {{ if .tls.enabled }} - # external HTTPS (IPv4 always; IPv6 optional) - listen 8971 ssl; - {{ if .ipv6 }}{{ if .ipv6.enabled }}listen 
[::]:8971 ssl;{{ end }}{{ end }} +{{ if .tls.enabled }} + # external HTTPS (IPv4 always; IPv6 optional) + listen {{ .listen.external }} ssl; + {{ if .ipv6.enabled }}listen [::]:{{ .listen.external_port }} ssl;{{ end }} - ssl_certificate /etc/letsencrypt/live/frigate/fullchain.pem; - ssl_certificate_key /etc/letsencrypt/live/frigate/privkey.pem; + ssl_certificate /etc/letsencrypt/live/frigate/fullchain.pem; + ssl_certificate_key /etc/letsencrypt/live/frigate/privkey.pem; - # generated 2024-06-01, Mozilla Guideline v5.7, nginx 1.25.3, OpenSSL 1.1.1w, modern configuration, no OCSP - # https://ssl-config.mozilla.org/#server=nginx&version=1.25.3&config=modern&openssl=1.1.1w&ocsp=false&guideline=5.7 - ssl_session_timeout 1d; - ssl_session_cache shared:MozSSL:10m; # about 40000 sessions - ssl_session_tickets off; + # generated 2024-06-01, Mozilla Guideline v5.7, nginx 1.25.3, OpenSSL 1.1.1w, modern configuration, no OCSP + # https://ssl-config.mozilla.org/#server=nginx&version=1.25.3&config=modern&openssl=1.1.1w&ocsp=false&guideline=5.7 + ssl_session_timeout 1d; + ssl_session_cache shared:MozSSL:10m; # about 40000 sessions + ssl_session_tickets off; - # modern configuration - ssl_protocols TLSv1.3; - ssl_prefer_server_ciphers off; + # modern configuration + ssl_protocols TLSv1.3; + ssl_prefer_server_ciphers off; - # HSTS (ngx_http_headers_module is required) (63072000 seconds) - add_header Strict-Transport-Security "max-age=63072000" always; + # HSTS (ngx_http_headers_module is required) (63072000 seconds) + add_header Strict-Transport-Security "max-age=63072000" always; - # ACME challenge location - location /.well-known/acme-challenge/ { - default_type "text/plain"; - root /etc/letsencrypt/www; - } - {{ else }} - # external HTTP (IPv4 always; IPv6 optional) - listen 8971; - {{ if .ipv6 }}{{ if .ipv6.enabled }}listen [::]:8971;{{ end }}{{ end }} - {{ end }} + # ACME challenge location + location /.well-known/acme-challenge/ { + default_type "text/plain"; + root 
/etc/letsencrypt/www; + } {{ else }} - # (No tls section) default to HTTP (IPv4 always; IPv6 optional) - listen 8971; - {{ if .ipv6 }}{{ if .ipv6.enabled }}listen [::]:8971;{{ end }}{{ end }} + # (No tls) default to HTTP (IPv4 always; IPv6 optional) + listen {{ .listen.external }}; + {{ if .ipv6.enabled }}listen [::]:{{ .listen.external_port }};{{ end }} {{ end }} - diff --git a/docker/rocm/Dockerfile b/docker/rocm/Dockerfile index 9edcd6058..42447a26b 100644 --- a/docker/rocm/Dockerfile +++ b/docker/rocm/Dockerfile @@ -13,7 +13,7 @@ ARG ROCM RUN apt update -qq && \ apt install -y wget gpg && \ - wget -O rocm.deb https://repo.radeon.com/amdgpu-install/7.1.1/ubuntu/jammy/amdgpu-install_7.1.1.70101-1_all.deb && \ + wget -O rocm.deb https://repo.radeon.com/amdgpu-install/7.2/ubuntu/jammy/amdgpu-install_7.2.70200-1_all.deb && \ apt install -y ./rocm.deb && \ apt update && \ apt install -qq -y rocm @@ -56,6 +56,8 @@ FROM scratch AS rocm-dist ARG ROCM +# Copy HIP headers required for MIOpen JIT (BuildHip) / HIPRTC at runtime +COPY --from=rocm /opt/rocm-${ROCM}/include/ /opt/rocm-${ROCM}/include/ COPY --from=rocm /opt/rocm-$ROCM/bin/rocminfo /opt/rocm-$ROCM/bin/migraphx-driver /opt/rocm-$ROCM/bin/ # Copy MIOpen database files for gfx10xx and gfx11xx only (RDNA2/RDNA3) COPY --from=rocm /opt/rocm-$ROCM/share/miopen/db/*gfx10* /opt/rocm-$ROCM/share/miopen/db/ diff --git a/docker/rocm/requirements-wheels-rocm.txt b/docker/rocm/requirements-wheels-rocm.txt index b6a202f93..da22f2ff6 100644 --- a/docker/rocm/requirements-wheels-rocm.txt +++ b/docker/rocm/requirements-wheels-rocm.txt @@ -1 +1 @@ -onnxruntime-migraphx @ https://github.com/NickM-27/frigate-onnxruntime-rocm/releases/download/v7.1.0/onnxruntime_migraphx-1.23.1-cp311-cp311-linux_x86_64.whl \ No newline at end of file +onnxruntime-migraphx @ https://github.com/NickM-27/frigate-onnxruntime-rocm/releases/download/v7.2.0/onnxruntime_migraphx-1.23.1-cp311-cp311-linux_x86_64.whl \ No newline at end of file diff --git 
a/docker/rocm/rocm.hcl b/docker/rocm/rocm.hcl index 6595066c5..710bfe995 100644 --- a/docker/rocm/rocm.hcl +++ b/docker/rocm/rocm.hcl @@ -1,5 +1,5 @@ variable "ROCM" { - default = "7.1.1" + default = "7.2.0" } variable "HSA_OVERRIDE_GFX_VERSION" { default = "" diff --git a/docs/docs/configuration/advanced.md b/docs/docs/configuration/advanced.md index 17eb2053d..b8dbffd62 100644 --- a/docs/docs/configuration/advanced.md +++ b/docs/docs/configuration/advanced.md @@ -155,34 +155,33 @@ services: ### Enabling IPv6 -IPv6 is disabled by default, to enable IPv6 listen.gotmpl needs to be bind mounted with IPv6 enabled. For example: +IPv6 is disabled by default, to enable IPv6 modify your Frigate configuration as follows: -``` -{{ if not .enabled }} -# intended for external traffic, protected by auth -listen 8971; -{{ else }} -# intended for external traffic, protected by auth -listen 8971 ssl; - -# intended for internal traffic, not protected by auth -listen 5000; +```yaml +networking: + ipv6: + enabled: True ``` -becomes +### Listen on different ports -``` -{{ if not .enabled }} -# intended for external traffic, protected by auth -listen [::]:8971 ipv6only=off; -{{ else }} -# intended for external traffic, protected by auth -listen [::]:8971 ipv6only=off ssl; +You can change the ports Nginx uses for listening using Frigate's configuration file. The internal port (unauthenticated) and external port (authenticated) can be changed independently. You can also specify an IP address using the format `ip:port` if you wish to bind the port to a specific interface. This may be useful for example to prevent exposing the internal port outside the container. -# intended for internal traffic, not protected by auth -listen [::]:5000 ipv6only=off; +For example: + +```yaml +networking: + listen: + internal: 127.0.0.1:5000 + external: 8971 ``` +:::warning + +This setting is for advanced users. 
For the majority of use cases it's recommended to change the `ports` section of your Docker compose file or use the Docker `run` `--publish` option instead, e.g. `-p 443:8971`. Changing Frigate's ports may break some integrations. + +::: + ## Base path By default, Frigate runs at the root path (`/`). However some setups require to run Frigate under a custom path prefix (e.g. `/frigate`), especially when Frigate is located behind a reverse proxy that requires path-based routing. @@ -234,7 +233,7 @@ To do this: ### Custom go2rtc version -Frigate currently includes go2rtc v1.9.10, there may be certain cases where you want to run a different version of go2rtc. +Frigate currently includes go2rtc v1.9.13, there may be certain cases where you want to run a different version of go2rtc. To do this: diff --git a/docs/docs/configuration/camera_specific.md b/docs/docs/configuration/camera_specific.md index 50d5c52aa..aae8c57b4 100644 --- a/docs/docs/configuration/camera_specific.md +++ b/docs/docs/configuration/camera_specific.md @@ -244,7 +244,7 @@ go2rtc: - rtspx://192.168.1.1:7441/abcdefghijk ``` -[See the go2rtc docs for more information](https://github.com/AlexxIT/go2rtc/tree/v1.9.10#source-rtsp) +[See the go2rtc docs for more information](https://github.com/AlexxIT/go2rtc/tree/v1.9.13#source-rtsp) In the Unifi 2.0 update Unifi Protect Cameras had a change in audio sample rate which causes issues for ffmpeg. The input rate needs to be set for record if used directly with unifi protect. diff --git a/docs/docs/configuration/genai/config.md b/docs/docs/configuration/genai/config.md index e1f79b744..6a004e353 100644 --- a/docs/docs/configuration/genai/config.md +++ b/docs/docs/configuration/genai/config.md @@ -5,7 +5,7 @@ title: Configuring Generative AI ## Configuration -A Generative AI provider can be configured in the global config, which will make the Generative AI features available for use. There are currently 3 native providers available to integrate with Frigate. 
Other providers that support the OpenAI standard API can also be used. See the OpenAI section below. +A Generative AI provider can be configured in the global config, which will make the Generative AI features available for use. There are currently 4 native providers available to integrate with Frigate. Other providers that support the OpenAI standard API can also be used. See the OpenAI section below. To use Generative AI, you must define a single provider at the global level of your Frigate configuration. If the provider you choose requires an API key, you may either directly paste it in your configuration, or store it in an environment variable prefixed with `FRIGATE_`. @@ -77,8 +77,46 @@ genai: provider: ollama base_url: http://localhost:11434 model: qwen3-vl:4b + provider_options: # other Ollama client options can be defined + keep_alive: -1 + options: + num_ctx: 8192 # make sure the context matches other services that are using ollama ``` +## llama.cpp + +[llama.cpp](https://github.com/ggml-org/llama.cpp) is a C++ implementation of LLaMA that provides a high-performance inference server. Using llama.cpp directly gives you access to all native llama.cpp options and parameters. + +:::warning + +Using llama.cpp on CPU is not recommended, high inference times make using Generative AI impractical. + +::: + +It is highly recommended to host the llama.cpp server on a machine with a discrete graphics card, or on an Apple silicon Mac for best performance. + +### Supported Models + +You must use a vision capable model with Frigate. The llama.cpp server supports various vision models in GGUF format. 
+ +### Configuration + +```yaml +genai: + provider: llamacpp + base_url: http://localhost:8080 + model: your-model-name + provider_options: + temperature: 0.7 + repeat_penalty: 1.05 + top_p: 0.8 + top_k: 40 + min_p: 0.05 + seed: -1 +``` + +All llama.cpp native options can be passed through `provider_options`, including `temperature`, `top_k`, `top_p`, `min_p`, `repeat_penalty`, `repeat_last_n`, `seed`, `grammar`, and more. See the [llama.cpp server documentation](https://github.com/ggml-org/llama.cpp/blob/master/tools/server/README.md) for a complete list of available parameters. + ## Google Gemini Google Gemini has a [free tier](https://ai.google.dev/pricing) for the API, however the limits may not be sufficient for standard Frigate usage. Choose a plan appropriate for your installation. @@ -185,4 +223,4 @@ genai: base_url: https://instance.cognitiveservices.azure.com/openai/responses?api-version=2025-04-01-preview model: gpt-5-mini api_key: "{FRIGATE_OPENAI_API_KEY}" -``` +``` \ No newline at end of file diff --git a/docs/docs/configuration/genai/objects.md b/docs/docs/configuration/genai/objects.md index e3ae31393..c878f5ec8 100644 --- a/docs/docs/configuration/genai/objects.md +++ b/docs/docs/configuration/genai/objects.md @@ -11,7 +11,7 @@ By default, descriptions will be generated for all tracked objects and all zones Optionally, you can generate the description using a snapshot (if enabled) by setting `use_snapshot` to `True`. By default, this is set to `False`, which sends the uncompressed images from the `detect` stream collected over the object's lifetime to the model. Once the object lifecycle ends, only a single compressed and cropped thumbnail is saved with the tracked object. Using a snapshot might be useful when you want to _regenerate_ a tracked object's description as it will provide the AI with a higher-quality image (typically downscaled by the AI itself) than the cropped/compressed thumbnail. 
Using a snapshot otherwise has a trade-off in that only a single image is sent to your provider, which will limit the model's ability to determine object movement or direction. -Generative AI object descriptions can also be toggled dynamically for a camera via MQTT with the topic `frigate//object_descriptions/set`. See the [MQTT documentation](/integrations/mqtt/#frigatecamera_nameobjectdescriptionsset). +Generative AI object descriptions can also be toggled dynamically for a camera via MQTT with the topic `frigate//object_descriptions/set`. See the [MQTT documentation](/integrations/mqtt#frigatecamera_nameobject_descriptionsset). ## Usage and Best Practices @@ -75,4 +75,4 @@ Many providers also have a public facing chat interface for their models. Downlo - OpenAI - [ChatGPT](https://chatgpt.com) - Gemini - [Google AI Studio](https://aistudio.google.com) -- Ollama - [Open WebUI](https://docs.openwebui.com/) +- Ollama - [Open WebUI](https://docs.openwebui.com/) \ No newline at end of file diff --git a/docs/docs/configuration/genai/review_summaries.md b/docs/docs/configuration/genai/review_summaries.md index df287446c..c6f5e53ec 100644 --- a/docs/docs/configuration/genai/review_summaries.md +++ b/docs/docs/configuration/genai/review_summaries.md @@ -7,7 +7,7 @@ Generative AI can be used to automatically generate structured summaries of revi Requests for a summary are requested automatically to your AI provider for alert review items when the activity has ended, they can also be optionally enabled for detections as well. -Generative AI review summaries can also be toggled dynamically for a [camera via MQTT](/integrations/mqtt/#frigatecamera_namereviewdescriptionsset). +Generative AI review summaries can also be toggled dynamically for a [camera via MQTT](/integrations/mqtt#frigatecamera_namereview_descriptionsset). 
## Review Summary Usage and Best Practices diff --git a/docs/docs/configuration/record.md b/docs/docs/configuration/record.md index 4dfd8b77c..eb5d736e4 100644 --- a/docs/docs/configuration/record.md +++ b/docs/docs/configuration/record.md @@ -139,7 +139,13 @@ record: :::tip -When using `hwaccel_args` globally hardware encoding is used for time lapse generation. The encoder determines its own behavior so the resulting file size may be undesirably large. +When using `hwaccel_args`, hardware encoding is used for timelapse generation. This setting can be overridden for a specific camera (e.g., when camera resolution exceeds hardware encoder limits); set `cameras..record.export.hwaccel_args` with the appropriate settings. Using an unrecognized value or empty string will fall back to software encoding (libx264). + +::: + +:::tip + +The encoder determines its own behavior so the resulting file size may be undesirably large. To reduce the output file size the ffmpeg parameter `-qp n` can be utilized (where `n` stands for the value of the quantisation parameter). The value can be adjusted to get an acceptable tradeoff between quality and file size for the given scenario. ::: @@ -148,19 +154,16 @@ To reduce the output file size the ffmpeg parameter `-qp n` can be utilized (whe Apple devices running the Safari browser may fail to playback h.265 recordings. The [apple compatibility option](../configuration/camera_specific.md#h265-cameras-via-safari) should be used to ensure seamless playback on Apple devices. -## Syncing Recordings With Disk +## Syncing Media Files With Disk -In some cases the recordings files may be deleted but Frigate will not know this has happened. Recordings sync can be enabled which will tell Frigate to check the file system and delete any db entries for files which don't exist. 
+Media files (event snapshots, event thumbnails, review thumbnails, previews, exports, and recordings) can become orphaned when database entries are deleted but the corresponding files remain on disk. -```yaml -record: - sync_recordings: True -``` +Normal operation may leave small numbers of orphaned files until Frigate's scheduled cleanup, but crashes, configuration changes, or upgrades may cause more orphaned files that Frigate does not clean up. This feature checks the file system for media files and removes any that are not referenced in the database. -This feature is meant to fix variations in files, not completely delete entries in the database. If you delete all of your media, don't use `sync_recordings`, just stop Frigate, delete the `frigate.db` database, and restart. +The Maintenance pane in the Frigate UI or an API endpoint `POST /api/media/sync` can be used to trigger a media sync. When using the API, a job ID is returned and the operation continues on the server. Status can be checked with the `/api/media/sync/status/{job_id}` endpoint. :::warning -The sync operation uses considerable CPU resources and in most cases is not needed, only enable when necessary. +This operation uses considerable CPU resources and includes a safety threshold that aborts if more than 50% of files would be deleted. Only run when necessary. If you set `force: true` the safety threshold will be bypassed; do not use `force` unless you are certain the deletions are intended. 
::: diff --git a/docs/docs/configuration/reference.md b/docs/docs/configuration/reference.md index 206d7012e..5c3ca4ea8 100644 --- a/docs/docs/configuration/reference.md +++ b/docs/docs/configuration/reference.md @@ -73,11 +73,19 @@ tls: # Optional: Enable TLS for port 8971 (default: shown below) enabled: True -# Optional: IPv6 configuration +# Optional: Networking configuration networking: # Optional: Enable IPv6 on 5000, and 8971 if tls is configured (default: shown below) ipv6: enabled: False + # Optional: Override ports Frigate uses for listening (defaults: shown below) + # An IP address may also be provided to bind to a specific interface, e.g. ip:port + # NOTE: This setting is for advanced users and may break some integrations. The majority + # of users should change ports in the docker compose file + # or use the docker run `--publish` option to select a different port. + listen: + internal: 5000 + external: 8971 # Optional: Proxy configuration proxy: @@ -510,8 +518,6 @@ record: # Optional: Number of minutes to wait between cleanup runs (default: shown below) # This can be used to reduce the frequency of deleting recording segments from disk if you want to minimize i/o expire_interval: 60 - # Optional: Two-way sync recordings database with disk on startup and once a day (default: shown below). - sync_recordings: False # Optional: Continuous retention settings continuous: # Optional: Number of days to retain recordings regardless of tracked objects or motion (default: shown below) @@ -534,6 +540,8 @@ record: # The -r (framerate) dictates how smooth the output video is. # So the args would be -vf setpts=0.02*PTS -r 30 in that case. timelapse_args: "-vf setpts=0.04*PTS -r 30" + # Optional: Global hardware acceleration settings for timelapse exports. (default: inherit) + hwaccel_args: auto # Optional: Recording Preview Settings preview: # Optional: Quality of recording preview (default: shown below). 
@@ -752,7 +760,7 @@ classification: interval: None # Optional: Restream configuration -# Uses https://github.com/AlexxIT/go2rtc (v1.9.10) +# Uses https://github.com/AlexxIT/go2rtc (v1.9.13) # NOTE: The default go2rtc API port (1984) must be used, # changing this port for the integrated go2rtc instance is not supported. go2rtc: @@ -838,6 +846,11 @@ cameras: # Optional: camera specific output args (default: inherit) # output_args: + # Optional: camera specific hwaccel args for timelapse export (default: inherit) + # record: + # export: + # hwaccel_args: + # Optional: timeout for highest scoring image before allowing it # to be replaced by a newer image. (default: shown below) best_image_timeout: 60 diff --git a/docs/docs/configuration/restream.md b/docs/docs/configuration/restream.md index ebd506294..a3c11f2d0 100644 --- a/docs/docs/configuration/restream.md +++ b/docs/docs/configuration/restream.md @@ -7,7 +7,7 @@ title: Restream Frigate can restream your video feed as an RTSP feed for other applications such as Home Assistant to utilize it at `rtsp://:8554/`. Port 8554 must be open. [This allows you to use a video feed for detection in Frigate and Home Assistant live view at the same time without having to make two separate connections to the camera](#reduce-connections-to-camera). The video feed is copied from the original video feed directly to avoid re-encoding. This feed does not include any annotation by Frigate. -Frigate uses [go2rtc](https://github.com/AlexxIT/go2rtc/tree/v1.9.10) to provide its restream and MSE/WebRTC capabilities. The go2rtc config is hosted at the `go2rtc` in the config, see [go2rtc docs](https://github.com/AlexxIT/go2rtc/tree/v1.9.10#configuration) for more advanced configurations and features. +Frigate uses [go2rtc](https://github.com/AlexxIT/go2rtc/tree/v1.9.13) to provide its restream and MSE/WebRTC capabilities. 
The go2rtc config is hosted at the `go2rtc` in the config, see [go2rtc docs](https://github.com/AlexxIT/go2rtc/tree/v1.9.13#configuration) for more advanced configurations and features. :::note @@ -206,7 +206,7 @@ Enabling arbitrary exec sources allows execution of arbitrary commands through g ## Advanced Restream Configurations -The [exec](https://github.com/AlexxIT/go2rtc/tree/v1.9.10#source-exec) source in go2rtc can be used for custom ffmpeg commands. An example is below: +The [exec](https://github.com/AlexxIT/go2rtc/tree/v1.9.13#source-exec) source in go2rtc can be used for custom ffmpeg commands. An example is below: :::warning diff --git a/docs/docs/guides/configuring_go2rtc.md b/docs/docs/guides/configuring_go2rtc.md index ca50a90d3..8b01de3e7 100644 --- a/docs/docs/guides/configuring_go2rtc.md +++ b/docs/docs/guides/configuring_go2rtc.md @@ -11,7 +11,7 @@ Use of the bundled go2rtc is optional. You can still configure FFmpeg to connect ## Setup a go2rtc stream -First, you will want to configure go2rtc to connect to your camera stream by adding the stream you want to use for live view in your Frigate config file. Avoid changing any other parts of your config at this step. Note that go2rtc supports [many different stream types](https://github.com/AlexxIT/go2rtc/tree/v1.9.10#module-streams), not just rtsp. +First, you will want to configure go2rtc to connect to your camera stream by adding the stream you want to use for live view in your Frigate config file. Avoid changing any other parts of your config at this step. Note that go2rtc supports [many different stream types](https://github.com/AlexxIT/go2rtc/tree/v1.9.13#module-streams), not just rtsp. :::tip @@ -47,8 +47,8 @@ After adding this to the config, restart Frigate and try to watch the live strea - Check Video Codec: - If the camera stream works in go2rtc but not in your browser, the video codec might be unsupported. - - If using H265, switch to H264. 
Refer to [video codec compatibility](https://github.com/AlexxIT/go2rtc/tree/v1.9.10#codecs-madness) in go2rtc documentation. - - If unable to switch from H265 to H264, or if the stream format is different (e.g., MJPEG), re-encode the video using [FFmpeg parameters](https://github.com/AlexxIT/go2rtc/tree/v1.9.10#source-ffmpeg). It supports rotating and resizing video feeds and hardware acceleration. Keep in mind that transcoding video from one format to another is a resource intensive task and you may be better off using the built-in jsmpeg view. + - If using H265, switch to H264. Refer to [video codec compatibility](https://github.com/AlexxIT/go2rtc/tree/v1.9.13#codecs-madness) in go2rtc documentation. + - If unable to switch from H265 to H264, or if the stream format is different (e.g., MJPEG), re-encode the video using [FFmpeg parameters](https://github.com/AlexxIT/go2rtc/tree/v1.9.13#source-ffmpeg). It supports rotating and resizing video feeds and hardware acceleration. Keep in mind that transcoding video from one format to another is a resource intensive task and you may be better off using the built-in jsmpeg view. 
```yaml go2rtc: streams: diff --git a/docs/sidebars.ts b/docs/sidebars.ts index ea0d2f5c8..a4c1bca9d 100644 --- a/docs/sidebars.ts +++ b/docs/sidebars.ts @@ -28,7 +28,7 @@ const sidebars: SidebarsConfig = { { type: "link", label: "Go2RTC Configuration Reference", - href: "https://github.com/AlexxIT/go2rtc/tree/v1.9.10#configuration", + href: "https://github.com/AlexxIT/go2rtc/tree/v1.9.13#configuration", } as PropSidebarItemLink, ], Detectors: [ diff --git a/docs/static/frigate-api.yaml b/docs/static/frigate-api.yaml index f1a00fe61..2063514ac 100644 --- a/docs/static/frigate-api.yaml +++ b/docs/static/frigate-api.yaml @@ -331,6 +331,59 @@ paths: application/json: schema: $ref: "#/components/schemas/HTTPValidationError" + /media/sync: + post: + tags: + - App + summary: Start media sync job + description: |- + Start an asynchronous media sync job to find and (optionally) remove orphaned media files. + Returns 202 with job details when queued, or 409 if a job is already running. + operationId: sync_media_media_sync_post + requestBody: + required: true + content: + application/json: + responses: + "202": + description: Accepted - Job queued + "409": + description: Conflict - Job already running + "422": + description: Validation Error + + /media/sync/current: + get: + tags: + - App + summary: Get current media sync job + description: |- + Retrieve the current running media sync job, if any. Returns the job details or null when no job is active. + operationId: get_media_sync_current_media_sync_current_get + responses: + "200": + description: Successful Response + "422": + description: Validation Error + + /media/sync/status/{job_id}: + get: + tags: + - App + summary: Get media sync job status + description: |- + Get status and results for the specified media sync job id. Returns 200 with job details including results, or 404 if the job is not found. 
+ operationId: get_media_sync_status_media_sync_status__job_id__get + parameters: + - name: job_id + in: path + responses: + "200": + description: Successful Response + "404": + description: Not Found - Job not found + "422": + description: Validation Error /faces/train/{name}/classify: post: tags: @@ -3147,6 +3200,7 @@ paths: duration: 30 include_recording: true draw: {} + pre_capture: null responses: "200": description: Successful Response @@ -4949,6 +5003,12 @@ components: - type: "null" title: Draw default: {} + pre_capture: + anyOf: + - type: integer + - type: "null" + title: Pre Capture Seconds + default: null type: object title: EventsCreateBody EventsDeleteBody: diff --git a/frigate/api/app.py b/frigate/api/app.py index 440adfce4..126c613a7 100644 --- a/frigate/api/app.py +++ b/frigate/api/app.py @@ -30,15 +30,22 @@ from frigate.api.auth import ( require_role, ) from frigate.api.defs.query.app_query_parameters import AppTimelineHourlyQueryParameters -from frigate.api.defs.request.app_body import AppConfigSetBody +from frigate.api.defs.request.app_body import AppConfigSetBody, MediaSyncBody from frigate.api.defs.tags import Tags from frigate.config import FrigateConfig from frigate.config.camera.updater import ( CameraConfigUpdateEnum, CameraConfigUpdateTopic, ) +from frigate.ffmpeg_presets import FFMPEG_HWACCEL_VAAPI, _gpu_selector +from frigate.jobs.media_sync import ( + get_current_media_sync_job, + get_media_sync_job_by_id, + start_media_sync_job, +) from frigate.models import Event, Timeline from frigate.stats.prometheus import get_metrics, update_metrics +from frigate.types import JobStatusTypesEnum from frigate.util.builtin import ( clean_camera_user_pass, flatten_config_data, @@ -463,7 +470,15 @@ def config_set(request: Request, body: AppConfigSetBody): @router.get("/vainfo", dependencies=[Depends(allow_any_authenticated())]) def vainfo(): - vainfo = vainfo_hwaccel() + # Use LibvaGpuSelector to pick an appropriate libva device (if available) + 
selected_gpu = "" + try: + selected_gpu = _gpu_selector.get_gpu_arg(FFMPEG_HWACCEL_VAAPI, 0) or "" + except Exception: + selected_gpu = "" + + # If selected_gpu is empty, pass None to vainfo_hwaccel to run plain `vainfo`. + vainfo = vainfo_hwaccel(device_name=selected_gpu or None) return JSONResponse( content={ "return_code": vainfo.returncode, @@ -598,6 +613,98 @@ def restart(): ) +@router.post( + "/media/sync", + dependencies=[Depends(require_role(["admin"]))], + summary="Start media sync job", + description="""Start an asynchronous media sync job to find and (optionally) remove orphaned media files. + Returns 202 with job details when queued, or 409 if a job is already running.""", +) +def sync_media(body: MediaSyncBody = Body(...)): + """Start async media sync job - remove orphaned files. + + Syncs specified media types: event snapshots, event thumbnails, review thumbnails, + previews, exports, and/or recordings. Job runs in background; use /media/sync/current + or /media/sync/status/{job_id} to check status. + + Args: + body: MediaSyncBody with dry_run flag and media_types list. + media_types can include: 'all', 'event_snapshots', 'event_thumbnails', + 'review_thumbnails', 'previews', 'exports', 'recordings' + + Returns: + 202 Accepted with job_id, or 409 Conflict if job already running. 
+ """ + job_id = start_media_sync_job( + dry_run=body.dry_run, media_types=body.media_types, force=body.force + ) + + if job_id is None: + # A job is already running + current = get_current_media_sync_job() + return JSONResponse( + content={ + "error": "A media sync job is already running", + "current_job_id": current.id if current else None, + }, + status_code=409, + ) + + return JSONResponse( + content={ + "job": { + "job_type": "media_sync", + "status": JobStatusTypesEnum.queued, + "id": job_id, + } + }, + status_code=202, + ) + + +@router.get( + "/media/sync/current", + dependencies=[Depends(require_role(["admin"]))], + summary="Get current media sync job", + description="""Retrieve the current running media sync job, if any. Returns the job details + or null when no job is active.""", +) +def get_media_sync_current(): + """Get the current running media sync job, if any.""" + job = get_current_media_sync_job() + + if job is None: + return JSONResponse(content={"job": None}, status_code=200) + + return JSONResponse( + content={"job": job.to_dict()}, + status_code=200, + ) + + +@router.get( + "/media/sync/status/{job_id}", + dependencies=[Depends(require_role(["admin"]))], + summary="Get media sync job status", + description="""Get status and results for the specified media sync job id. 
Returns 200 with + job details including results, or 404 if the job is not found.""", +) +def get_media_sync_status(job_id: str): + """Get the status of a specific media sync job.""" + job = get_media_sync_job_by_id(job_id) + + if job is None: + return JSONResponse( + content={"error": "Job not found"}, + status_code=404, + ) + + return JSONResponse( + content={"job": job.to_dict()}, + status_code=200, + ) + + @router.get("/labels", dependencies=[Depends(allow_any_authenticated())]) def get_labels(camera: str = ""): try: diff --git a/frigate/api/auth.py b/frigate/api/auth.py index e0a6ec924..04a5bd19a 100644 --- a/frigate/api/auth.py +++ b/frigate/api/auth.py @@ -26,7 +26,7 @@ from frigate.api.defs.request.app_body import ( AppPutRoleBody, ) from frigate.api.defs.tags import Tags -from frigate.config import AuthConfig, ProxyConfig +from frigate.config import AuthConfig, NetworkingConfig, ProxyConfig from frigate.const import CONFIG_DIR, JWT_SECRET_ENV_VAR, PASSWORD_HASH_ALGORITHM from frigate.models import User @@ -41,7 +41,7 @@ def require_admin_by_default(): endpoints require admin access unless explicitly overridden with allow_public(), allow_any_authenticated(), or require_role(). - Port 5000 (internal) always has admin role set by the /auth endpoint, + Internal port always has admin role set by the /auth endpoint, so this check passes automatically for internal requests. Certain paths are exempted from the global admin check because they must @@ -130,7 +130,7 @@ def require_admin_by_default(): pass # For all other paths, require admin role - # Port 5000 (internal) requests have admin role set automatically + # Internal port requests have admin role set automatically role = request.headers.get("remote-role") if role == "admin": return @@ -143,6 +143,17 @@ def require_admin_by_default(): return admin_checker +def _is_authenticated(request: Request) -> bool: + """ + Helper to determine if a request is from an authenticated user. 
+ + Returns True if the request has a valid authenticated user (not anonymous). + Internal port requests are considered anonymous despite having admin role. + """ + username = request.headers.get("remote-user") + return username is not None and username != "anonymous" + + def allow_public(): """ Override dependency to allow unauthenticated access to an endpoint. @@ -171,6 +182,7 @@ def allow_any_authenticated(): Rejects: - Requests with no remote-user header (did not pass through /auth endpoint) + - External port requests with anonymous user (auth disabled, no proxy auth) Example: @router.get("/authenticated-endpoint", dependencies=[Depends(allow_any_authenticated())]) @@ -179,8 +191,14 @@ def allow_any_authenticated(): async def auth_checker(request: Request): # Ensure a remote-user has been set by the /auth endpoint username = request.headers.get("remote-user") - if username is None: - raise HTTPException(status_code=401, detail="Authentication required") + + # Internal port requests have admin role and should be allowed + role = request.headers.get("remote-role") + + if role != "admin": + if username is None or not _is_authenticated(request): + raise HTTPException(status_code=401, detail="Authentication required") + return return auth_checker @@ -570,12 +588,18 @@ def resolve_role( def auth(request: Request): auth_config: AuthConfig = request.app.frigate_config.auth proxy_config: ProxyConfig = request.app.frigate_config.proxy + networking_config: NetworkingConfig = request.app.frigate_config.networking success_response = Response("", status_code=202) + # handle case where internal port is a string with ip:port + internal_port = networking_config.listen.internal + if type(internal_port) is str: + internal_port = int(internal_port.split(":")[-1]) + # dont require auth if the request is on the internal port # this header is set by Frigate's nginx proxy, so it cant be spoofed - if int(request.headers.get("x-server-port", default=0)) == 5000: + if 
int(request.headers.get("x-server-port", default=0)) == internal_port: success_response.headers["remote-user"] = "anonymous" success_response.headers["remote-role"] = "admin" return success_response diff --git a/frigate/api/chat.py b/frigate/api/chat.py new file mode 100644 index 000000000..1f5cc2297 --- /dev/null +++ b/frigate/api/chat.py @@ -0,0 +1,642 @@ +"""Chat and LLM tool calling APIs.""" + +import base64 +import json +import logging +from datetime import datetime, timezone +from typing import Any, Dict, List, Optional + +import cv2 +from fastapi import APIRouter, Body, Depends, Request +from fastapi.responses import JSONResponse +from pydantic import BaseModel + +from frigate.api.auth import ( + allow_any_authenticated, + get_allowed_cameras_for_filter, +) +from frigate.api.defs.query.events_query_parameters import EventsQueryParams +from frigate.api.defs.request.chat_body import ChatCompletionRequest +from frigate.api.defs.response.chat_response import ( + ChatCompletionResponse, + ChatMessageResponse, +) +from frigate.api.defs.tags import Tags +from frigate.api.event import events +from frigate.genai import get_genai_client + +logger = logging.getLogger(__name__) + +router = APIRouter(tags=[Tags.chat]) + + +class ToolExecuteRequest(BaseModel): + """Request model for tool execution.""" + + tool_name: str + arguments: Dict[str, Any] + + +def get_tool_definitions() -> List[Dict[str, Any]]: + """ + Get OpenAI-compatible tool definitions for Frigate. + + Returns a list of tool definitions that can be used with OpenAI-compatible + function calling APIs. + """ + return [ + { + "type": "function", + "function": { + "name": "search_objects", + "description": ( + "Search for detected objects in Frigate by camera, object label, time range, " + "zones, and other filters. Use this to answer questions about when " + "objects were detected, what objects appeared, or to find specific object detections. 
" + "An 'object' in Frigate represents a tracked detection (e.g., a person, package, car)." + ), + "parameters": { + "type": "object", + "properties": { + "camera": { + "type": "string", + "description": "Camera name to filter by (optional). Use 'all' for all cameras.", + }, + "label": { + "type": "string", + "description": "Object label to filter by (e.g., 'person', 'package', 'car').", + }, + "after": { + "type": "string", + "description": "Start time in ISO 8601 format (e.g., '2024-01-01T00:00:00Z').", + }, + "before": { + "type": "string", + "description": "End time in ISO 8601 format (e.g., '2024-01-01T23:59:59Z').", + }, + "zones": { + "type": "array", + "items": {"type": "string"}, + "description": "List of zone names to filter by.", + }, + "limit": { + "type": "integer", + "description": "Maximum number of objects to return (default: 10).", + "default": 10, + }, + }, + }, + "required": [], + }, + }, + { + "type": "function", + "function": { + "name": "get_live_context", + "description": ( + "Get the current detection information for a camera: objects being tracked, " + "zones, timestamps. Use this to understand what is visible in the live view. " + "Call this when the user has included a live image (via include_live_image) or " + "when answering questions about what is happening right now on a specific camera." 
+ ), + "parameters": { + "type": "object", + "properties": { + "camera": { + "type": "string", + "description": "Camera name to get live context for.", + }, + }, + "required": ["camera"], + }, + }, + }, + ] + + +@router.get( + "/chat/tools", + dependencies=[Depends(allow_any_authenticated())], + summary="Get available tools", + description="Returns OpenAI-compatible tool definitions for function calling.", +) +def get_tools(request: Request) -> JSONResponse: + """Get list of available tools for LLM function calling.""" + tools = get_tool_definitions() + return JSONResponse(content={"tools": tools}) + + +async def _execute_search_objects( + request: Request, + arguments: Dict[str, Any], + allowed_cameras: List[str], +) -> JSONResponse: + """ + Execute the search_objects tool. + + This searches for detected objects (events) in Frigate using the same + logic as the events API endpoint. + """ + # Parse ISO 8601 timestamps to Unix timestamps if provided + after = arguments.get("after") + before = arguments.get("before") + + if after: + try: + after_dt = datetime.fromisoformat(after.replace("Z", "+00:00")) + after = after_dt.timestamp() + except (ValueError, AttributeError): + logger.warning(f"Invalid 'after' timestamp format: {after}") + after = None + + if before: + try: + before_dt = datetime.fromisoformat(before.replace("Z", "+00:00")) + before = before_dt.timestamp() + except (ValueError, AttributeError): + logger.warning(f"Invalid 'before' timestamp format: {before}") + before = None + + # Convert zones array to comma-separated string if provided + zones = arguments.get("zones") + if isinstance(zones, list): + zones = ",".join(zones) + elif zones is None: + zones = "all" + + # Build query parameters compatible with EventsQueryParams + query_params = EventsQueryParams( + camera=arguments.get("camera", "all"), + cameras=arguments.get("camera", "all"), + label=arguments.get("label", "all"), + labels=arguments.get("label", "all"), + zones=zones, + zone=zones, + 
after=after, + before=before, + limit=arguments.get("limit", 10), + ) + + try: + # Call the events endpoint function directly + # The events function is synchronous and takes params and allowed_cameras + response = events(query_params, allowed_cameras) + + # The response is already a JSONResponse with event data + # Return it as-is for the LLM + return response + except Exception as e: + logger.error(f"Error executing search_objects: {e}", exc_info=True) + return JSONResponse( + content={ + "success": False, + "message": "Error searching objects", + }, + status_code=500, + ) + + +@router.post( + "/chat/execute", + dependencies=[Depends(allow_any_authenticated())], + summary="Execute a tool", + description="Execute a tool function call from an LLM.", +) +async def execute_tool( + request: Request, + body: ToolExecuteRequest = Body(...), + allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter), +) -> JSONResponse: + """ + Execute a tool function call. + + This endpoint receives tool calls from LLMs and executes the corresponding + Frigate operations, returning results in a format the LLM can understand. 
+ """ + tool_name = body.tool_name + arguments = body.arguments + + logger.debug(f"Executing tool: {tool_name} with arguments: {arguments}") + + if tool_name == "search_objects": + return await _execute_search_objects(request, arguments, allowed_cameras) + + return JSONResponse( + content={ + "success": False, + "message": f"Unknown tool: {tool_name}", + "tool": tool_name, + }, + status_code=400, + ) + + +async def _execute_get_live_context( + request: Request, + camera: str, + allowed_cameras: List[str], +) -> Dict[str, Any]: + if camera not in allowed_cameras: + return { + "error": f"Camera '{camera}' not found or access denied", + } + + if camera not in request.app.frigate_config.cameras: + return { + "error": f"Camera '{camera}' not found", + } + + try: + frame_processor = request.app.detected_frames_processor + camera_state = frame_processor.camera_states.get(camera) + + if camera_state is None: + return { + "error": f"Camera '{camera}' state not available", + } + + tracked_objects_dict = {} + with camera_state.current_frame_lock: + tracked_objects = camera_state.tracked_objects.copy() + frame_time = camera_state.current_frame_time + + for obj_id, tracked_obj in tracked_objects.items(): + obj_dict = tracked_obj.to_dict() + if obj_dict.get("frame_time") == frame_time: + tracked_objects_dict[obj_id] = { + "label": obj_dict.get("label"), + "zones": obj_dict.get("current_zones", []), + "sub_label": obj_dict.get("sub_label"), + "stationary": obj_dict.get("stationary", False), + } + + return { + "camera": camera, + "timestamp": frame_time, + "detections": list(tracked_objects_dict.values()), + } + + except Exception as e: + logger.error(f"Error executing get_live_context: {e}", exc_info=True) + return { + "error": "Error getting live context", + } + + +async def _get_live_frame_image_url( + request: Request, + camera: str, + allowed_cameras: List[str], +) -> Optional[str]: + """ + Fetch the current live frame for a camera as a base64 data URL. 
+ + Returns None if the frame cannot be retrieved. Used when include_live_image + is set to attach the image to the first user message. + """ + if ( + camera not in allowed_cameras + or camera not in request.app.frigate_config.cameras + ): + return None + try: + frame_processor = request.app.detected_frames_processor + if camera not in frame_processor.camera_states: + return None + frame = frame_processor.get_current_frame(camera, {}) + if frame is None: + return None + height, width = frame.shape[:2] + max_dimension = 1024 + if height > max_dimension or width > max_dimension: + scale = max_dimension / max(height, width) + frame = cv2.resize( + frame, + (int(width * scale), int(height * scale)), + interpolation=cv2.INTER_AREA, + ) + _, img_encoded = cv2.imencode(".jpg", frame, [cv2.IMWRITE_JPEG_QUALITY, 85]) + b64 = base64.b64encode(img_encoded.tobytes()).decode("utf-8") + return f"data:image/jpeg;base64,{b64}" + except Exception as e: + logger.debug("Failed to get live frame for %s: %s", camera, e) + return None + + +async def _execute_tool_internal( + tool_name: str, + arguments: Dict[str, Any], + request: Request, + allowed_cameras: List[str], +) -> Dict[str, Any]: + """ + Internal helper to execute a tool and return the result as a dict. + + This is used by the chat completion endpoint to execute tools. 
+ """ + if tool_name == "search_objects": + response = await _execute_search_objects(request, arguments, allowed_cameras) + try: + if hasattr(response, "body"): + body_str = response.body.decode("utf-8") + return json.loads(body_str) + elif hasattr(response, "content"): + return response.content + else: + return {} + except (json.JSONDecodeError, AttributeError) as e: + logger.warning(f"Failed to extract tool result: {e}") + return {"error": "Failed to parse tool result"} + elif tool_name == "get_live_context": + camera = arguments.get("camera") + if not camera: + return {"error": "Camera parameter is required"} + return await _execute_get_live_context(request, camera, allowed_cameras) + else: + return {"error": f"Unknown tool: {tool_name}"} + + +@router.post( + "/chat/completion", + response_model=ChatCompletionResponse, + dependencies=[Depends(allow_any_authenticated())], + summary="Chat completion with tool calling", + description=( + "Send a chat message to the configured GenAI provider with tool calling support. " + "The LLM can call Frigate tools to answer questions about your cameras and events." + ), +) +async def chat_completion( + request: Request, + body: ChatCompletionRequest = Body(...), + allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter), +) -> JSONResponse: + """ + Chat completion endpoint with tool calling support. + + This endpoint: + 1. Gets the configured GenAI client + 2. Gets tool definitions + 3. Sends messages + tools to LLM + 4. Handles tool_calls if present + 5. Executes tools and sends results back to LLM + 6. Repeats until final answer + 7. Returns response to user + """ + genai_client = get_genai_client(request.app.frigate_config) + if not genai_client: + return JSONResponse( + content={ + "error": "GenAI is not configured. 
Please configure a GenAI provider in your Frigate config.", + }, + status_code=400, + ) + + tools = get_tool_definitions() + conversation = [] + + current_datetime = datetime.now(timezone.utc) + current_date_str = current_datetime.strftime("%Y-%m-%d") + current_time_str = current_datetime.strftime("%H:%M:%S %Z") + + cameras_info = [] + config = request.app.frigate_config + for camera_id in allowed_cameras: + if camera_id not in config.cameras: + continue + camera_config = config.cameras[camera_id] + friendly_name = ( + camera_config.friendly_name + if camera_config.friendly_name + else camera_id.replace("_", " ").title() + ) + cameras_info.append(f" - {friendly_name} (ID: {camera_id})") + + cameras_section = "" + if cameras_info: + cameras_section = ( + "\n\nAvailable cameras:\n" + + "\n".join(cameras_info) + + "\n\nWhen users refer to cameras by their friendly name (e.g., 'Back Deck Camera'), use the corresponding camera ID (e.g., 'back_deck_cam') in tool calls." + ) + + live_image_note = "" + if body.include_live_image: + live_image_note = ( + f"\n\nThe first user message includes a live image from camera " + f"'{body.include_live_image}'. Use get_live_context for that camera to get " + "current detection details (objects, zones) to aid in understanding the image." + ) + + system_prompt = f"""You are a helpful assistant for Frigate, a security camera NVR system. You help users answer questions about their cameras, detected objects, and events. + +Current date and time: {current_date_str} at {current_time_str} (UTC) + +When users ask questions about "today", "yesterday", "this week", etc., use the current date above as reference. +When searching for objects or events, use ISO 8601 format for dates (e.g., {current_date_str}T00:00:00Z for the start of today). 
+Always be accurate with time calculations based on the current date provided.{cameras_section}{live_image_note}""" + + conversation.append( + { + "role": "system", + "content": system_prompt, + } + ) + + first_user_message_seen = False + for msg in body.messages: + msg_dict = { + "role": msg.role, + "content": msg.content, + } + if msg.tool_call_id: + msg_dict["tool_call_id"] = msg.tool_call_id + if msg.name: + msg_dict["name"] = msg.name + + if ( + msg.role == "user" + and not first_user_message_seen + and body.include_live_image + ): + first_user_message_seen = True + image_url = await _get_live_frame_image_url( + request, body.include_live_image, allowed_cameras + ) + if image_url: + msg_dict["content"] = [ + {"type": "text", "text": msg.content}, + {"type": "image_url", "image_url": {"url": image_url}}, + ] + + conversation.append(msg_dict) + + tool_iterations = 0 + max_iterations = body.max_tool_iterations + + logger.debug( + f"Starting chat completion with {len(conversation)} message(s), " + f"{len(tools)} tool(s) available, max_iterations={max_iterations}" + ) + + try: + while tool_iterations < max_iterations: + logger.debug( + f"Calling LLM (iteration {tool_iterations + 1}/{max_iterations}) " + f"with {len(conversation)} message(s) in conversation" + ) + response = genai_client.chat_with_tools( + messages=conversation, + tools=tools if tools else None, + tool_choice="auto", + ) + + if response.get("finish_reason") == "error": + logger.error("GenAI client returned an error") + return JSONResponse( + content={ + "error": "An error occurred while processing your request.", + }, + status_code=500, + ) + + assistant_message = { + "role": "assistant", + "content": response.get("content"), + } + if response.get("tool_calls"): + assistant_message["tool_calls"] = [ + { + "id": tc["id"], + "type": "function", + "function": { + "name": tc["name"], + "arguments": json.dumps(tc["arguments"]), + }, + } + for tc in response["tool_calls"] + ] + 
conversation.append(assistant_message) + + tool_calls = response.get("tool_calls") + if not tool_calls: + logger.debug( + f"Chat completion finished with final answer (iterations: {tool_iterations})" + ) + return JSONResponse( + content=ChatCompletionResponse( + message=ChatMessageResponse( + role="assistant", + content=response.get("content"), + tool_calls=None, + ), + finish_reason=response.get("finish_reason", "stop"), + tool_iterations=tool_iterations, + ).model_dump(), + ) + + # Execute tools + tool_iterations += 1 + logger.debug( + f"Tool calls detected (iteration {tool_iterations}/{max_iterations}): " + f"{len(tool_calls)} tool(s) to execute" + ) + tool_results = [] + + for tool_call in tool_calls: + tool_name = tool_call["name"] + tool_args = tool_call["arguments"] + tool_call_id = tool_call["id"] + + logger.debug( + f"Executing tool: {tool_name} (id: {tool_call_id}) with arguments: {json.dumps(tool_args, indent=2)}" + ) + + try: + tool_result = await _execute_tool_internal( + tool_name, tool_args, request, allowed_cameras + ) + + if isinstance(tool_result, dict): + result_content = json.dumps(tool_result) + result_summary = tool_result + if isinstance(tool_result, dict) and isinstance( + tool_result.get("content"), list + ): + result_count = len(tool_result.get("content", [])) + result_summary = { + "count": result_count, + "sample": tool_result.get("content", [])[:2] + if result_count > 0 + else [], + } + logger.debug( + f"Tool {tool_name} (id: {tool_call_id}) completed successfully. " + f"Result: {json.dumps(result_summary, indent=2)}" + ) + elif isinstance(tool_result, str): + result_content = tool_result + logger.debug( + f"Tool {tool_name} (id: {tool_call_id}) completed successfully. " + f"Result length: {len(result_content)} characters" + ) + else: + result_content = str(tool_result) + logger.debug( + f"Tool {tool_name} (id: {tool_call_id}) completed successfully. 
" + f"Result type: {type(tool_result).__name__}" + ) + + tool_results.append( + { + "role": "tool", + "tool_call_id": tool_call_id, + "content": result_content, + } + ) + except Exception as e: + logger.error( + f"Error executing tool {tool_name} (id: {tool_call_id}): {e}", + exc_info=True, + ) + error_content = json.dumps({"error": "Tool execution failed"}) + tool_results.append( + { + "role": "tool", + "tool_call_id": tool_call_id, + "content": error_content, + } + ) + logger.debug( + f"Tool {tool_name} (id: {tool_call_id}) failed. Error result added to conversation." + ) + + conversation.extend(tool_results) + logger.debug( + f"Added {len(tool_results)} tool result(s) to conversation. " + f"Continuing with next LLM call..." + ) + + logger.warning( + f"Max tool iterations ({max_iterations}) reached. Returning partial response." + ) + return JSONResponse( + content=ChatCompletionResponse( + message=ChatMessageResponse( + role="assistant", + content="I reached the maximum number of tool call iterations. 
Please try rephrasing your question.", + tool_calls=None, + ), + finish_reason="length", + tool_iterations=tool_iterations, + ).model_dump(), + ) + + except Exception as e: + logger.error(f"Error in chat completion: {e}", exc_info=True) + return JSONResponse( + content={ + "error": "An error occurred while processing your request.", + }, + status_code=500, + ) diff --git a/frigate/api/defs/query/media_query_parameters.py b/frigate/api/defs/query/media_query_parameters.py index a16f0d53f..7438f2f2f 100644 --- a/frigate/api/defs/query/media_query_parameters.py +++ b/frigate/api/defs/query/media_query_parameters.py @@ -1,8 +1,7 @@ from enum import Enum -from typing import Optional, Union +from typing import Optional from pydantic import BaseModel -from pydantic.json_schema import SkipJsonSchema class Extension(str, Enum): @@ -48,15 +47,3 @@ class MediaMjpegFeedQueryParams(BaseModel): mask: Optional[int] = None motion: Optional[int] = None regions: Optional[int] = None - - -class MediaRecordingsSummaryQueryParams(BaseModel): - timezone: str = "utc" - cameras: Optional[str] = "all" - - -class MediaRecordingsAvailabilityQueryParams(BaseModel): - cameras: str = "all" - before: Union[float, SkipJsonSchema[None]] = None - after: Union[float, SkipJsonSchema[None]] = None - scale: int = 30 diff --git a/frigate/api/defs/query/recordings_query_parameters.py b/frigate/api/defs/query/recordings_query_parameters.py new file mode 100644 index 000000000..d4f1b0a7b --- /dev/null +++ b/frigate/api/defs/query/recordings_query_parameters.py @@ -0,0 +1,21 @@ +from typing import Optional, Union + +from pydantic import BaseModel +from pydantic.json_schema import SkipJsonSchema + + +class MediaRecordingsSummaryQueryParams(BaseModel): + timezone: str = "utc" + cameras: Optional[str] = "all" + + +class MediaRecordingsAvailabilityQueryParams(BaseModel): + cameras: str = "all" + before: Union[float, SkipJsonSchema[None]] = None + after: Union[float, SkipJsonSchema[None]] = None + scale: int = 
30 + + +class RecordingsDeleteQueryParams(BaseModel): + keep: Optional[str] = None + cameras: Optional[str] = "all" diff --git a/frigate/api/defs/request/app_body.py b/frigate/api/defs/request/app_body.py index c4129d8da..6059daf6e 100644 --- a/frigate/api/defs/request/app_body.py +++ b/frigate/api/defs/request/app_body.py @@ -1,6 +1,6 @@ -from typing import Any, Dict, Optional +from typing import Any, Dict, List, Optional -from pydantic import BaseModel +from pydantic import BaseModel, Field class AppConfigSetBody(BaseModel): @@ -27,3 +27,16 @@ class AppPostLoginBody(BaseModel): class AppPutRoleBody(BaseModel): role: str + + +class MediaSyncBody(BaseModel): + dry_run: bool = Field( + default=True, description="If True, only report orphans without deleting them" + ) + media_types: List[str] = Field( + default=["all"], + description="Types of media to sync: 'all', 'event_snapshots', 'event_thumbnails', 'review_thumbnails', 'previews', 'exports', 'recordings'", + ) + force: bool = Field( + default=False, description="If True, bypass safety threshold checks" + ) diff --git a/frigate/api/defs/request/chat_body.py b/frigate/api/defs/request/chat_body.py new file mode 100644 index 000000000..fa3c3860a --- /dev/null +++ b/frigate/api/defs/request/chat_body.py @@ -0,0 +1,41 @@ +"""Chat API request models.""" + +from typing import Optional + +from pydantic import BaseModel, Field + + +class ChatMessage(BaseModel): + """A single message in a chat conversation.""" + + role: str = Field( + description="Message role: 'user', 'assistant', 'system', or 'tool'" + ) + content: str = Field(description="Message content") + tool_call_id: Optional[str] = Field( + default=None, description="For tool messages, the ID of the tool call" + ) + name: Optional[str] = Field( + default=None, description="For tool messages, the tool name" + ) + + +class ChatCompletionRequest(BaseModel): + """Request for chat completion with tool calling.""" + + messages: list[ChatMessage] = Field( + 
description="List of messages in the conversation" + ) + max_tool_iterations: int = Field( + default=5, + ge=1, + le=10, + description="Maximum number of tool call iterations (default: 5)", + ) + include_live_image: Optional[str] = Field( + default=None, + description=( + "If set, the current live frame from this camera is attached to the first " + "user message as multimodal content. Use with get_live_context for detection info." + ), + ) diff --git a/frigate/api/defs/request/events_body.py b/frigate/api/defs/request/events_body.py index 50754e92a..d844c31ca 100644 --- a/frigate/api/defs/request/events_body.py +++ b/frigate/api/defs/request/events_body.py @@ -41,6 +41,7 @@ class EventsCreateBody(BaseModel): duration: Optional[int] = 30 include_recording: Optional[bool] = True draw: Optional[dict] = {} + pre_capture: Optional[int] = None class EventsEndBody(BaseModel): diff --git a/frigate/api/defs/request/export_case_body.py b/frigate/api/defs/request/export_case_body.py new file mode 100644 index 000000000..35cd8ff7f --- /dev/null +++ b/frigate/api/defs/request/export_case_body.py @@ -0,0 +1,35 @@ +from typing import Optional + +from pydantic import BaseModel, Field + + +class ExportCaseCreateBody(BaseModel): + """Request body for creating a new export case.""" + + name: str = Field(max_length=100, description="Friendly name of the export case") + description: Optional[str] = Field( + default=None, description="Optional description of the export case" + ) + + +class ExportCaseUpdateBody(BaseModel): + """Request body for updating an existing export case.""" + + name: Optional[str] = Field( + default=None, + max_length=100, + description="Updated friendly name of the export case", + ) + description: Optional[str] = Field( + default=None, description="Updated description of the export case" + ) + + +class ExportCaseAssignBody(BaseModel): + """Request body for assigning or unassigning an export to a case.""" + + export_case_id: Optional[str] = Field( + default=None, 
+ max_length=30, + description="Case ID to assign to the export, or null to unassign", + ) diff --git a/frigate/api/defs/request/export_recordings_body.py b/frigate/api/defs/request/export_recordings_body.py index 19fc2f019..96ecccaa4 100644 --- a/frigate/api/defs/request/export_recordings_body.py +++ b/frigate/api/defs/request/export_recordings_body.py @@ -3,18 +3,47 @@ from typing import Optional, Union from pydantic import BaseModel, Field from pydantic.json_schema import SkipJsonSchema -from frigate.record.export import ( - PlaybackFactorEnum, - PlaybackSourceEnum, -) +from frigate.record.export import PlaybackSourceEnum class ExportRecordingsBody(BaseModel): - playback: PlaybackFactorEnum = Field( - default=PlaybackFactorEnum.realtime, title="Playback factor" - ) source: PlaybackSourceEnum = Field( default=PlaybackSourceEnum.recordings, title="Playback source" ) name: Optional[str] = Field(title="Friendly name", default=None, max_length=256) image_path: Union[str, SkipJsonSchema[None]] = None + export_case_id: Optional[str] = Field( + default=None, + title="Export case ID", + max_length=30, + description="ID of the export case to assign this export to", + ) + + +class ExportRecordingsCustomBody(BaseModel): + source: PlaybackSourceEnum = Field( + default=PlaybackSourceEnum.recordings, title="Playback source" + ) + name: str = Field(title="Friendly name", default=None, max_length=256) + image_path: Union[str, SkipJsonSchema[None]] = None + export_case_id: Optional[str] = Field( + default=None, + title="Export case ID", + max_length=30, + description="ID of the export case to assign this export to", + ) + ffmpeg_input_args: Optional[str] = Field( + default=None, + title="FFmpeg input arguments", + description="Custom FFmpeg input arguments. If not provided, defaults to timelapse input args.", + ) + ffmpeg_output_args: Optional[str] = Field( + default=None, + title="FFmpeg output arguments", + description="Custom FFmpeg output arguments. 
If not provided, defaults to timelapse output args.", + ) + cpu_fallback: bool = Field( + default=False, + title="CPU Fallback", + description="If true, retry export without hardware acceleration if the initial export fails.", + ) diff --git a/frigate/api/defs/response/chat_response.py b/frigate/api/defs/response/chat_response.py new file mode 100644 index 000000000..f1cc9194b --- /dev/null +++ b/frigate/api/defs/response/chat_response.py @@ -0,0 +1,37 @@ +"""Chat API response models.""" + +from typing import Any, Optional + +from pydantic import BaseModel, Field + + +class ToolCall(BaseModel): + """A tool call from the LLM.""" + + id: str = Field(description="Unique identifier for this tool call") + name: str = Field(description="Tool name to call") + arguments: dict[str, Any] = Field(description="Arguments for the tool call") + + +class ChatMessageResponse(BaseModel): + """A message in the chat response.""" + + role: str = Field(description="Message role") + content: Optional[str] = Field( + default=None, description="Message content (None if tool calls present)" + ) + tool_calls: Optional[list[ToolCall]] = Field( + default=None, description="Tool calls if LLM wants to call tools" + ) + + +class ChatCompletionResponse(BaseModel): + """Response from chat completion.""" + + message: ChatMessageResponse = Field(description="The assistant's message") + finish_reason: str = Field( + description="Reason generation stopped: 'stop', 'tool_calls', 'length', 'error'" + ) + tool_iterations: int = Field( + default=0, description="Number of tool call iterations performed" + ) diff --git a/frigate/api/defs/response/export_case_response.py b/frigate/api/defs/response/export_case_response.py new file mode 100644 index 000000000..713e16683 --- /dev/null +++ b/frigate/api/defs/response/export_case_response.py @@ -0,0 +1,22 @@ +from typing import List, Optional + +from pydantic import BaseModel, Field + + +class ExportCaseModel(BaseModel): + """Model representing a single export 
case.""" + + id: str = Field(description="Unique identifier for the export case") + name: str = Field(description="Friendly name of the export case") + description: Optional[str] = Field( + default=None, description="Optional description of the export case" + ) + created_at: float = Field( + description="Unix timestamp when the export case was created" + ) + updated_at: float = Field( + description="Unix timestamp when the export case was last updated" + ) + + +ExportCasesResponse = List[ExportCaseModel] diff --git a/frigate/api/defs/response/export_response.py b/frigate/api/defs/response/export_response.py index 63a9e91a1..600794f97 100644 --- a/frigate/api/defs/response/export_response.py +++ b/frigate/api/defs/response/export_response.py @@ -15,6 +15,9 @@ class ExportModel(BaseModel): in_progress: bool = Field( description="Whether the export is currently being processed" ) + export_case_id: Optional[str] = Field( + default=None, description="ID of the export case this export belongs to" + ) class StartExportResponse(BaseModel): diff --git a/frigate/api/defs/tags.py b/frigate/api/defs/tags.py index f804385d1..3aaaa59ef 100644 --- a/frigate/api/defs/tags.py +++ b/frigate/api/defs/tags.py @@ -3,13 +3,15 @@ from enum import Enum class Tags(Enum): app = "App" + auth = "Auth" camera = "Camera" - preview = "Preview" + chat = "Chat" + events = "Events" + export = "Export" + classification = "Classification" logs = "Logs" media = "Media" notifications = "Notifications" + preview = "Preview" + recordings = "Recordings" review = "Review" - export = "Export" - events = "Events" - classification = "Classification" - auth = "Auth" diff --git a/frigate/api/event.py b/frigate/api/event.py index c03cfb431..b0a749018 100644 --- a/frigate/api/event.py +++ b/frigate/api/event.py @@ -1782,6 +1782,7 @@ def create_event( body.duration, "api", body.draw, + body.pre_capture, ), EventMetadataTypeEnum.manual_event_create.value, ) diff --git a/frigate/api/export.py b/frigate/api/export.py 
index 24fed93b0..23f975618 100644 --- a/frigate/api/export.py +++ b/frigate/api/export.py @@ -4,10 +4,10 @@ import logging import random import string from pathlib import Path -from typing import List +from typing import List, Optional import psutil -from fastapi import APIRouter, Depends, Request +from fastapi import APIRouter, Depends, Query, Request from fastapi.responses import JSONResponse from pathvalidate import sanitize_filepath from peewee import DoesNotExist @@ -19,8 +19,20 @@ from frigate.api.auth import ( require_camera_access, require_role, ) -from frigate.api.defs.request.export_recordings_body import ExportRecordingsBody +from frigate.api.defs.request.export_case_body import ( + ExportCaseAssignBody, + ExportCaseCreateBody, + ExportCaseUpdateBody, +) +from frigate.api.defs.request.export_recordings_body import ( + ExportRecordingsBody, + ExportRecordingsCustomBody, +) from frigate.api.defs.request.export_rename_body import ExportRenameBody +from frigate.api.defs.response.export_case_response import ( + ExportCaseModel, + ExportCasesResponse, +) from frigate.api.defs.response.export_response import ( ExportModel, ExportsResponse, @@ -29,9 +41,9 @@ from frigate.api.defs.response.export_response import ( from frigate.api.defs.response.generic_response import GenericResponse from frigate.api.defs.tags import Tags from frigate.const import CLIPS_DIR, EXPORT_DIR -from frigate.models import Export, Previews, Recordings +from frigate.models import Export, ExportCase, Previews, Recordings from frigate.record.export import ( - PlaybackFactorEnum, + DEFAULT_TIME_LAPSE_FFMPEG_ARGS, PlaybackSourceEnum, RecordingExporter, ) @@ -52,17 +64,182 @@ router = APIRouter(tags=[Tags.export]) ) def get_exports( allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter), + export_case_id: Optional[str] = None, + cameras: Optional[str] = Query(default="all"), + start_date: Optional[float] = None, + end_date: Optional[float] = None, ): - exports = ( - Export.select() 
- .where(Export.camera << allowed_cameras) - .order_by(Export.date.desc()) - .dicts() - .iterator() - ) + query = Export.select().where(Export.camera << allowed_cameras) + + if export_case_id is not None: + if export_case_id == "unassigned": + query = query.where(Export.export_case.is_null(True)) + else: + query = query.where(Export.export_case == export_case_id) + + if cameras and cameras != "all": + requested = set(cameras.split(",")) + filtered_cameras = list(requested.intersection(allowed_cameras)) + if not filtered_cameras: + return JSONResponse(content=[]) + query = query.where(Export.camera << filtered_cameras) + + if start_date is not None: + query = query.where(Export.date >= start_date) + + if end_date is not None: + query = query.where(Export.date <= end_date) + + exports = query.order_by(Export.date.desc()).dicts().iterator() return JSONResponse(content=[e for e in exports]) +@router.get( + "/cases", + response_model=ExportCasesResponse, + dependencies=[Depends(allow_any_authenticated())], + summary="Get export cases", + description="Gets all export cases from the database.", +) +def get_export_cases(): + cases = ( + ExportCase.select().order_by(ExportCase.created_at.desc()).dicts().iterator() + ) + return JSONResponse(content=[c for c in cases]) + + +@router.post( + "/cases", + response_model=ExportCaseModel, + dependencies=[Depends(require_role(["admin"]))], + summary="Create export case", + description="Creates a new export case.", +) +def create_export_case(body: ExportCaseCreateBody): + case = ExportCase.create( + id="".join(random.choices(string.ascii_lowercase + string.digits, k=12)), + name=body.name, + description=body.description, + created_at=Path().stat().st_mtime, + updated_at=Path().stat().st_mtime, + ) + return JSONResponse(content=model_to_dict(case)) + + +@router.get( + "/cases/{case_id}", + response_model=ExportCaseModel, + dependencies=[Depends(allow_any_authenticated())], + summary="Get a single export case", + description="Gets a 
specific export case by ID.", +) +def get_export_case(case_id: str): + try: + case = ExportCase.get(ExportCase.id == case_id) + return JSONResponse(content=model_to_dict(case)) + except DoesNotExist: + return JSONResponse( + content={"success": False, "message": "Export case not found"}, + status_code=404, + ) + + +@router.patch( + "/cases/{case_id}", + response_model=GenericResponse, + dependencies=[Depends(require_role(["admin"]))], + summary="Update export case", + description="Updates an existing export case.", +) +def update_export_case(case_id: str, body: ExportCaseUpdateBody): + try: + case = ExportCase.get(ExportCase.id == case_id) + except DoesNotExist: + return JSONResponse( + content={"success": False, "message": "Export case not found"}, + status_code=404, + ) + + if body.name is not None: + case.name = body.name + if body.description is not None: + case.description = body.description + + case.save() + + return JSONResponse( + content={"success": True, "message": "Successfully updated export case."} + ) + + +@router.delete( + "/cases/{case_id}", + response_model=GenericResponse, + dependencies=[Depends(require_role(["admin"]))], + summary="Delete export case", + description="""Deletes an export case.\n Exports that reference this case will have their export_case set to null.\n """, +) +def delete_export_case(case_id: str): + try: + case = ExportCase.get(ExportCase.id == case_id) + except DoesNotExist: + return JSONResponse( + content={"success": False, "message": "Export case not found"}, + status_code=404, + ) + + # Unassign exports from this case but keep the exports themselves + Export.update(export_case=None).where(Export.export_case == case).execute() + + case.delete_instance() + + return JSONResponse( + content={"success": True, "message": "Successfully deleted export case."} + ) + + +@router.patch( + "/export/{export_id}/case", + response_model=GenericResponse, + dependencies=[Depends(require_role(["admin"]))], + summary="Assign export to case", 
+ description=( + "Assigns an export to a case, or unassigns it if export_case_id is null." + ), +) +async def assign_export_case( + export_id: str, + body: ExportCaseAssignBody, + request: Request, +): + try: + export: Export = Export.get(Export.id == export_id) + await require_camera_access(export.camera, request=request) + except DoesNotExist: + return JSONResponse( + content={"success": False, "message": "Export not found."}, + status_code=404, + ) + + if body.export_case_id is not None: + try: + ExportCase.get(ExportCase.id == body.export_case_id) + except DoesNotExist: + return JSONResponse( + content={"success": False, "message": "Export case not found."}, + status_code=404, + ) + export.export_case = body.export_case_id + else: + export.export_case = None + + export.save() + + return JSONResponse( + content={"success": True, "message": "Successfully updated export case."} + ) + + @router.post( "/export/{camera_name}/start/{start_time}/end/{end_time}", response_model=StartExportResponse, @@ -88,11 +265,20 @@ def export_recording( status_code=404, ) - playback_factor = body.playback playback_source = body.source friendly_name = body.name existing_image = sanitize_filepath(body.image_path) if body.image_path else None + export_case_id = body.export_case_id + if export_case_id is not None: + try: + ExportCase.get(ExportCase.id == export_case_id) + except DoesNotExist: + return JSONResponse( + content={"success": False, "message": "Export case not found"}, + status_code=404, + ) + # Ensure that existing_image is a valid path if existing_image and not existing_image.startswith(CLIPS_DIR): return JSONResponse( @@ -151,16 +337,12 @@ def export_recording( existing_image, int(start_time), int(end_time), - ( - PlaybackFactorEnum[playback_factor] - if playback_factor in PlaybackFactorEnum.__members__.values() - else PlaybackFactorEnum.realtime - ), ( PlaybackSourceEnum[playback_source] if playback_source in PlaybackSourceEnum.__members__.values() else 
PlaybackSourceEnum.recordings ), + export_case_id, ) exporter.start() return JSONResponse( @@ -271,6 +453,138 @@ async def export_delete(event_id: str, request: Request): ) +@router.post( + "/export/custom/{camera_name}/start/{start_time}/end/{end_time}", + response_model=StartExportResponse, + dependencies=[Depends(require_camera_access)], + summary="Start custom recording export", + description="""Starts an export of a recording for the specified time range using custom FFmpeg arguments. + The export can be from recordings or preview footage. Returns the export ID if + successful, or an error message if the camera is invalid or no recordings/previews + are found for the time range. If ffmpeg_input_args and ffmpeg_output_args are not provided, + defaults to timelapse export settings.""", +) +def export_recording_custom( + request: Request, + camera_name: str, + start_time: float, + end_time: float, + body: ExportRecordingsCustomBody, +): + if not camera_name or not request.app.frigate_config.cameras.get(camera_name): + return JSONResponse( + content=( + {"success": False, "message": f"{camera_name} is not a valid camera."} + ), + status_code=404, + ) + + playback_source = body.source + friendly_name = body.name + existing_image = sanitize_filepath(body.image_path) if body.image_path else None + ffmpeg_input_args = body.ffmpeg_input_args + ffmpeg_output_args = body.ffmpeg_output_args + cpu_fallback = body.cpu_fallback + + export_case_id = body.export_case_id + if export_case_id is not None: + try: + ExportCase.get(ExportCase.id == export_case_id) + except DoesNotExist: + return JSONResponse( + content={"success": False, "message": "Export case not found"}, + status_code=404, + ) + + # Ensure that existing_image is a valid path + if existing_image and not existing_image.startswith(CLIPS_DIR): + return JSONResponse( + content=({"success": False, "message": "Invalid image path"}), + status_code=400, + ) + + if playback_source == "recordings": + recordings_count = ( + 
Recordings.select() + .where( + Recordings.start_time.between(start_time, end_time) + | Recordings.end_time.between(start_time, end_time) + | ( + (start_time > Recordings.start_time) + & (end_time < Recordings.end_time) + ) + ) + .where(Recordings.camera == camera_name) + .count() + ) + + if recordings_count <= 0: + return JSONResponse( + content=( + {"success": False, "message": "No recordings found for time range"} + ), + status_code=400, + ) + else: + previews_count = ( + Previews.select() + .where( + Previews.start_time.between(start_time, end_time) + | Previews.end_time.between(start_time, end_time) + | ((start_time > Previews.start_time) & (end_time < Previews.end_time)) + ) + .where(Previews.camera == camera_name) + .count() + ) + + if not is_current_hour(start_time) and previews_count <= 0: + return JSONResponse( + content=( + {"success": False, "message": "No previews found for time range"} + ), + status_code=400, + ) + + export_id = f"{camera_name}_{''.join(random.choices(string.ascii_lowercase + string.digits, k=6))}" + + # Set default values if not provided (timelapse defaults) + if ffmpeg_input_args is None: + ffmpeg_input_args = "" + + if ffmpeg_output_args is None: + ffmpeg_output_args = DEFAULT_TIME_LAPSE_FFMPEG_ARGS + + exporter = RecordingExporter( + request.app.frigate_config, + export_id, + camera_name, + friendly_name, + existing_image, + int(start_time), + int(end_time), + ( + PlaybackSourceEnum[playback_source] + if playback_source in PlaybackSourceEnum.__members__.values() + else PlaybackSourceEnum.recordings + ), + export_case_id, + ffmpeg_input_args, + ffmpeg_output_args, + cpu_fallback, + ) + exporter.start() + return JSONResponse( + content=( + { + "success": True, + "message": "Starting export of recording.", + "export_id": export_id, + } + ), + status_code=200, + ) + + @router.get( "/exports/{export_id}", response_model=ExportModel, diff --git a/frigate/api/fastapi_app.py b/frigate/api/fastapi_app.py index 48c97dfaf..496c8fada 100644 
--- a/frigate/api/fastapi_app.py +++ b/frigate/api/fastapi_app.py @@ -16,12 +16,14 @@ from frigate.api import app as main_app from frigate.api import ( auth, camera, + chat, classification, event, export, media, notification, preview, + record, review, ) from frigate.api.auth import get_jwt_secret, limiter, require_admin_by_default @@ -120,6 +122,7 @@ def create_fastapi_app( # Order of include_router matters: https://fastapi.tiangolo.com/tutorial/path-params/#order-matters app.include_router(auth.router) app.include_router(camera.router) + app.include_router(chat.router) app.include_router(classification.router) app.include_router(review.router) app.include_router(main_app.router) @@ -128,6 +131,7 @@ def create_fastapi_app( app.include_router(export.router) app.include_router(event.router) app.include_router(media.router) + app.include_router(record.router) # App Properties app.frigate_config = frigate_config app.embeddings = embeddings diff --git a/frigate/api/media.py b/frigate/api/media.py index 971bfef83..3cfd97674 100644 --- a/frigate/api/media.py +++ b/frigate/api/media.py @@ -8,9 +8,8 @@ import os import subprocess as sp import time from datetime import datetime, timedelta, timezone -from functools import reduce from pathlib import Path as FilePath -from typing import Any, List +from typing import Any from urllib.parse import unquote import cv2 @@ -19,12 +18,11 @@ import pytz from fastapi import APIRouter, Depends, Path, Query, Request, Response from fastapi.responses import FileResponse, JSONResponse, StreamingResponse from pathvalidate import sanitize_filename -from peewee import DoesNotExist, fn, operator +from peewee import DoesNotExist, fn from tzlocal import get_localzone_name from frigate.api.auth import ( allow_any_authenticated, - get_allowed_cameras_for_filter, require_camera_access, ) from frigate.api.defs.query.media_query_parameters import ( @@ -32,8 +30,6 @@ from frigate.api.defs.query.media_query_parameters import ( 
MediaEventsSnapshotQueryParams, MediaLatestFrameQueryParams, MediaMjpegFeedQueryParams, - MediaRecordingsAvailabilityQueryParams, - MediaRecordingsSummaryQueryParams, ) from frigate.api.defs.tags import Tags from frigate.camera.state import CameraState @@ -44,13 +40,12 @@ from frigate.const import ( INSTALL_DIR, MAX_SEGMENT_DURATION, PREVIEW_FRAME_TYPE, - RECORD_DIR, ) from frigate.models import Event, Previews, Recordings, Regions, ReviewSegment +from frigate.output.preview import get_most_recent_preview_frame from frigate.track.object_processing import TrackedObjectProcessor from frigate.util.file import get_event_thumbnail_bytes from frigate.util.image import get_image_from_recording -from frigate.util.time import get_dst_transitions logger = logging.getLogger(__name__) @@ -131,7 +126,9 @@ async def camera_ptz_info(request: Request, camera_name: str): @router.get( - "/{camera_name}/latest.{extension}", dependencies=[Depends(require_camera_access)] + "/{camera_name}/latest.{extension}", + dependencies=[Depends(require_camera_access)], + description="Returns the latest frame from the specified camera in the requested format (jpg, png, webp). 
Falls back to preview frames if the camera is offline.", ) async def latest_frame( request: Request, @@ -165,20 +162,37 @@ async def latest_frame( or 10 ) + is_offline = False if frame is None or datetime.now().timestamp() > ( frame_processor.get_current_frame_time(camera_name) + retry_interval ): - if request.app.camera_error_image is None: - error_image = glob.glob( - os.path.join(INSTALL_DIR, "frigate/images/camera-error.jpg") - ) + last_frame_time = frame_processor.get_current_frame_time(camera_name) + preview_path = get_most_recent_preview_frame( + camera_name, before=last_frame_time + ) - if len(error_image) > 0: - request.app.camera_error_image = cv2.imread( - error_image[0], cv2.IMREAD_UNCHANGED + if preview_path: + logger.debug(f"Using most recent preview frame for {camera_name}") + frame = cv2.imread(preview_path, cv2.IMREAD_UNCHANGED) + + if frame is not None: + is_offline = True + + if frame is None or not is_offline: + logger.debug( + f"No live or preview frame available for {camera_name}. Using error image." 
+ ) + if request.app.camera_error_image is None: + error_image = glob.glob( + os.path.join(INSTALL_DIR, "frigate/images/camera-error.jpg") ) - frame = request.app.camera_error_image + if len(error_image) > 0: + request.app.camera_error_image = cv2.imread( + error_image[0], cv2.IMREAD_UNCHANGED + ) + + frame = request.app.camera_error_image height = int(params.height or str(frame.shape[0])) width = int(height * frame.shape[1] / frame.shape[0]) @@ -200,14 +214,18 @@ async def latest_frame( frame = cv2.resize(frame, dsize=(width, height), interpolation=cv2.INTER_AREA) _, img = cv2.imencode(f".{extension.value}", frame, quality_params) + + headers = { + "Cache-Control": "no-store" if not params.store else "private, max-age=60", + } + + if is_offline: + headers["X-Frigate-Offline"] = "true" + return Response( content=img.tobytes(), media_type=extension.get_mime_type(), - headers={ - "Cache-Control": "no-store" - if not params.store - else "private, max-age=60", - }, + headers=headers, ) elif ( camera_name == "birdseye" @@ -397,333 +415,6 @@ async def submit_recording_snapshot_to_plus( ) -@router.get("/recordings/storage", dependencies=[Depends(allow_any_authenticated())]) -def get_recordings_storage_usage(request: Request): - recording_stats = request.app.stats_emitter.get_latest_stats()["service"][ - "storage" - ][RECORD_DIR] - - if not recording_stats: - return JSONResponse({}) - - total_mb = recording_stats["total"] - - camera_usages: dict[str, dict] = ( - request.app.storage_maintainer.calculate_camera_usages() - ) - - for camera_name in camera_usages.keys(): - if camera_usages.get(camera_name, {}).get("usage"): - camera_usages[camera_name]["usage_percent"] = ( - camera_usages.get(camera_name, {}).get("usage", 0) / total_mb - ) * 100 - - return JSONResponse(content=camera_usages) - - -@router.get("/recordings/summary", dependencies=[Depends(allow_any_authenticated())]) -def all_recordings_summary( - request: Request, - params: MediaRecordingsSummaryQueryParams = 
Depends(), - allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter), -): - """Returns true/false by day indicating if recordings exist""" - - cameras = params.cameras - if cameras != "all": - requested = set(unquote(cameras).split(",")) - filtered = requested.intersection(allowed_cameras) - if not filtered: - return JSONResponse(content={}) - camera_list = list(filtered) - else: - camera_list = allowed_cameras - - time_range_query = ( - Recordings.select( - fn.MIN(Recordings.start_time).alias("min_time"), - fn.MAX(Recordings.start_time).alias("max_time"), - ) - .where(Recordings.camera << camera_list) - .dicts() - .get() - ) - - min_time = time_range_query.get("min_time") - max_time = time_range_query.get("max_time") - - if min_time is None or max_time is None: - return JSONResponse(content={}) - - dst_periods = get_dst_transitions(params.timezone, min_time, max_time) - - days: dict[str, bool] = {} - - for period_start, period_end, period_offset in dst_periods: - hours_offset = int(period_offset / 60 / 60) - minutes_offset = int(period_offset / 60 - hours_offset * 60) - period_hour_modifier = f"{hours_offset} hour" - period_minute_modifier = f"{minutes_offset} minute" - - period_query = ( - Recordings.select( - fn.strftime( - "%Y-%m-%d", - fn.datetime( - Recordings.start_time, - "unixepoch", - period_hour_modifier, - period_minute_modifier, - ), - ).alias("day") - ) - .where( - (Recordings.camera << camera_list) - & (Recordings.end_time >= period_start) - & (Recordings.start_time <= period_end) - ) - .group_by( - fn.strftime( - "%Y-%m-%d", - fn.datetime( - Recordings.start_time, - "unixepoch", - period_hour_modifier, - period_minute_modifier, - ), - ) - ) - .order_by(Recordings.start_time.desc()) - .namedtuples() - ) - - for g in period_query: - days[g.day] = True - - return JSONResponse(content=dict(sorted(days.items()))) - - -@router.get( - "/{camera_name}/recordings/summary", dependencies=[Depends(require_camera_access)] -) -async def 
recordings_summary(camera_name: str, timezone: str = "utc"): - """Returns hourly summary for recordings of given camera""" - - time_range_query = ( - Recordings.select( - fn.MIN(Recordings.start_time).alias("min_time"), - fn.MAX(Recordings.start_time).alias("max_time"), - ) - .where(Recordings.camera == camera_name) - .dicts() - .get() - ) - - min_time = time_range_query.get("min_time") - max_time = time_range_query.get("max_time") - - days: dict[str, dict] = {} - - if min_time is None or max_time is None: - return JSONResponse(content=list(days.values())) - - dst_periods = get_dst_transitions(timezone, min_time, max_time) - - for period_start, period_end, period_offset in dst_periods: - hours_offset = int(period_offset / 60 / 60) - minutes_offset = int(period_offset / 60 - hours_offset * 60) - period_hour_modifier = f"{hours_offset} hour" - period_minute_modifier = f"{minutes_offset} minute" - - recording_groups = ( - Recordings.select( - fn.strftime( - "%Y-%m-%d %H", - fn.datetime( - Recordings.start_time, - "unixepoch", - period_hour_modifier, - period_minute_modifier, - ), - ).alias("hour"), - fn.SUM(Recordings.duration).alias("duration"), - fn.SUM(Recordings.motion).alias("motion"), - fn.SUM(Recordings.objects).alias("objects"), - ) - .where( - (Recordings.camera == camera_name) - & (Recordings.end_time >= period_start) - & (Recordings.start_time <= period_end) - ) - .group_by((Recordings.start_time + period_offset).cast("int") / 3600) - .order_by(Recordings.start_time.desc()) - .namedtuples() - ) - - event_groups = ( - Event.select( - fn.strftime( - "%Y-%m-%d %H", - fn.datetime( - Event.start_time, - "unixepoch", - period_hour_modifier, - period_minute_modifier, - ), - ).alias("hour"), - fn.COUNT(Event.id).alias("count"), - ) - .where(Event.camera == camera_name, Event.has_clip) - .where( - (Event.start_time >= period_start) & (Event.start_time <= period_end) - ) - .group_by((Event.start_time + period_offset).cast("int") / 3600) - .namedtuples() - ) - - 
event_map = {g.hour: g.count for g in event_groups} - - for recording_group in recording_groups: - parts = recording_group.hour.split() - hour = parts[1] - day = parts[0] - events_count = event_map.get(recording_group.hour, 0) - hour_data = { - "hour": hour, - "events": events_count, - "motion": recording_group.motion, - "objects": recording_group.objects, - "duration": round(recording_group.duration), - } - if day in days: - # merge counts if already present (edge-case at DST boundary) - days[day]["events"] += events_count or 0 - days[day]["hours"].append(hour_data) - else: - days[day] = { - "events": events_count or 0, - "hours": [hour_data], - "day": day, - } - - return JSONResponse(content=list(days.values())) - - -@router.get("/{camera_name}/recordings", dependencies=[Depends(require_camera_access)]) -async def recordings( - camera_name: str, - after: float = (datetime.now() - timedelta(hours=1)).timestamp(), - before: float = datetime.now().timestamp(), -): - """Return specific camera recordings between the given 'after'/'end' times. 
If not provided the last hour will be used""" - recordings = ( - Recordings.select( - Recordings.id, - Recordings.start_time, - Recordings.end_time, - Recordings.segment_size, - Recordings.motion, - Recordings.objects, - Recordings.duration, - ) - .where( - Recordings.camera == camera_name, - Recordings.end_time >= after, - Recordings.start_time <= before, - ) - .order_by(Recordings.start_time) - .dicts() - .iterator() - ) - - return JSONResponse(content=list(recordings)) - - -@router.get( - "/recordings/unavailable", - response_model=list[dict], - dependencies=[Depends(allow_any_authenticated())], -) -async def no_recordings( - request: Request, - params: MediaRecordingsAvailabilityQueryParams = Depends(), - allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter), -): - """Get time ranges with no recordings.""" - cameras = params.cameras - if cameras != "all": - requested = set(unquote(cameras).split(",")) - filtered = requested.intersection(allowed_cameras) - if not filtered: - return JSONResponse(content=[]) - cameras = ",".join(filtered) - else: - cameras = allowed_cameras - - before = params.before or datetime.datetime.now().timestamp() - after = ( - params.after - or (datetime.datetime.now() - datetime.timedelta(hours=1)).timestamp() - ) - scale = params.scale - - clauses = [(Recordings.end_time >= after) & (Recordings.start_time <= before)] - if cameras != "all": - camera_list = cameras.split(",") - clauses.append((Recordings.camera << camera_list)) - else: - camera_list = allowed_cameras - - # Get recording start times - data: list[Recordings] = ( - Recordings.select(Recordings.start_time, Recordings.end_time) - .where(reduce(operator.and_, clauses)) - .order_by(Recordings.start_time.asc()) - .dicts() - .iterator() - ) - - # Convert recordings to list of (start, end) tuples - recordings = [(r["start_time"], r["end_time"]) for r in data] - - # Iterate through time segments and check if each has any recording - no_recording_segments = [] - 
current = after - current_gap_start = None - - while current < before: - segment_end = min(current + scale, before) - - # Check if this segment overlaps with any recording - has_recording = any( - rec_start < segment_end and rec_end > current - for rec_start, rec_end in recordings - ) - - if not has_recording: - # This segment has no recordings - if current_gap_start is None: - current_gap_start = current # Start a new gap - else: - # This segment has recordings - if current_gap_start is not None: - # End the current gap and append it - no_recording_segments.append( - {"start_time": int(current_gap_start), "end_time": int(current)} - ) - current_gap_start = None - - current = segment_end - - # Append the last gap if it exists - if current_gap_start is not None: - no_recording_segments.append( - {"start_time": int(current_gap_start), "end_time": int(before)} - ) - - return JSONResponse(content=no_recording_segments) - - @router.get( "/{camera_name}/start/{start_ts}/end/{end_ts}/clip.mp4", dependencies=[Depends(require_camera_access)], @@ -1046,6 +737,7 @@ async def event_snapshot( ): event_complete = False jpg_bytes = None + frame_time = 0 try: event = Event.get(Event.id == event_id, Event.end_time != None) event_complete = True @@ -1070,7 +762,7 @@ async def event_snapshot( if event_id in camera_state.tracked_objects: tracked_obj = camera_state.tracked_objects.get(event_id) if tracked_obj is not None: - jpg_bytes = tracked_obj.get_img_bytes( + jpg_bytes, frame_time = tracked_obj.get_img_bytes( ext="jpg", timestamp=params.timestamp, bounding_box=params.bbox, @@ -1099,6 +791,7 @@ async def event_snapshot( headers = { "Content-Type": "image/jpeg", "Cache-Control": "private, max-age=31536000" if event_complete else "no-store", + "X-Frame-Time": str(frame_time), } if params.download: diff --git a/frigate/api/record.py b/frigate/api/record.py new file mode 100644 index 000000000..789aa4a80 --- /dev/null +++ b/frigate/api/record.py @@ -0,0 +1,479 @@ +"""Recording APIs.""" 
+ +import logging +from datetime import datetime, timedelta +from functools import reduce +from pathlib import Path +from typing import List +from urllib.parse import unquote + +from fastapi import APIRouter, Depends, Request +from fastapi import Path as PathParam +from fastapi.responses import JSONResponse +from peewee import fn, operator + +from frigate.api.auth import ( + allow_any_authenticated, + get_allowed_cameras_for_filter, + require_camera_access, + require_role, +) +from frigate.api.defs.query.recordings_query_parameters import ( + MediaRecordingsAvailabilityQueryParams, + MediaRecordingsSummaryQueryParams, + RecordingsDeleteQueryParams, +) +from frigate.api.defs.response.generic_response import GenericResponse +from frigate.api.defs.tags import Tags +from frigate.const import RECORD_DIR +from frigate.models import Event, Recordings +from frigate.util.time import get_dst_transitions + +logger = logging.getLogger(__name__) + +router = APIRouter(tags=[Tags.recordings]) + + +@router.get("/recordings/storage", dependencies=[Depends(allow_any_authenticated())]) +def get_recordings_storage_usage(request: Request): + recording_stats = request.app.stats_emitter.get_latest_stats()["service"][ + "storage" + ][RECORD_DIR] + + if not recording_stats: + return JSONResponse({}) + + total_mb = recording_stats["total"] + + camera_usages: dict[str, dict] = ( + request.app.storage_maintainer.calculate_camera_usages() + ) + + for camera_name in camera_usages.keys(): + if camera_usages.get(camera_name, {}).get("usage"): + camera_usages[camera_name]["usage_percent"] = ( + camera_usages.get(camera_name, {}).get("usage", 0) / total_mb + ) * 100 + + return JSONResponse(content=camera_usages) + + +@router.get("/recordings/summary", dependencies=[Depends(allow_any_authenticated())]) +def all_recordings_summary( + request: Request, + params: MediaRecordingsSummaryQueryParams = Depends(), + allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter), +): + """Returns 
true/false by day indicating if recordings exist""" + + cameras = params.cameras + if cameras != "all": + requested = set(unquote(cameras).split(",")) + filtered = requested.intersection(allowed_cameras) + if not filtered: + return JSONResponse(content={}) + camera_list = list(filtered) + else: + camera_list = allowed_cameras + + time_range_query = ( + Recordings.select( + fn.MIN(Recordings.start_time).alias("min_time"), + fn.MAX(Recordings.start_time).alias("max_time"), + ) + .where(Recordings.camera << camera_list) + .dicts() + .get() + ) + + min_time = time_range_query.get("min_time") + max_time = time_range_query.get("max_time") + + if min_time is None or max_time is None: + return JSONResponse(content={}) + + dst_periods = get_dst_transitions(params.timezone, min_time, max_time) + + days: dict[str, bool] = {} + + for period_start, period_end, period_offset in dst_periods: + hours_offset = int(period_offset / 60 / 60) + minutes_offset = int(period_offset / 60 - hours_offset * 60) + period_hour_modifier = f"{hours_offset} hour" + period_minute_modifier = f"{minutes_offset} minute" + + period_query = ( + Recordings.select( + fn.strftime( + "%Y-%m-%d", + fn.datetime( + Recordings.start_time, + "unixepoch", + period_hour_modifier, + period_minute_modifier, + ), + ).alias("day") + ) + .where( + (Recordings.camera << camera_list) + & (Recordings.end_time >= period_start) + & (Recordings.start_time <= period_end) + ) + .group_by( + fn.strftime( + "%Y-%m-%d", + fn.datetime( + Recordings.start_time, + "unixepoch", + period_hour_modifier, + period_minute_modifier, + ), + ) + ) + .order_by(Recordings.start_time.desc()) + .namedtuples() + ) + + for g in period_query: + days[g.day] = True + + return JSONResponse(content=dict(sorted(days.items()))) + + +@router.get( + "/{camera_name}/recordings/summary", dependencies=[Depends(require_camera_access)] +) +async def recordings_summary(camera_name: str, timezone: str = "utc"): + """Returns hourly summary for recordings of given 
camera""" + + time_range_query = ( + Recordings.select( + fn.MIN(Recordings.start_time).alias("min_time"), + fn.MAX(Recordings.start_time).alias("max_time"), + ) + .where(Recordings.camera == camera_name) + .dicts() + .get() + ) + + min_time = time_range_query.get("min_time") + max_time = time_range_query.get("max_time") + + days: dict[str, dict] = {} + + if min_time is None or max_time is None: + return JSONResponse(content=list(days.values())) + + dst_periods = get_dst_transitions(timezone, min_time, max_time) + + for period_start, period_end, period_offset in dst_periods: + hours_offset = int(period_offset / 60 / 60) + minutes_offset = int(period_offset / 60 - hours_offset * 60) + period_hour_modifier = f"{hours_offset} hour" + period_minute_modifier = f"{minutes_offset} minute" + + recording_groups = ( + Recordings.select( + fn.strftime( + "%Y-%m-%d %H", + fn.datetime( + Recordings.start_time, + "unixepoch", + period_hour_modifier, + period_minute_modifier, + ), + ).alias("hour"), + fn.SUM(Recordings.duration).alias("duration"), + fn.SUM(Recordings.motion).alias("motion"), + fn.SUM(Recordings.objects).alias("objects"), + ) + .where( + (Recordings.camera == camera_name) + & (Recordings.end_time >= period_start) + & (Recordings.start_time <= period_end) + ) + .group_by((Recordings.start_time + period_offset).cast("int") / 3600) + .order_by(Recordings.start_time.desc()) + .namedtuples() + ) + + event_groups = ( + Event.select( + fn.strftime( + "%Y-%m-%d %H", + fn.datetime( + Event.start_time, + "unixepoch", + period_hour_modifier, + period_minute_modifier, + ), + ).alias("hour"), + fn.COUNT(Event.id).alias("count"), + ) + .where(Event.camera == camera_name, Event.has_clip) + .where( + (Event.start_time >= period_start) & (Event.start_time <= period_end) + ) + .group_by((Event.start_time + period_offset).cast("int") / 3600) + .namedtuples() + ) + + event_map = {g.hour: g.count for g in event_groups} + + for recording_group in recording_groups: + parts = 
recording_group.hour.split() + hour = parts[1] + day = parts[0] + events_count = event_map.get(recording_group.hour, 0) + hour_data = { + "hour": hour, + "events": events_count, + "motion": recording_group.motion, + "objects": recording_group.objects, + "duration": round(recording_group.duration), + } + if day in days: + # merge counts if already present (edge-case at DST boundary) + days[day]["events"] += events_count or 0 + days[day]["hours"].append(hour_data) + else: + days[day] = { + "events": events_count or 0, + "hours": [hour_data], + "day": day, + } + + return JSONResponse(content=list(days.values())) + + +@router.get("/{camera_name}/recordings", dependencies=[Depends(require_camera_access)]) +async def recordings( + camera_name: str, + after: float = (datetime.now() - timedelta(hours=1)).timestamp(), + before: float = datetime.now().timestamp(), +): + """Return specific camera recordings between the given 'after'/'end' times. If not provided the last hour will be used""" + recordings = ( + Recordings.select( + Recordings.id, + Recordings.start_time, + Recordings.end_time, + Recordings.segment_size, + Recordings.motion, + Recordings.objects, + Recordings.duration, + ) + .where( + Recordings.camera == camera_name, + Recordings.end_time >= after, + Recordings.start_time <= before, + ) + .order_by(Recordings.start_time) + .dicts() + .iterator() + ) + + return JSONResponse(content=list(recordings)) + + +@router.get( + "/recordings/unavailable", + response_model=list[dict], + dependencies=[Depends(allow_any_authenticated())], +) +async def no_recordings( + request: Request, + params: MediaRecordingsAvailabilityQueryParams = Depends(), + allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter), +): + """Get time ranges with no recordings.""" + cameras = params.cameras + if cameras != "all": + requested = set(unquote(cameras).split(",")) + filtered = requested.intersection(allowed_cameras) + if not filtered: + return JSONResponse(content=[]) + cameras 
= ",".join(filtered) + else: + cameras = allowed_cameras + + before = params.before or datetime.datetime.now().timestamp() + after = ( + params.after + or (datetime.datetime.now() - datetime.timedelta(hours=1)).timestamp() + ) + scale = params.scale + + clauses = [(Recordings.end_time >= after) & (Recordings.start_time <= before)] + if cameras != "all": + camera_list = cameras.split(",") + clauses.append((Recordings.camera << camera_list)) + else: + camera_list = allowed_cameras + + # Get recording start times + data: list[Recordings] = ( + Recordings.select(Recordings.start_time, Recordings.end_time) + .where(reduce(operator.and_, clauses)) + .order_by(Recordings.start_time.asc()) + .dicts() + .iterator() + ) + + # Convert recordings to list of (start, end) tuples + recordings = [(r["start_time"], r["end_time"]) for r in data] + + # Iterate through time segments and check if each has any recording + no_recording_segments = [] + current = after + current_gap_start = None + + while current < before: + segment_end = min(current + scale, before) + + # Check if this segment overlaps with any recording + has_recording = any( + rec_start < segment_end and rec_end > current + for rec_start, rec_end in recordings + ) + + if not has_recording: + # This segment has no recordings + if current_gap_start is None: + current_gap_start = current # Start a new gap + else: + # This segment has recordings + if current_gap_start is not None: + # End the current gap and append it + no_recording_segments.append( + {"start_time": int(current_gap_start), "end_time": int(current)} + ) + current_gap_start = None + + current = segment_end + + # Append the last gap if it exists + if current_gap_start is not None: + no_recording_segments.append( + {"start_time": int(current_gap_start), "end_time": int(before)} + ) + + return JSONResponse(content=no_recording_segments) + + +@router.delete( + "/recordings/start/{start}/end/{end}", + response_model=GenericResponse, + 
dependencies=[Depends(require_role(["admin"]))], + summary="Delete recordings", + description="""Deletes recordings within the specified time range. + Recordings can be filtered by cameras and kept based on motion, objects, or audio attributes. + """, +) +async def delete_recordings( + start: float = PathParam(..., description="Start timestamp (unix)"), + end: float = PathParam(..., description="End timestamp (unix)"), + params: RecordingsDeleteQueryParams = Depends(), + allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter), +): + """Delete recordings in the specified time range.""" + if start >= end: + return JSONResponse( + content={ + "success": False, + "message": "Start time must be less than end time.", + }, + status_code=400, + ) + + cameras = params.cameras + + if cameras != "all": + requested = set(cameras.split(",")) + filtered = requested.intersection(allowed_cameras) + + if not filtered: + return JSONResponse( + content={ + "success": False, + "message": "No valid cameras found in the request.", + }, + status_code=400, + ) + + camera_list = list(filtered) + else: + camera_list = allowed_cameras + + # Parse keep parameter + keep_set = set() + + if params.keep: + keep_set = set(params.keep.split(",")) + + # Build query to find overlapping recordings + clauses = [ + ( + Recordings.start_time.between(start, end) + | Recordings.end_time.between(start, end) + | ((start > Recordings.start_time) & (end < Recordings.end_time)) + ), + (Recordings.camera << camera_list), + ] + + keep_clauses = [] + + if "motion" in keep_set: + keep_clauses.append(Recordings.motion.is_null(False) & (Recordings.motion > 0)) + + if "object" in keep_set: + keep_clauses.append( + Recordings.objects.is_null(False) & (Recordings.objects > 0) + ) + + if "audio" in keep_set: + keep_clauses.append(Recordings.dBFS.is_null(False)) + + if keep_clauses: + keep_condition = reduce(operator.or_, keep_clauses) + clauses.append(~keep_condition) + + recordings_to_delete = ( + 
Recordings.select(Recordings.id, Recordings.path) + .where(reduce(operator.and_, clauses)) + .dicts() + .iterator() + ) + + recording_ids = [] + deleted_count = 0 + error_count = 0 + + for recording in recordings_to_delete: + recording_ids.append(recording["id"]) + + try: + Path(recording["path"]).unlink(missing_ok=True) + deleted_count += 1 + except Exception as e: + logger.error(f"Failed to delete recording file {recording['path']}: {e}") + error_count += 1 + + if recording_ids: + max_deletes = 100000 + recording_ids_list = list(recording_ids) + + for i in range(0, len(recording_ids_list), max_deletes): + Recordings.delete().where( + Recordings.id << recording_ids_list[i : i + max_deletes] + ).execute() + + message = f"Successfully deleted {deleted_count} recording(s)." + + if error_count > 0: + message += f" {error_count} file deletion error(s) occurred." + + return JSONResponse( + content={"success": True, "message": message}, + status_code=200, + ) diff --git a/frigate/camera/__init__.py b/frigate/camera/__init__.py index 77b1fd424..0461c98cb 100644 --- a/frigate/camera/__init__.py +++ b/frigate/camera/__init__.py @@ -19,6 +19,8 @@ class CameraMetrics: process_pid: Synchronized capture_process_pid: Synchronized ffmpeg_pid: Synchronized + reconnects_last_hour: Synchronized + stalls_last_hour: Synchronized def __init__(self, manager: SyncManager): self.camera_fps = manager.Value("d", 0) @@ -35,6 +37,8 @@ class CameraMetrics: self.process_pid = manager.Value("i", 0) self.capture_process_pid = manager.Value("i", 0) self.ffmpeg_pid = manager.Value("i", 0) + self.reconnects_last_hour = manager.Value("i", 0) + self.stalls_last_hour = manager.Value("i", 0) class PTZMetrics: diff --git a/frigate/comms/dispatcher.py b/frigate/comms/dispatcher.py index 6e45ac175..68749b102 100644 --- a/frigate/comms/dispatcher.py +++ b/frigate/comms/dispatcher.py @@ -28,6 +28,7 @@ from frigate.const import ( UPDATE_CAMERA_ACTIVITY, UPDATE_EMBEDDINGS_REINDEX_PROGRESS, 
UPDATE_EVENT_DESCRIPTION, + UPDATE_JOB_STATE, UPDATE_MODEL_STATE, UPDATE_REVIEW_DESCRIPTION, UPSERT_REVIEW_SEGMENT, @@ -60,6 +61,7 @@ class Dispatcher: self.camera_activity = CameraActivityManager(config, self.publish) self.audio_activity = AudioActivityManager(config, self.publish) self.model_state: dict[str, ModelStatusTypesEnum] = {} + self.job_state: dict[str, dict[str, Any]] = {} # {job_type: job_data} self.embeddings_reindex: dict[str, Any] = {} self.birdseye_layout: dict[str, Any] = {} self.audio_transcription_state: str = "idle" @@ -180,6 +182,19 @@ class Dispatcher: def handle_model_state() -> None: self.publish("model_state", json.dumps(self.model_state.copy())) + def handle_update_job_state() -> None: + if payload and isinstance(payload, dict): + job_type = payload.get("job_type") + if job_type: + self.job_state[job_type] = payload + self.publish( + "job_state", + json.dumps(self.job_state), + ) + + def handle_job_state() -> None: + self.publish("job_state", json.dumps(self.job_state.copy())) + def handle_update_audio_transcription_state() -> None: if payload: self.audio_transcription_state = payload @@ -277,6 +292,7 @@ class Dispatcher: UPDATE_EVENT_DESCRIPTION: handle_update_event_description, UPDATE_REVIEW_DESCRIPTION: handle_update_review_description, UPDATE_MODEL_STATE: handle_update_model_state, + UPDATE_JOB_STATE: handle_update_job_state, UPDATE_EMBEDDINGS_REINDEX_PROGRESS: handle_update_embeddings_reindex_progress, UPDATE_BIRDSEYE_LAYOUT: handle_update_birdseye_layout, UPDATE_AUDIO_TRANSCRIPTION_STATE: handle_update_audio_transcription_state, @@ -284,6 +300,7 @@ class Dispatcher: "restart": handle_restart, "embeddingsReindexProgress": handle_embeddings_reindex_progress, "modelState": handle_model_state, + "jobState": handle_job_state, "audioTranscriptionState": handle_audio_transcription_state, "birdseyeLayout": handle_birdseye_layout, "onConnect": handle_on_connect, diff --git a/frigate/config/__init__.py b/frigate/config/__init__.py index 
c6ff535b0..88f7b79f9 100644 --- a/frigate/config/__init__.py +++ b/frigate/config/__init__.py @@ -8,6 +8,7 @@ from .config import * # noqa: F403 from .database import * # noqa: F403 from .logger import * # noqa: F403 from .mqtt import * # noqa: F403 +from .network import * # noqa: F403 from .proxy import * # noqa: F403 from .telemetry import * # noqa: F403 from .tls import * # noqa: F403 diff --git a/frigate/config/camera/genai.py b/frigate/config/camera/genai.py index a4d9199af..3dd596c3b 100644 --- a/frigate/config/camera/genai.py +++ b/frigate/config/camera/genai.py @@ -14,6 +14,7 @@ class GenAIProviderEnum(str, Enum): azure_openai = "azure_openai" gemini = "gemini" ollama = "ollama" + llamacpp = "llamacpp" class GenAIConfig(FrigateBaseModel): diff --git a/frigate/config/camera/record.py b/frigate/config/camera/record.py index 09a7a84d5..fe24cf522 100644 --- a/frigate/config/camera/record.py +++ b/frigate/config/camera/record.py @@ -1,5 +1,5 @@ from enum import Enum -from typing import Optional +from typing import Optional, Union from pydantic import Field @@ -19,8 +19,6 @@ __all__ = [ "RetainModeEnum", ] -DEFAULT_TIME_LAPSE_FFMPEG_ARGS = "-vf setpts=0.04*PTS -r 30" - class RecordRetainConfig(FrigateBaseModel): days: float = Field(default=0, ge=0, title="Default retention period.") @@ -67,16 +65,13 @@ class RecordPreviewConfig(FrigateBaseModel): class RecordExportConfig(FrigateBaseModel): - timelapse_args: str = Field( - default=DEFAULT_TIME_LAPSE_FFMPEG_ARGS, title="Timelapse Args" + hwaccel_args: Union[str, list[str]] = Field( + default="auto", title="Export-specific FFmpeg hardware acceleration arguments." ) class RecordConfig(FrigateBaseModel): enabled: bool = Field(default=False, title="Enable record on all cameras.") - sync_recordings: bool = Field( - default=False, title="Sync recordings with disk on startup and once a day." 
- ) expire_interval: int = Field( default=60, title="Number of minutes to wait between cleanup runs.", diff --git a/frigate/config/config.py b/frigate/config/config.py index a26d4c50e..370c89458 100644 --- a/frigate/config/config.py +++ b/frigate/config/config.py @@ -525,6 +525,14 @@ class FrigateConfig(FrigateBaseModel): if camera_config.ffmpeg.hwaccel_args == "auto": camera_config.ffmpeg.hwaccel_args = self.ffmpeg.hwaccel_args + # Resolve export hwaccel_args: camera export -> camera ffmpeg -> global ffmpeg + # This allows per-camera override for exports (e.g., when camera resolution + # exceeds hardware encoder limits) + if camera_config.record.export.hwaccel_args == "auto": + camera_config.record.export.hwaccel_args = ( + camera_config.ffmpeg.hwaccel_args + ) + for input in camera_config.ffmpeg.inputs: need_detect_dimensions = "detect" in input.roles and ( camera_config.detect.height is None diff --git a/frigate/config/network.py b/frigate/config/network.py index c8b3cfd1c..ab4e5b83e 100644 --- a/frigate/config/network.py +++ b/frigate/config/network.py @@ -1,13 +1,27 @@ +from typing import Union + from pydantic import Field from .base import FrigateBaseModel -__all__ = ["IPv6Config", "NetworkingConfig"] +__all__ = ["IPv6Config", "ListenConfig", "NetworkingConfig"] class IPv6Config(FrigateBaseModel): enabled: bool = Field(default=False, title="Enable IPv6 for port 5000 and/or 8971") +class ListenConfig(FrigateBaseModel): + internal: Union[int, str] = Field( + default=5000, title="Internal listening port for Frigate" + ) + external: Union[int, str] = Field( + default=8971, title="External listening port for Frigate" + ) + + class NetworkingConfig(FrigateBaseModel): - ipv6: IPv6Config = Field(default_factory=IPv6Config, title="Network configuration") + ipv6: IPv6Config = Field(default_factory=IPv6Config, title="IPv6 configuration") + listen: ListenConfig = Field( + default_factory=ListenConfig, title="Listening ports configuration" + ) diff --git 
a/frigate/const.py b/frigate/const.py index 41c24f087..87fdb8e70 100644 --- a/frigate/const.py +++ b/frigate/const.py @@ -14,7 +14,6 @@ RECORD_DIR = f"{BASE_DIR}/recordings" TRIGGER_DIR = f"{CLIPS_DIR}/triggers" BIRDSEYE_PIPE = "/tmp/cache/birdseye" CACHE_DIR = "/tmp/cache" -FRIGATE_LOCALHOST = "http://127.0.0.1:5000" PLUS_ENV_VAR = "PLUS_API_KEY" PLUS_API_HOST = "https://api.frigate.video" @@ -122,6 +121,7 @@ UPDATE_REVIEW_DESCRIPTION = "update_review_description" UPDATE_MODEL_STATE = "update_model_state" UPDATE_EMBEDDINGS_REINDEX_PROGRESS = "handle_embeddings_reindex_progress" UPDATE_BIRDSEYE_LAYOUT = "update_birdseye_layout" +UPDATE_JOB_STATE = "update_job_state" NOTIFICATION_TEST = "notification_test" # IO Nice Values diff --git a/frigate/detectors/detection_runners.py b/frigate/detectors/detection_runners.py index fcbb41e66..da7df9d36 100644 --- a/frigate/detectors/detection_runners.py +++ b/frigate/detectors/detection_runners.py @@ -131,10 +131,8 @@ class ONNXModelRunner(BaseModelRunner): return model_type in [ EnrichmentModelTypeEnum.paddleocr.value, - EnrichmentModelTypeEnum.yolov9_license_plate.value, - EnrichmentModelTypeEnum.jina_v1.value, EnrichmentModelTypeEnum.jina_v2.value, - EnrichmentModelTypeEnum.facenet.value, + EnrichmentModelTypeEnum.arcface.value, ModelTypeEnum.rfdetr.value, ModelTypeEnum.dfine.value, ] diff --git a/frigate/genai/__init__.py b/frigate/genai/__init__.py index be1f6d1e7..0ae664b9f 100644 --- a/frigate/genai/__init__.py +++ b/frigate/genai/__init__.py @@ -69,7 +69,7 @@ class GenAIClient: return "\n- (No objects detected)" context_prompt = f""" -Your task is to analyze the sequence of images ({len(thumbnails)} total) taken in chronological order from the perspective of the {review_data["camera"]} security camera. +Your task is to analyze a sequence of images taken in chronological order from a security camera. 
## Normal Activity Patterns for This Property @@ -108,7 +108,8 @@ Your response MUST be a flat JSON object with: ## Sequence Details -- Frame 1 = earliest, Frame {len(thumbnails)} = latest +- Camera: {review_data["camera"]} +- Total frames: {len(thumbnails)} (Frame 1 = earliest, Frame {len(thumbnails)} = latest) - Activity started at {review_data["start"]} and lasted {review_data["duration"]} seconds - Zones involved: {", ".join(review_data["zones"]) if review_data["zones"] else "None"} @@ -292,6 +293,64 @@ Guidelines: """Get the context window size for this provider in tokens.""" return 4096 + def chat_with_tools( + self, + messages: list[dict[str, Any]], + tools: Optional[list[dict[str, Any]]] = None, + tool_choice: Optional[str] = "auto", + ) -> dict[str, Any]: + """ + Send chat messages to LLM with optional tool definitions. + + This method handles conversation-style interactions with the LLM, + including function calling/tool usage capabilities. + + Args: + messages: List of message dictionaries. Each message should have: + - 'role': str - One of 'user', 'assistant', 'system', or 'tool' + - 'content': str - The message content + - 'tool_call_id': Optional[str] - For tool responses, the ID of the tool call + - 'name': Optional[str] - For tool messages, the tool name + tools: Optional list of tool definitions in OpenAI-compatible format. + Each tool should have 'type': 'function' and 'function' with: + - 'name': str - Tool name + - 'description': str - Tool description + - 'parameters': dict - JSON schema for parameters + tool_choice: How the model should handle tools: + - 'auto': Model decides whether to call tools + - 'none': Model must not call tools + - 'required': Model must call at least one tool + - Or a dict specifying a specific tool to call + **kwargs: Additional provider-specific parameters. 
+ + Returns: + Dictionary with: + - 'content': Optional[str] - The text response from the LLM, None if tool calls + - 'tool_calls': Optional[List[Dict]] - List of tool calls if LLM wants to call tools. + Each tool call dict has: + - 'id': str - Unique identifier for this tool call + - 'name': str - Tool name to call + - 'arguments': dict - Arguments for the tool call (parsed JSON) + - 'finish_reason': str - Reason generation stopped: + - 'stop': Normal completion + - 'tool_calls': LLM wants to call tools + - 'length': Hit token limit + - 'error': An error occurred + + Raises: + NotImplementedError: If the provider doesn't implement this method. + """ + # Base implementation - each provider should override this + logger.warning( + f"{self.__class__.__name__} does not support chat_with_tools. " + "This method should be overridden by the provider implementation." + ) + return { + "content": None, + "tool_calls": None, + "finish_reason": "error", + } + def get_genai_client(config: FrigateConfig) -> Optional[GenAIClient]: """Get the GenAI client.""" diff --git a/frigate/genai/azure-openai.py b/frigate/genai/azure-openai.py index eb08f7786..21ed5d856 100644 --- a/frigate/genai/azure-openai.py +++ b/frigate/genai/azure-openai.py @@ -1,8 +1,9 @@ """Azure OpenAI Provider for Frigate AI.""" import base64 +import json import logging -from typing import Optional +from typing import Any, Optional from urllib.parse import parse_qs, urlparse from openai import AzureOpenAI @@ -76,3 +77,93 @@ class OpenAIClient(GenAIClient): def get_context_size(self) -> int: """Get the context window size for Azure OpenAI.""" return 128000 + + def chat_with_tools( + self, + messages: list[dict[str, Any]], + tools: Optional[list[dict[str, Any]]] = None, + tool_choice: Optional[str] = "auto", + ) -> dict[str, Any]: + try: + openai_tool_choice = None + if tool_choice: + if tool_choice == "none": + openai_tool_choice = "none" + elif tool_choice == "auto": + openai_tool_choice = "auto" + elif 
tool_choice == "required": + openai_tool_choice = "required" + + request_params = { + "model": self.genai_config.model, + "messages": messages, + "timeout": self.timeout, + } + + if tools: + request_params["tools"] = tools + if openai_tool_choice is not None: + request_params["tool_choice"] = openai_tool_choice + + result = self.provider.chat.completions.create(**request_params) + + if ( + result is None + or not hasattr(result, "choices") + or len(result.choices) == 0 + ): + return { + "content": None, + "tool_calls": None, + "finish_reason": "error", + } + + choice = result.choices[0] + message = choice.message + + content = message.content.strip() if message.content else None + + tool_calls = None + if message.tool_calls: + tool_calls = [] + for tool_call in message.tool_calls: + try: + arguments = json.loads(tool_call.function.arguments) + except (json.JSONDecodeError, AttributeError) as e: + logger.warning( + f"Failed to parse tool call arguments: {e}, " + f"tool: {tool_call.function.name if hasattr(tool_call.function, 'name') else 'unknown'}" + ) + arguments = {} + + tool_calls.append( + { + "id": tool_call.id if hasattr(tool_call, "id") else "", + "name": tool_call.function.name + if hasattr(tool_call.function, "name") + else "", + "arguments": arguments, + } + ) + + finish_reason = "error" + if hasattr(choice, "finish_reason") and choice.finish_reason: + finish_reason = choice.finish_reason + elif tool_calls: + finish_reason = "tool_calls" + elif content: + finish_reason = "stop" + + return { + "content": content, + "tool_calls": tool_calls, + "finish_reason": finish_reason, + } + + except Exception as e: + logger.warning("Azure OpenAI returned an error: %s", str(e)) + return { + "content": None, + "tool_calls": None, + "finish_reason": "error", + } diff --git a/frigate/genai/gemini.py b/frigate/genai/gemini.py index b700c33a4..fd273faec 100644 --- a/frigate/genai/gemini.py +++ b/frigate/genai/gemini.py @@ -1,7 +1,7 @@ """Gemini Provider for Frigate AI.""" 
import logging -from typing import Optional +from typing import Any, Optional from google import genai from google.genai import errors, types @@ -76,3 +76,200 @@ class GeminiClient(GenAIClient): """Get the context window size for Gemini.""" # Gemini Pro Vision has a 1M token context window return 1000000 + + def chat_with_tools( + self, + messages: list[dict[str, Any]], + tools: Optional[list[dict[str, Any]]] = None, + tool_choice: Optional[str] = "auto", + ) -> dict[str, Any]: + """ + Send chat messages to Gemini with optional tool definitions. + + Implements function calling/tool usage for Gemini models. + """ + try: + # Convert messages to Gemini format + gemini_messages = [] + for msg in messages: + role = msg.get("role", "user") + content = msg.get("content", "") + + # Map roles to Gemini format + if role == "system": + # Gemini doesn't have system role, prepend to first user message + if gemini_messages and gemini_messages[0].role == "user": + gemini_messages[0].parts[ + 0 + ].text = f"{content}\n\n{gemini_messages[0].parts[0].text}" + else: + gemini_messages.append( + types.Content( + role="user", parts=[types.Part.from_text(text=content)] + ) + ) + elif role == "assistant": + gemini_messages.append( + types.Content( + role="model", parts=[types.Part.from_text(text=content)] + ) + ) + elif role == "tool": + # Handle tool response + function_response = { + "name": msg.get("name", ""), + "response": content, + } + gemini_messages.append( + types.Content( + role="function", + parts=[ + types.Part.from_function_response(function_response) + ], + ) + ) + else: # user + gemini_messages.append( + types.Content( + role="user", parts=[types.Part.from_text(text=content)] + ) + ) + + # Convert tools to Gemini format + gemini_tools = None + if tools: + gemini_tools = [] + for tool in tools: + if tool.get("type") == "function": + func = tool.get("function", {}) + gemini_tools.append( + types.Tool( + function_declarations=[ + types.FunctionDeclaration( + 
name=func.get("name", ""), + description=func.get("description", ""), + parameters=func.get("parameters", {}), + ) + ] + ) + ) + + # Configure tool choice + tool_config = None + if tool_choice: + if tool_choice == "none": + tool_config = types.ToolConfig( + function_calling_config=types.FunctionCallingConfig(mode="NONE") + ) + elif tool_choice == "auto": + tool_config = types.ToolConfig( + function_calling_config=types.FunctionCallingConfig(mode="AUTO") + ) + elif tool_choice == "required": + tool_config = types.ToolConfig( + function_calling_config=types.FunctionCallingConfig(mode="ANY") + ) + + # Build request config + config_params = {"candidate_count": 1} + + if gemini_tools: + config_params["tools"] = gemini_tools + + if tool_config: + config_params["tool_config"] = tool_config + + # Merge runtime_options + if isinstance(self.genai_config.runtime_options, dict): + config_params.update(self.genai_config.runtime_options) + + response = self.provider.models.generate_content( + model=self.genai_config.model, + contents=gemini_messages, + config=types.GenerateContentConfig(**config_params), + ) + + # Check if response is valid + if not response or not response.candidates: + return { + "content": None, + "tool_calls": None, + "finish_reason": "error", + } + + candidate = response.candidates[0] + content = None + tool_calls = None + + # Extract content and tool calls from response + if candidate.content and candidate.content.parts: + for part in candidate.content.parts: + if part.text: + content = part.text.strip() + elif part.function_call: + # Handle function call + if tool_calls is None: + tool_calls = [] + + try: + arguments = ( + dict(part.function_call.args) + if part.function_call.args + else {} + ) + except Exception: + arguments = {} + + tool_calls.append( + { + "id": part.function_call.name or "", + "name": part.function_call.name or "", + "arguments": arguments, + } + ) + + # Determine finish reason + finish_reason = "error" + if hasattr(candidate, 
"finish_reason") and candidate.finish_reason: + from google.genai.types import FinishReason + + if candidate.finish_reason == FinishReason.STOP: + finish_reason = "stop" + elif candidate.finish_reason == FinishReason.MAX_TOKENS: + finish_reason = "length" + elif candidate.finish_reason in [ + FinishReason.SAFETY, + FinishReason.RECITATION, + ]: + finish_reason = "error" + elif tool_calls: + finish_reason = "tool_calls" + elif content: + finish_reason = "stop" + elif tool_calls: + finish_reason = "tool_calls" + elif content: + finish_reason = "stop" + + return { + "content": content, + "tool_calls": tool_calls, + "finish_reason": finish_reason, + } + + except errors.APIError as e: + logger.warning("Gemini API error during chat_with_tools: %s", str(e)) + return { + "content": None, + "tool_calls": None, + "finish_reason": "error", + } + except Exception as e: + logger.warning( + "Gemini returned an error during chat_with_tools: %s", str(e) + ) + return { + "content": None, + "tool_calls": None, + "finish_reason": "error", + } diff --git a/frigate/genai/llama_cpp.py b/frigate/genai/llama_cpp.py new file mode 100644 index 000000000..fafef74ae --- /dev/null +++ b/frigate/genai/llama_cpp.py @@ -0,0 +1,238 @@ +"""llama.cpp Provider for Frigate AI.""" + +import base64 +import json +import logging +from typing import Any, Optional + +import requests + +from frigate.config import GenAIProviderEnum +from frigate.genai import GenAIClient, register_genai_provider + +logger = logging.getLogger(__name__) + + +@register_genai_provider(GenAIProviderEnum.llamacpp) +class LlamaCppClient(GenAIClient): + """Generative AI client for Frigate using llama.cpp server.""" + + LOCAL_OPTIMIZED_OPTIONS = { + "temperature": 0.7, + "repeat_penalty": 1.05, + "top_p": 0.8, + } + + provider: str # base_url + provider_options: dict[str, Any] + + def _init_provider(self): + """Initialize the client.""" + self.provider_options = { + **self.LOCAL_OPTIMIZED_OPTIONS, + 
**self.genai_config.provider_options, + } + return ( + self.genai_config.base_url.rstrip("/") + if self.genai_config.base_url + else None + ) + + def _send(self, prompt: str, images: list[bytes]) -> Optional[str]: + """Submit a request to llama.cpp server.""" + if self.provider is None: + logger.warning( + "llama.cpp provider has not been initialized, a description will not be generated. Check your llama.cpp configuration." + ) + return None + + try: + content = [] + for image in images: + encoded_image = base64.b64encode(image).decode("utf-8") + content.append( + { + "type": "image_url", + "image_url": { + "url": f"data:image/jpeg;base64,{encoded_image}", + }, + } + ) + content.append( + { + "type": "text", + "text": prompt, + } + ) + + # Build request payload with llama.cpp native options + payload = { + "messages": [ + { + "role": "user", + "content": content, + }, + ], + **self.provider_options, + } + + response = requests.post( + f"{self.provider}/v1/chat/completions", + json=payload, + timeout=self.timeout, + ) + response.raise_for_status() + result = response.json() + + if ( + result is not None + and "choices" in result + and len(result["choices"]) > 0 + ): + choice = result["choices"][0] + if "message" in choice and "content" in choice["message"]: + return choice["message"]["content"].strip() + return None + except Exception as e: + logger.warning("llama.cpp returned an error: %s", str(e)) + return None + + def get_context_size(self) -> int: + """Get the context window size for llama.cpp.""" + return self.genai_config.provider_options.get("context_size", 4096) + + def chat_with_tools( + self, + messages: list[dict[str, Any]], + tools: Optional[list[dict[str, Any]]] = None, + tool_choice: Optional[str] = "auto", + ) -> dict[str, Any]: + """ + Send chat messages to llama.cpp server with optional tool definitions. + + Uses the OpenAI-compatible endpoint but passes through all native llama.cpp + parameters (like slot_id, temperature, etc.) 
via provider_options. + """ + if self.provider is None: + logger.warning( + "llama.cpp provider has not been initialized. Check your llama.cpp configuration." + ) + return { + "content": None, + "tool_calls": None, + "finish_reason": "error", + } + + try: + openai_tool_choice = None + if tool_choice: + if tool_choice == "none": + openai_tool_choice = "none" + elif tool_choice == "auto": + openai_tool_choice = "auto" + elif tool_choice == "required": + openai_tool_choice = "required" + + payload = { + "messages": messages, + } + + if tools: + payload["tools"] = tools + if openai_tool_choice is not None: + payload["tool_choice"] = openai_tool_choice + + provider_opts = { + k: v for k, v in self.provider_options.items() if k != "context_size" + } + payload.update(provider_opts) + + response = requests.post( + f"{self.provider}/v1/chat/completions", + json=payload, + timeout=self.timeout, + ) + response.raise_for_status() + result = response.json() + + if result is None or "choices" not in result or len(result["choices"]) == 0: + return { + "content": None, + "tool_calls": None, + "finish_reason": "error", + } + + choice = result["choices"][0] + message = choice.get("message", {}) + + content = message.get("content") + if content: + content = content.strip() + else: + content = None + + tool_calls = None + if "tool_calls" in message and message["tool_calls"]: + tool_calls = [] + for tool_call in message["tool_calls"]: + try: + function_data = tool_call.get("function", {}) + arguments_str = function_data.get("arguments", "{}") + arguments = json.loads(arguments_str) + except (json.JSONDecodeError, KeyError, TypeError) as e: + logger.warning( + f"Failed to parse tool call arguments: {e}, " + f"tool: {function_data.get('name', 'unknown')}" + ) + arguments = {} + + tool_calls.append( + { + "id": tool_call.get("id", ""), + "name": function_data.get("name", ""), + "arguments": arguments, + } + ) + + finish_reason = "error" + if "finish_reason" in choice and 
choice["finish_reason"]: + finish_reason = choice["finish_reason"] + elif tool_calls: + finish_reason = "tool_calls" + elif content: + finish_reason = "stop" + + return { + "content": content, + "tool_calls": tool_calls, + "finish_reason": finish_reason, + } + + except requests.exceptions.Timeout as e: + logger.warning("llama.cpp request timed out: %s", str(e)) + return { + "content": None, + "tool_calls": None, + "finish_reason": "error", + } + except requests.exceptions.RequestException as e: + error_detail = str(e) + if hasattr(e, "response") and e.response is not None: + try: + error_body = e.response.text + error_detail = f"{str(e)} - Response: {error_body[:500]}" + except Exception: + pass + logger.warning("llama.cpp returned an error: %s", error_detail) + return { + "content": None, + "tool_calls": None, + "finish_reason": "error", + } + except Exception as e: + logger.warning("Unexpected error in llama.cpp chat_with_tools: %s", str(e)) + return { + "content": None, + "tool_calls": None, + "finish_reason": "error", + } diff --git a/frigate/genai/ollama.py b/frigate/genai/ollama.py index ab6d3c0b3..6e9a4f5d5 100644 --- a/frigate/genai/ollama.py +++ b/frigate/genai/ollama.py @@ -1,5 +1,6 @@ """Ollama Provider for Frigate AI.""" +import json import logging from typing import Any, Optional @@ -86,3 +87,120 @@ class OllamaClient(GenAIClient): return self.genai_config.provider_options.get("options", {}).get( "num_ctx", 4096 ) + + def chat_with_tools( + self, + messages: list[dict[str, Any]], + tools: Optional[list[dict[str, Any]]] = None, + tool_choice: Optional[str] = "auto", + ) -> dict[str, Any]: + if self.provider is None: + logger.warning( + "Ollama provider has not been initialized. Check your Ollama configuration." 
+ ) + return { + "content": None, + "tool_calls": None, + "finish_reason": "error", + } + + try: + request_messages = [] + for msg in messages: + msg_dict = { + "role": msg.get("role"), + "content": msg.get("content", ""), + } + if msg.get("tool_call_id"): + msg_dict["tool_call_id"] = msg["tool_call_id"] + if msg.get("name"): + msg_dict["name"] = msg["name"] + if msg.get("tool_calls"): + msg_dict["tool_calls"] = msg["tool_calls"] + request_messages.append(msg_dict) + + request_params = { + "model": self.genai_config.model, + "messages": request_messages, + } + + if tools: + request_params["tools"] = tools + if tool_choice: + if tool_choice == "none": + request_params["tool_choice"] = "none" + elif tool_choice == "required": + request_params["tool_choice"] = "required" + elif tool_choice == "auto": + request_params["tool_choice"] = "auto" + + request_params.update(self.provider_options) + + response = self.provider.chat(**request_params) + + if not response or "message" not in response: + return { + "content": None, + "tool_calls": None, + "finish_reason": "error", + } + + message = response["message"] + content = ( + message.get("content", "").strip() if message.get("content") else None + ) + + tool_calls = None + if "tool_calls" in message and message["tool_calls"]: + tool_calls = [] + for tool_call in message["tool_calls"]: + try: + function_data = tool_call.get("function", {}) + arguments_str = function_data.get("arguments", "{}") + arguments = json.loads(arguments_str) + except (json.JSONDecodeError, KeyError, TypeError) as e: + logger.warning( + f"Failed to parse tool call arguments: {e}, " + f"tool: {function_data.get('name', 'unknown')}" + ) + arguments = {} + + tool_calls.append( + { + "id": tool_call.get("id", ""), + "name": function_data.get("name", ""), + "arguments": arguments, + } + ) + + finish_reason = "error" + if "done" in response and response["done"]: + if tool_calls: + finish_reason = "tool_calls" + elif content: + finish_reason = "stop" + elif 
tool_calls: + finish_reason = "tool_calls" + elif content: + finish_reason = "stop" + + return { + "content": content, + "tool_calls": tool_calls, + "finish_reason": finish_reason, + } + + except (TimeoutException, ResponseError, ConnectionError) as e: + logger.warning("Ollama returned an error: %s", str(e)) + return { + "content": None, + "tool_calls": None, + "finish_reason": "error", + } + except Exception as e: + logger.warning("Unexpected error in Ollama chat_with_tools: %s", str(e)) + return { + "content": None, + "tool_calls": None, + "finish_reason": "error", + } diff --git a/frigate/genai/openai.py b/frigate/genai/openai.py index 1fb0dd852..c8d9ca7ab 100644 --- a/frigate/genai/openai.py +++ b/frigate/genai/openai.py @@ -1,8 +1,9 @@ """OpenAI Provider for Frigate AI.""" import base64 +import json import logging -from typing import Optional +from typing import Any, Optional from httpx import TimeoutException from openai import OpenAI @@ -116,3 +117,113 @@ class OpenAIClient(GenAIClient): f"Using default context size {self.context_size} for model {self.genai_config.model}" ) return self.context_size + + def chat_with_tools( + self, + messages: list[dict[str, Any]], + tools: Optional[list[dict[str, Any]]] = None, + tool_choice: Optional[str] = "auto", + ) -> dict[str, Any]: + """ + Send chat messages to OpenAI with optional tool definitions. + + Implements function calling/tool usage for OpenAI models. 
+ """ + try: + openai_tool_choice = None + if tool_choice: + if tool_choice == "none": + openai_tool_choice = "none" + elif tool_choice == "auto": + openai_tool_choice = "auto" + elif tool_choice == "required": + openai_tool_choice = "required" + + request_params = { + "model": self.genai_config.model, + "messages": messages, + "timeout": self.timeout, + } + + if tools: + request_params["tools"] = tools + if openai_tool_choice is not None: + request_params["tool_choice"] = openai_tool_choice + + if isinstance(self.genai_config.provider_options, dict): + excluded_options = {"context_size"} + provider_opts = { + k: v + for k, v in self.genai_config.provider_options.items() + if k not in excluded_options + } + request_params.update(provider_opts) + + result = self.provider.chat.completions.create(**request_params) + + if ( + result is None + or not hasattr(result, "choices") + or len(result.choices) == 0 + ): + return { + "content": None, + "tool_calls": None, + "finish_reason": "error", + } + + choice = result.choices[0] + message = choice.message + content = message.content.strip() if message.content else None + + tool_calls = None + if message.tool_calls: + tool_calls = [] + for tool_call in message.tool_calls: + try: + arguments = json.loads(tool_call.function.arguments) + except (json.JSONDecodeError, AttributeError) as e: + logger.warning( + f"Failed to parse tool call arguments: {e}, " + f"tool: {tool_call.function.name if hasattr(tool_call.function, 'name') else 'unknown'}" + ) + arguments = {} + + tool_calls.append( + { + "id": tool_call.id if hasattr(tool_call, "id") else "", + "name": tool_call.function.name + if hasattr(tool_call.function, "name") + else "", + "arguments": arguments, + } + ) + + finish_reason = "error" + if hasattr(choice, "finish_reason") and choice.finish_reason: + finish_reason = choice.finish_reason + elif tool_calls: + finish_reason = "tool_calls" + elif content: + finish_reason = "stop" + + return { + "content": content, + 
"tool_calls": tool_calls, + "finish_reason": finish_reason, + } + + except TimeoutException as e: + logger.warning("OpenAI request timed out: %s", str(e)) + return { + "content": None, + "tool_calls": None, + "finish_reason": "error", + } + except Exception as e: + logger.warning("OpenAI returned an error: %s", str(e)) + return { + "content": None, + "tool_calls": None, + "finish_reason": "error", + } diff --git a/frigate/jobs/__init__.py b/frigate/jobs/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/frigate/jobs/job.py b/frigate/jobs/job.py new file mode 100644 index 000000000..a445eebf5 --- /dev/null +++ b/frigate/jobs/job.py @@ -0,0 +1,21 @@ +"""Generic base class for long-running background jobs.""" + +from dataclasses import asdict, dataclass, field +from typing import Any, Optional + + +@dataclass +class Job: + """Base class for long-running background jobs.""" + + id: str = field(default_factory=lambda: __import__("uuid").uuid4().__str__()[:12]) + job_type: str = "" # Must be set by subclasses + status: str = "queued" # queued, running, success, failed, cancelled + results: Optional[dict[str, Any]] = None + start_time: Optional[float] = None + end_time: Optional[float] = None + error_message: Optional[str] = None + + def to_dict(self) -> dict[str, Any]: + """Convert to dictionary for WebSocket transmission.""" + return asdict(self) diff --git a/frigate/jobs/manager.py b/frigate/jobs/manager.py new file mode 100644 index 000000000..8aa77b3c7 --- /dev/null +++ b/frigate/jobs/manager.py @@ -0,0 +1,70 @@ +"""Generic job management for long-running background tasks.""" + +import threading +from typing import Optional + +from frigate.jobs.job import Job +from frigate.types import JobStatusTypesEnum + +# Global state and locks for enforcing single concurrent job per job type +_job_locks: dict[str, threading.Lock] = {} +_current_jobs: dict[str, Optional[Job]] = {} +# Keep completed jobs for retrieval, keyed by (job_type, job_id) 
+_completed_jobs: dict[tuple[str, str], Job] = {} + + +def _get_lock(job_type: str) -> threading.Lock: + """Get or create a lock for the specified job type.""" + if job_type not in _job_locks: + _job_locks[job_type] = threading.Lock() + return _job_locks[job_type] + + +def set_current_job(job: Job) -> None: + """Set the current job for a given job type.""" + lock = _get_lock(job.job_type) + with lock: + # Store the previous job if it was completed + old_job = _current_jobs.get(job.job_type) + if old_job and old_job.status in ( + JobStatusTypesEnum.success, + JobStatusTypesEnum.failed, + JobStatusTypesEnum.cancelled, + ): + _completed_jobs[(job.job_type, old_job.id)] = old_job + _current_jobs[job.job_type] = job + + +def clear_current_job(job_type: str, job_id: Optional[str] = None) -> None: + """Clear the current job for a given job type, optionally checking the ID.""" + lock = _get_lock(job_type) + with lock: + if job_type in _current_jobs: + current = _current_jobs[job_type] + if current is None or (job_id is None or current.id == job_id): + _current_jobs[job_type] = None + + +def get_current_job(job_type: str) -> Optional[Job]: + """Get the current running/queued job for a given job type, if any.""" + lock = _get_lock(job_type) + with lock: + return _current_jobs.get(job_type) + + +def get_job_by_id(job_type: str, job_id: str) -> Optional[Job]: + """Get job by ID. 
Checks current job first, then completed jobs.""" + lock = _get_lock(job_type) + with lock: + # Check if it's the current job + current = _current_jobs.get(job_type) + if current and current.id == job_id: + return current + # Check if it's a completed job + return _completed_jobs.get((job_type, job_id)) + + +def job_is_running(job_type: str) -> bool: + """Check if a job of the given type is currently running or queued.""" + job = get_current_job(job_type) + return job is not None and job.status in ("queued", "running") diff --git a/frigate/jobs/media_sync.py b/frigate/jobs/media_sync.py new file mode 100644 index 000000000..7c15435fd --- /dev/null +++ b/frigate/jobs/media_sync.py @@ -0,0 +1,135 @@ +"""Media sync job management with background execution.""" + +import logging +import threading +from dataclasses import dataclass, field +from datetime import datetime +from typing import Optional + +from frigate.comms.inter_process import InterProcessRequestor +from frigate.const import UPDATE_JOB_STATE +from frigate.jobs.job import Job +from frigate.jobs.manager import ( + get_current_job, + get_job_by_id, + job_is_running, + set_current_job, +) +from frigate.types import JobStatusTypesEnum +from frigate.util.media import sync_all_media + +logger = logging.getLogger(__name__) + + +@dataclass +class MediaSyncJob(Job): + """In-memory job state for media sync operations.""" + + job_type: str = "media_sync" + dry_run: bool = False + media_types: list[str] = field(default_factory=lambda: ["all"]) + force: bool = False + + +class MediaSyncRunner(threading.Thread): + """Thread-based runner for media sync jobs.""" + + def __init__(self, job: MediaSyncJob) -> None: + super().__init__(daemon=True, name="media_sync") + self.job = job + self.requestor = InterProcessRequestor() + + def run(self) -> None: + """Execute the media sync job and broadcast status updates.""" + try: + # Update job status to running + self.job.status = JobStatusTypesEnum.running + self.job.start_time = 
datetime.now().timestamp() + self._broadcast_status() + + # Execute sync with provided parameters + logger.debug( + f"Starting media sync job {self.job.id}: " + f"media_types={self.job.media_types}, " + f"dry_run={self.job.dry_run}, " + f"force={self.job.force}" + ) + + results = sync_all_media( + dry_run=self.job.dry_run, + media_types=self.job.media_types, + force=self.job.force, + ) + + # Store results and mark as complete + self.job.results = results.to_dict() + self.job.status = JobStatusTypesEnum.success + self.job.end_time = datetime.now().timestamp() + + logger.debug(f"Media sync job {self.job.id} completed successfully") + self._broadcast_status() + + except Exception as e: + logger.error(f"Media sync job {self.job.id} failed: {e}", exc_info=True) + self.job.status = JobStatusTypesEnum.failed + self.job.error_message = str(e) + self.job.end_time = datetime.now().timestamp() + self._broadcast_status() + + finally: + if self.requestor: + self.requestor.stop() + + def _broadcast_status(self) -> None: + """Broadcast job status update via IPC to all WebSocket subscribers.""" + try: + self.requestor.send_data( + UPDATE_JOB_STATE, + self.job.to_dict(), + ) + except Exception as e: + logger.warning(f"Failed to broadcast media sync status: {e}") + + +def start_media_sync_job( + dry_run: bool = False, + media_types: Optional[list[str]] = None, + force: bool = False, +) -> Optional[str]: + """Start a new media sync job if none is currently running. + + Returns job ID on success, None if job already running. + """ + # Check if a job is already running + if job_is_running("media_sync"): + current = get_current_job("media_sync") + logger.warning( + f"Media sync job {current.id} is already running. Rejecting new request." 
+ ) + return None + + # Create and start new job + job = MediaSyncJob( + dry_run=dry_run, + media_types=media_types or ["all"], + force=force, + ) + + logger.debug(f"Creating new media sync job: {job.id}") + set_current_job(job) + + # Start the background runner + runner = MediaSyncRunner(job) + runner.start() + + return job.id + + +def get_current_media_sync_job() -> Optional[MediaSyncJob]: + """Get the current running/queued media sync job, if any.""" + return get_current_job("media_sync") + + +def get_media_sync_job_by_id(job_id: str) -> Optional[MediaSyncJob]: + """Get media sync job by ID. Currently only tracks the current job.""" + return get_job_by_id("media_sync", job_id) diff --git a/frigate/models.py b/frigate/models.py index 93f6cb54f..fd5061613 100644 --- a/frigate/models.py +++ b/frigate/models.py @@ -80,6 +80,14 @@ class Recordings(Model): regions = IntegerField(null=True) +class ExportCase(Model): + id = CharField(null=False, primary_key=True, max_length=30) + name = CharField(index=True, max_length=100) + description = TextField(null=True) + created_at = DateTimeField() + updated_at = DateTimeField() + + class Export(Model): id = CharField(null=False, primary_key=True, max_length=30) camera = CharField(index=True, max_length=20) @@ -88,6 +96,12 @@ class Export(Model): video_path = CharField(unique=True) thumb_path = CharField(unique=True) in_progress = BooleanField() + export_case = ForeignKeyField( + ExportCase, + null=True, + backref="exports", + column_name="export_case_id", + ) class ReviewSegment(Model): diff --git a/frigate/output/preview.py b/frigate/output/preview.py index 6dfd90904..b66c1298a 100644 --- a/frigate/output/preview.py +++ b/frigate/output/preview.py @@ -47,6 +47,15 @@ PREVIEW_QUALITY_BIT_RATES = { RecordQualityEnum.high: 9864, RecordQualityEnum.very_high: 10096, } +# the -qmax param for ffmpeg prevents the encoder from overly compressing frames while still trying to hit the bitrate target +# lower values are higher quality. 
This is especially important for iniitial frames in the segment +PREVIEW_QMAX_PARAM = { + RecordQualityEnum.very_low: "", + RecordQualityEnum.low: "", + RecordQualityEnum.medium: "", + RecordQualityEnum.high: " -qmax 25", + RecordQualityEnum.very_high: " -qmax 25", +} def get_cache_image_name(camera: str, frame_time: float) -> str: @@ -57,6 +66,51 @@ def get_cache_image_name(camera: str, frame_time: float) -> str: ) +def get_most_recent_preview_frame(camera: str, before: float = None) -> str | None: + """Get the most recent preview frame for a camera.""" + if not os.path.exists(PREVIEW_CACHE_DIR): + return None + + try: + # files are named preview_{camera}-{timestamp}.webp + # we want the largest timestamp that is less than or equal to before + preview_files = [ + f + for f in os.listdir(PREVIEW_CACHE_DIR) + if f.startswith(f"preview_{camera}-") + and f.endswith(f".{PREVIEW_FRAME_TYPE}") + ] + + if not preview_files: + return None + + # sort by timestamp in descending order + # filenames are like preview_front-1712345678.901234.webp + preview_files.sort(reverse=True) + + if before is None: + return os.path.join(PREVIEW_CACHE_DIR, preview_files[0]) + + for file_name in preview_files: + try: + # Extract timestamp: preview_front-1712345678.901234.webp + # Split by dash and extension + timestamp_part = file_name.split("-")[-1].split( + f".{PREVIEW_FRAME_TYPE}" + )[0] + timestamp = float(timestamp_part) + + if timestamp <= before: + return os.path.join(PREVIEW_CACHE_DIR, file_name) + except (ValueError, IndexError): + continue + + return None + except Exception as e: + logger.error(f"Error searching for most recent preview frame: {e}") + return None + + class FFMpegConverter(threading.Thread): """Convert a list of still frames into a vfr mp4.""" @@ -80,7 +134,7 @@ class FFMpegConverter(threading.Thread): config.ffmpeg.ffmpeg_path, "default", input="-f concat -y -protocol_whitelist pipe,file -safe 0 -threads 1 -i /dev/stdin", - output=f"-threads 1 -g 
{PREVIEW_KEYFRAME_INTERVAL} -bf 0 -b:v {PREVIEW_QUALITY_BIT_RATES[self.config.record.preview.quality]} {FPS_VFR_PARAM} -movflags +faststart -pix_fmt yuv420p {self.path}", + output=f"-threads 1 -g {PREVIEW_KEYFRAME_INTERVAL} -bf 0 -b:v {PREVIEW_QUALITY_BIT_RATES[self.config.record.preview.quality]}{PREVIEW_QMAX_PARAM[self.config.record.preview.quality]} {FPS_VFR_PARAM} -movflags +faststart -pix_fmt yuv420p {self.path}", type=EncodeTypeEnum.preview, ) diff --git a/frigate/record/cleanup.py b/frigate/record/cleanup.py index 94dd43eba..15a0ba7e8 100644 --- a/frigate/record/cleanup.py +++ b/frigate/record/cleanup.py @@ -13,9 +13,8 @@ from playhouse.sqlite_ext import SqliteExtDatabase from frigate.config import CameraConfig, FrigateConfig, RetainModeEnum from frigate.const import CACHE_DIR, CLIPS_DIR, MAX_WAL_SIZE, RECORD_DIR from frigate.models import Previews, Recordings, ReviewSegment, UserReviewStatus -from frigate.record.util import remove_empty_directories, sync_recordings from frigate.util.builtin import clear_and_unlink -from frigate.util.time import get_tomorrow_at_time +from frigate.util.media import remove_empty_directories logger = logging.getLogger(__name__) @@ -61,7 +60,7 @@ class RecordingCleanup(threading.Thread): db.execute_sql("PRAGMA wal_checkpoint(TRUNCATE);") db.close() - def expire_review_segments(self, config: CameraConfig, now: datetime) -> None: + def expire_review_segments(self, config: CameraConfig, now: datetime) -> set[Path]: """Delete review segments that are expired""" alert_expire_date = ( now - datetime.timedelta(days=config.record.alerts.retain.days) @@ -85,9 +84,12 @@ class RecordingCleanup(threading.Thread): .namedtuples() ) + maybe_empty_dirs = set() thumbs_to_delete = list(map(lambda x: x[1], expired_reviews)) for thumb_path in thumbs_to_delete: - Path(thumb_path).unlink(missing_ok=True) + thumb_path = Path(thumb_path) + thumb_path.unlink(missing_ok=True) + maybe_empty_dirs.add(thumb_path.parent) max_deletes = 100000 
deleted_reviews_list = list(map(lambda x: x[0], expired_reviews)) @@ -100,13 +102,15 @@ class RecordingCleanup(threading.Thread): << deleted_reviews_list[i : i + max_deletes] ).execute() + return maybe_empty_dirs + def expire_existing_camera_recordings( self, continuous_expire_date: float, motion_expire_date: float, config: CameraConfig, reviews: ReviewSegment, - ) -> None: + ) -> set[Path]: """Delete recordings for existing camera based on retention config.""" # Get the timestamp for cutoff of retained days @@ -137,6 +141,8 @@ class RecordingCleanup(threading.Thread): .iterator() ) + maybe_empty_dirs = set() + # loop over recordings and see if they overlap with any non-expired reviews # TODO: expire segments based on segment stats according to config review_start = 0 @@ -191,8 +197,10 @@ class RecordingCleanup(threading.Thread): ) or (mode == RetainModeEnum.active_objects and recording.objects == 0) ): - Path(recording.path).unlink(missing_ok=True) + recording_path = Path(recording.path) + recording_path.unlink(missing_ok=True) deleted_recordings.add(recording.id) + maybe_empty_dirs.add(recording_path.parent) else: kept_recordings.append((recording.start_time, recording.end_time)) @@ -253,8 +261,10 @@ class RecordingCleanup(threading.Thread): # Delete previews without any relevant recordings if not keep: - Path(preview.path).unlink(missing_ok=True) + preview_path = Path(preview.path) + preview_path.unlink(missing_ok=True) deleted_previews.add(preview.id) + maybe_empty_dirs.add(preview_path.parent) # expire previews logger.debug(f"Expiring {len(deleted_previews)} previews") @@ -266,7 +276,9 @@ class RecordingCleanup(threading.Thread): Previews.id << deleted_previews_list[i : i + max_deletes] ).execute() - def expire_recordings(self) -> None: + return maybe_empty_dirs + + def expire_recordings(self) -> set[Path]: """Delete recordings based on retention config.""" logger.debug("Start expire recordings.") logger.debug("Start deleted cameras.") @@ -291,10 +303,14 @@ 
class RecordingCleanup(threading.Thread): .iterator() ) + maybe_empty_dirs = set() + deleted_recordings = set() for recording in no_camera_recordings: - Path(recording.path).unlink(missing_ok=True) + recording_path = Path(recording.path) + recording_path.unlink(missing_ok=True) deleted_recordings.add(recording.id) + maybe_empty_dirs.add(recording_path.parent) logger.debug(f"Expiring {len(deleted_recordings)} recordings") # delete up to 100,000 at a time @@ -311,7 +327,7 @@ class RecordingCleanup(threading.Thread): logger.debug(f"Start camera: {camera}.") now = datetime.datetime.now() - self.expire_review_segments(config, now) + maybe_empty_dirs |= self.expire_review_segments(config, now) continuous_expire_date = ( now - datetime.timedelta(days=config.record.continuous.days) ).timestamp() @@ -341,7 +357,7 @@ class RecordingCleanup(threading.Thread): .namedtuples() ) - self.expire_existing_camera_recordings( + maybe_empty_dirs |= self.expire_existing_camera_recordings( continuous_expire_date, motion_expire_date, config, reviews ) logger.debug(f"End camera: {camera}.") @@ -349,12 +365,9 @@ class RecordingCleanup(threading.Thread): logger.debug("End all cameras.") logger.debug("End expire recordings.") - def run(self) -> None: - # on startup sync recordings with disk if enabled - if self.config.record.sync_recordings: - sync_recordings(limited=False) - next_sync = get_tomorrow_at_time(3) + return maybe_empty_dirs + def run(self) -> None: # Expire tmp clips every minute, recordings and clean directories every hour. 
for counter in itertools.cycle(range(self.config.record.expire_interval)): if self.stop_event.wait(60): @@ -363,16 +376,8 @@ class RecordingCleanup(threading.Thread): self.clean_tmp_previews() - if ( - self.config.record.sync_recordings - and datetime.datetime.now().astimezone(datetime.timezone.utc) - > next_sync - ): - sync_recordings(limited=True) - next_sync = get_tomorrow_at_time(3) - if counter == 0: self.clean_tmp_clips() - self.expire_recordings() - remove_empty_directories(RECORD_DIR) + maybe_empty_dirs = self.expire_recordings() + remove_empty_directories(Path(RECORD_DIR), maybe_empty_dirs) self.truncate_wal() diff --git a/frigate/record/export.py b/frigate/record/export.py index d4b49bb4b..c1c478ef4 100644 --- a/frigate/record/export.py +++ b/frigate/record/export.py @@ -33,6 +33,7 @@ from frigate.util.time import is_current_hour logger = logging.getLogger(__name__) +DEFAULT_TIME_LAPSE_FFMPEG_ARGS = "-vf setpts=0.04*PTS -r 30" TIMELAPSE_DATA_INPUT_ARGS = "-an -skip_frame nokey" @@ -40,11 +41,6 @@ def lower_priority(): os.nice(PROCESS_PRIORITY_LOW) -class PlaybackFactorEnum(str, Enum): - realtime = "realtime" - timelapse_25x = "timelapse_25x" - - class PlaybackSourceEnum(str, Enum): recordings = "recordings" preview = "preview" @@ -62,8 +58,11 @@ class RecordingExporter(threading.Thread): image: Optional[str], start_time: int, end_time: int, - playback_factor: PlaybackFactorEnum, playback_source: PlaybackSourceEnum, + export_case_id: Optional[str] = None, + ffmpeg_input_args: Optional[str] = None, + ffmpeg_output_args: Optional[str] = None, + cpu_fallback: bool = False, ) -> None: super().__init__() self.config = config @@ -73,8 +72,11 @@ class RecordingExporter(threading.Thread): self.user_provided_image = image self.start_time = start_time self.end_time = end_time - self.playback_factor = playback_factor self.playback_source = playback_source + self.export_case_id = export_case_id + self.ffmpeg_input_args = ffmpeg_input_args + self.ffmpeg_output_args = 
ffmpeg_output_args + self.cpu_fallback = cpu_fallback # ensure export thumb dir Path(os.path.join(CLIPS_DIR, "export")).mkdir(exist_ok=True) @@ -179,9 +181,16 @@ class RecordingExporter(threading.Thread): return thumb_path - def get_record_export_command(self, video_path: str) -> list[str]: + def get_record_export_command( + self, video_path: str, use_hwaccel: bool = True + ) -> list[str]: + # handle case where internal port is a string with ip:port + internal_port = self.config.networking.listen.internal + if type(internal_port) is str: + internal_port = int(internal_port.split(":")[-1]) + if (self.end_time - self.start_time) <= MAX_PLAYLIST_SECONDS: - playlist_lines = f"http://127.0.0.1:5000/vod/{self.camera}/start/{self.start_time}/end/{self.end_time}/index.m3u8" + playlist_lines = f"http://127.0.0.1:{internal_port}/vod/{self.camera}/start/{self.start_time}/end/{self.end_time}/index.m3u8" ffmpeg_input = ( f"-y -protocol_whitelist pipe,file,http,tcp -i {playlist_lines}" ) @@ -213,25 +222,30 @@ class RecordingExporter(threading.Thread): for page in range(1, num_pages + 1): playlist = export_recordings.paginate(page, page_size) playlist_lines.append( - f"file 'http://127.0.0.1:5000/vod/{self.camera}/start/{float(playlist[0].start_time)}/end/{float(playlist[-1].end_time)}/index.m3u8'" + f"file 'http://127.0.0.1:{internal_port}/vod/{self.camera}/start/{float(playlist[0].start_time)}/end/{float(playlist[-1].end_time)}/index.m3u8'" ) ffmpeg_input = "-y -protocol_whitelist pipe,file,http,tcp -f concat -safe 0 -i /dev/stdin" - if self.playback_factor == PlaybackFactorEnum.realtime: - ffmpeg_cmd = ( - f"{self.config.ffmpeg.ffmpeg_path} -hide_banner {ffmpeg_input} -c copy -movflags +faststart" - ).split(" ") - elif self.playback_factor == PlaybackFactorEnum.timelapse_25x: + if self.ffmpeg_input_args is not None and self.ffmpeg_output_args is not None: + hwaccel_args = ( + self.config.cameras[self.camera].record.export.hwaccel_args + if use_hwaccel + else None + ) 
ffmpeg_cmd = ( parse_preset_hardware_acceleration_encode( self.config.ffmpeg.ffmpeg_path, - self.config.ffmpeg.hwaccel_args, - f"-an {ffmpeg_input}", - f"{self.config.cameras[self.camera].record.export.timelapse_args} -movflags +faststart", + hwaccel_args, + f"{self.ffmpeg_input_args} -an {ffmpeg_input}".strip(), + f"{self.ffmpeg_output_args} -movflags +faststart".strip(), EncodeTypeEnum.timelapse, ) ).split(" ") + else: + ffmpeg_cmd = ( + f"{self.config.ffmpeg.ffmpeg_path} -hide_banner {ffmpeg_input} -c copy -movflags +faststart" + ).split(" ") # add metadata title = f"Frigate Recording for {self.camera}, {self.get_datetime_from_timestamp(self.start_time)} - {self.get_datetime_from_timestamp(self.end_time)}" @@ -241,7 +255,9 @@ class RecordingExporter(threading.Thread): return ffmpeg_cmd, playlist_lines - def get_preview_export_command(self, video_path: str) -> list[str]: + def get_preview_export_command( + self, video_path: str, use_hwaccel: bool = True + ) -> list[str]: playlist_lines = [] codec = "-c copy" @@ -309,20 +325,25 @@ class RecordingExporter(threading.Thread): "-y -protocol_whitelist pipe,file,tcp -f concat -safe 0 -i /dev/stdin" ) - if self.playback_factor == PlaybackFactorEnum.realtime: - ffmpeg_cmd = ( - f"{self.config.ffmpeg.ffmpeg_path} -hide_banner {ffmpeg_input} {codec} -movflags +faststart {video_path}" - ).split(" ") - elif self.playback_factor == PlaybackFactorEnum.timelapse_25x: + if self.ffmpeg_input_args is not None and self.ffmpeg_output_args is not None: + hwaccel_args = ( + self.config.cameras[self.camera].record.export.hwaccel_args + if use_hwaccel + else None + ) ffmpeg_cmd = ( parse_preset_hardware_acceleration_encode( self.config.ffmpeg.ffmpeg_path, - self.config.ffmpeg.hwaccel_args, - f"{TIMELAPSE_DATA_INPUT_ARGS} {ffmpeg_input}", - f"{self.config.cameras[self.camera].record.export.timelapse_args} -movflags +faststart {video_path}", + hwaccel_args, + f"{self.ffmpeg_input_args} {TIMELAPSE_DATA_INPUT_ARGS} {ffmpeg_input}".strip(), + 
f"{self.ffmpeg_output_args} -movflags +faststart {video_path}".strip(), EncodeTypeEnum.timelapse, ) ).split(" ") + else: + ffmpeg_cmd = ( + f"{self.config.ffmpeg.ffmpeg_path} -hide_banner {ffmpeg_input} {codec} -movflags +faststart {video_path}" + ).split(" ") # add metadata title = f"Frigate Preview for {self.camera}, {self.get_datetime_from_timestamp(self.start_time)} - {self.get_datetime_from_timestamp(self.end_time)}" @@ -348,17 +369,20 @@ class RecordingExporter(threading.Thread): video_path = f"{EXPORT_DIR}/{self.camera}_{filename_start_datetime}-{filename_end_datetime}_{cleaned_export_id}.mp4" thumb_path = self.save_thumbnail(self.export_id) - Export.insert( - { - Export.id: self.export_id, - Export.camera: self.camera, - Export.name: export_name, - Export.date: self.start_time, - Export.video_path: video_path, - Export.thumb_path: thumb_path, - Export.in_progress: True, - } - ).execute() + export_values = { + Export.id: self.export_id, + Export.camera: self.camera, + Export.name: export_name, + Export.date: self.start_time, + Export.video_path: video_path, + Export.thumb_path: thumb_path, + Export.in_progress: True, + } + + if self.export_case_id is not None: + export_values[Export.export_case] = self.export_case_id + + Export.insert(export_values).execute() try: if self.playback_source == PlaybackSourceEnum.recordings: @@ -376,6 +400,34 @@ class RecordingExporter(threading.Thread): capture_output=True, ) + # If export failed and cpu_fallback is enabled, retry without hwaccel + if ( + p.returncode != 0 + and self.cpu_fallback + and self.ffmpeg_input_args is not None + and self.ffmpeg_output_args is not None + ): + logger.warning( + f"Export with hardware acceleration failed, retrying without hwaccel for {self.export_id}" + ) + + if self.playback_source == PlaybackSourceEnum.recordings: + ffmpeg_cmd, playlist_lines = self.get_record_export_command( + video_path, use_hwaccel=False + ) + else: + ffmpeg_cmd, playlist_lines = self.get_preview_export_command( + 
video_path, use_hwaccel=False + ) + + p = sp.run( + ffmpeg_cmd, + input="\n".join(playlist_lines), + encoding="ascii", + preexec_fn=lower_priority, + capture_output=True, + ) + if p.returncode != 0: logger.error( f"Failed to export {self.playback_source.value} for command {' '.join(ffmpeg_cmd)}" diff --git a/frigate/record/util.py b/frigate/record/util.py deleted file mode 100644 index 6a91c1aaf..000000000 --- a/frigate/record/util.py +++ /dev/null @@ -1,147 +0,0 @@ -"""Recordings Utilities.""" - -import datetime -import logging -import os - -from peewee import DatabaseError, chunked - -from frigate.const import RECORD_DIR -from frigate.models import Recordings, RecordingsToDelete - -logger = logging.getLogger(__name__) - - -def remove_empty_directories(directory: str) -> None: - # list all directories recursively and sort them by path, - # longest first - paths = sorted( - [x[0] for x in os.walk(directory)], - key=lambda p: len(str(p)), - reverse=True, - ) - for path in paths: - # don't delete the parent - if path == directory: - continue - if len(os.listdir(path)) == 0: - os.rmdir(path) - - -def sync_recordings(limited: bool) -> None: - """Check the db for stale recordings entries that don't exist in the filesystem.""" - - def delete_db_entries_without_file(check_timestamp: float) -> bool: - """Delete db entries where file was deleted outside of frigate.""" - - if limited: - recordings = Recordings.select(Recordings.id, Recordings.path).where( - Recordings.start_time >= check_timestamp - ) - else: - # get all recordings in the db - recordings = Recordings.select(Recordings.id, Recordings.path) - - # Use pagination to process records in chunks - page_size = 1000 - num_pages = (recordings.count() + page_size - 1) // page_size - recordings_to_delete = set() - - for page in range(num_pages): - for recording in recordings.paginate(page, page_size): - if not os.path.exists(recording.path): - recordings_to_delete.add(recording.id) - - if len(recordings_to_delete) == 0: 
- return True - - logger.info( - f"Deleting {len(recordings_to_delete)} recording DB entries with missing files" - ) - - # convert back to list of dictionaries for insertion - recordings_to_delete = [ - {"id": recording_id} for recording_id in recordings_to_delete - ] - - if float(len(recordings_to_delete)) / max(1, recordings.count()) > 0.5: - logger.warning( - f"Deleting {(len(recordings_to_delete) / max(1, recordings.count()) * 100):.2f}% of recordings DB entries, could be due to configuration error. Aborting..." - ) - return False - - # create a temporary table for deletion - RecordingsToDelete.create_table(temporary=True) - - # insert ids to the temporary table - max_inserts = 1000 - for batch in chunked(recordings_to_delete, max_inserts): - RecordingsToDelete.insert_many(batch).execute() - - try: - # delete records in the main table that exist in the temporary table - query = Recordings.delete().where( - Recordings.id.in_(RecordingsToDelete.select(RecordingsToDelete.id)) - ) - query.execute() - except DatabaseError as e: - logger.error(f"Database error during recordings db cleanup: {e}") - - return True - - def delete_files_without_db_entry(files_on_disk: list[str]): - """Delete files where file is not inside frigate db.""" - files_to_delete = [] - - for file in files_on_disk: - if not Recordings.select().where(Recordings.path == file).exists(): - files_to_delete.append(file) - - if len(files_to_delete) == 0: - return True - - logger.info( - f"Deleting {len(files_to_delete)} recordings files with missing DB entries" - ) - - if float(len(files_to_delete)) / max(1, len(files_on_disk)) > 0.5: - logger.debug( - f"Deleting {(len(files_to_delete) / max(1, len(files_on_disk)) * 100):.2f}% of recordings DB entries, could be due to configuration error. Aborting..." 
- ) - return False - - for file in files_to_delete: - os.unlink(file) - - return True - - logger.debug("Start sync recordings.") - - # start checking on the hour 36 hours ago - check_point = datetime.datetime.now().replace( - minute=0, second=0, microsecond=0 - ).astimezone(datetime.timezone.utc) - datetime.timedelta(hours=36) - db_success = delete_db_entries_without_file(check_point.timestamp()) - - # only try to cleanup files if db cleanup was successful - if db_success: - if limited: - # get recording files from last 36 hours - hour_check = f"{RECORD_DIR}/{check_point.strftime('%Y-%m-%d/%H')}" - files_on_disk = { - os.path.join(root, file) - for root, _, files in os.walk(RECORD_DIR) - for file in files - if root > hour_check - } - else: - # get all recordings files on disk and put them in a set - files_on_disk = { - os.path.join(root, file) - for root, _, files in os.walk(RECORD_DIR) - for file in files - } - - delete_files_without_db_entry(files_on_disk) - - logger.debug("End sync recordings.") diff --git a/frigate/review/maintainer.py b/frigate/review/maintainer.py index 917c0c5ac..6afdc8de9 100644 --- a/frigate/review/maintainer.py +++ b/frigate/review/maintainer.py @@ -394,7 +394,11 @@ class ReviewSegmentMaintainer(threading.Thread): if activity.has_activity_category(SeverityEnum.alert): # update current time for last alert activity - segment.last_alert_time = frame_time + if ( + segment.last_alert_time is None + or frame_time > segment.last_alert_time + ): + segment.last_alert_time = frame_time if segment.severity != SeverityEnum.alert: # if segment is not alert category but current activity is @@ -404,7 +408,11 @@ class ReviewSegmentMaintainer(threading.Thread): should_update_image = True if activity.has_activity_category(SeverityEnum.detection): - segment.last_detection_time = frame_time + if ( + segment.last_detection_time is None + or frame_time > segment.last_detection_time + ): + segment.last_detection_time = frame_time for object in 
activity.get_all_objects(): # Alert-level objects should always be added (they extend/upgrade the segment) @@ -695,17 +703,28 @@ class ReviewSegmentMaintainer(threading.Thread): current_segment.detections[manual_info["event_id"]] = ( manual_info["label"] ) - if ( - topic == DetectionTypeEnum.api - and self.config.cameras[camera].review.alerts.enabled - ): - current_segment.severity = SeverityEnum.alert + if topic == DetectionTypeEnum.api: + # manual_info["label"] contains 'label: sub_label' + # so split out the label without modifying manual_info + if ( + self.config.cameras[camera].review.detections.enabled + and manual_info["label"].split(": ")[0] + in self.config.cameras[camera].review.detections.labels + ): + current_segment.last_detection_time = manual_info[ + "end_time" + ] + elif self.config.cameras[camera].review.alerts.enabled: + current_segment.severity = SeverityEnum.alert + current_segment.last_alert_time = manual_info[ + "end_time" + ] elif ( topic == DetectionTypeEnum.lpr and self.config.cameras[camera].review.detections.enabled ): current_segment.severity = SeverityEnum.detection - current_segment.last_alert_time = manual_info["end_time"] + current_segment.last_alert_time = manual_info["end_time"] elif manual_info["state"] == ManualEventState.start: self.indefinite_events[camera][manual_info["event_id"]] = ( manual_info["label"] @@ -717,7 +736,18 @@ class ReviewSegmentMaintainer(threading.Thread): topic == DetectionTypeEnum.api and self.config.cameras[camera].review.alerts.enabled ): - current_segment.severity = SeverityEnum.alert + # manual_info["label"] contains 'label: sub_label' + # so split out the label without modifying manual_info + if ( + not self.config.cameras[ + camera + ].review.detections.enabled + or manual_info["label"].split(": ")[0] + not in self.config.cameras[ + camera + ].review.detections.labels + ): + current_segment.severity = SeverityEnum.alert elif ( topic == DetectionTypeEnum.lpr and 
self.config.cameras[camera].review.detections.enabled @@ -789,11 +819,23 @@ class ReviewSegmentMaintainer(threading.Thread): detections, ) elif topic == DetectionTypeEnum.api: - if self.config.cameras[camera].review.alerts.enabled: + severity = None + # manual_info["label"] contains 'label: sub_label' + # so split out the label without modifying manual_info + if ( + self.config.cameras[camera].review.detections.enabled + and manual_info["label"].split(": ")[0] + in self.config.cameras[camera].review.detections.labels + ): + severity = SeverityEnum.detection + elif self.config.cameras[camera].review.alerts.enabled: + severity = SeverityEnum.alert + + if severity: self.active_review_segments[camera] = PendingReviewSegment( camera, frame_time, - SeverityEnum.alert, + severity, {manual_info["event_id"]: manual_info["label"]}, {}, [], @@ -820,7 +862,7 @@ class ReviewSegmentMaintainer(threading.Thread): ].last_detection_time = manual_info["end_time"] else: logger.warning( - f"Manual event API has been called for {camera}, but alerts are disabled. This manual event will not appear as an alert." + f"Manual event API has been called for {camera}, but alerts and detections are disabled. This manual event will not appear as an alert or detection." 
) elif topic == DetectionTypeEnum.lpr: if self.config.cameras[camera].review.detections.enabled: diff --git a/frigate/stats/util.py b/frigate/stats/util.py index 410350d96..f4f91f83f 100644 --- a/frigate/stats/util.py +++ b/frigate/stats/util.py @@ -22,6 +22,7 @@ from frigate.util.services import ( get_bandwidth_stats, get_cpu_stats, get_fs_type, + get_hailo_temps, get_intel_gpu_stats, get_jetson_stats, get_nvidia_gpu_stats, @@ -90,9 +91,80 @@ def get_temperatures() -> dict[str, float]: if temp is not None: temps[apex] = temp + # Get temperatures for Hailo devices + temps.update(get_hailo_temps()) + return temps +def get_detector_temperature( + detector_type: str, + detector_index_by_type: dict[str, int], +) -> Optional[float]: + """Get temperature for a specific detector based on its type.""" + if detector_type == "edgetpu": + # Get temperatures for all attached Corals + base = "/sys/class/apex/" + if os.path.isdir(base): + apex_devices = sorted(os.listdir(base)) + index = detector_index_by_type.get("edgetpu", 0) + if index < len(apex_devices): + apex_name = apex_devices[index] + temp = read_temperature(os.path.join(base, apex_name, "temp")) + if temp is not None: + return temp + elif detector_type == "hailo8l": + # Get temperatures for Hailo devices + hailo_temps = get_hailo_temps() + if hailo_temps: + hailo_device_names = sorted(hailo_temps.keys()) + index = detector_index_by_type.get("hailo8l", 0) + if index < len(hailo_device_names): + device_name = hailo_device_names[index] + return hailo_temps[device_name] + elif detector_type == "rknn": + # Rockchip temperatures are handled by the GPU / NPU stats + # as there are not detector specific temperatures + pass + + return None + + +def get_detector_stats( + stats_tracking: StatsTrackingTypes, +) -> dict[str, dict[str, Any]]: + """Get stats for all detectors, including temperatures based on detector type.""" + detector_stats: dict[str, dict[str, Any]] = {} + detector_type_indices: dict[str, int] = {} + + for name, 
detector in stats_tracking["detectors"].items(): + pid = detector.detect_process.pid if detector.detect_process else None + detector_type = detector.detector_config.type + + # Keep track of the index for each detector type to match temperatures correctly + current_index = detector_type_indices.get(detector_type, 0) + detector_type_indices[detector_type] = current_index + 1 + + detector_stat = { + "inference_speed": round(detector.avg_inference_speed.value * 1000, 2), # type: ignore[attr-defined] + # issue https://github.com/python/typeshed/issues/8799 + # from mypy 0.981 onwards + "detection_start": detector.detection_start.value, # type: ignore[attr-defined] + # issue https://github.com/python/typeshed/issues/8799 + # from mypy 0.981 onwards + "pid": pid, + } + + temp = get_detector_temperature(detector_type, {detector_type: current_index}) + + if temp is not None: + detector_stat["temperature"] = round(temp, 1) + + detector_stats[name] = detector_stat + + return detector_stats + + def get_processing_stats( config: FrigateConfig, stats: dict[str, str], hwaccel_errors: list[str] ) -> None: @@ -173,6 +245,7 @@ async def set_gpu_stats( "mem": str(round(float(nvidia_usage[i]["mem"]), 2)) + "%", "enc": str(round(float(nvidia_usage[i]["enc"]), 2)) + "%", "dec": str(round(float(nvidia_usage[i]["dec"]), 2)) + "%", + "temp": str(nvidia_usage[i]["temp"]), } else: @@ -278,6 +351,32 @@ def stats_snapshot( if camera_stats.capture_process_pid.value else None ) + # Calculate connection quality based on current state + # This is computed at stats-collection time so offline cameras + # correctly show as unusable rather than excellent + expected_fps = config.cameras[name].detect.fps + current_fps = camera_stats.camera_fps.value + reconnects = camera_stats.reconnects_last_hour.value + stalls = camera_stats.stalls_last_hour.value + + if current_fps < 0.1: + quality_str = "unusable" + elif reconnects == 0 and current_fps >= 0.9 * expected_fps and stalls < 5: + quality_str = 
"excellent" + elif reconnects <= 2 and current_fps >= 0.6 * expected_fps: + quality_str = "fair" + elif reconnects > 10 or current_fps < 1.0 or stalls > 100: + quality_str = "unusable" + else: + quality_str = "poor" + + connection_quality = { + "connection_quality": quality_str, + "expected_fps": expected_fps, + "reconnects_last_hour": reconnects, + "stalls_last_hour": stalls, + } + stats["cameras"][name] = { "camera_fps": round(camera_stats.camera_fps.value, 2), "process_fps": round(camera_stats.process_fps.value, 2), @@ -289,20 +388,10 @@ def stats_snapshot( "ffmpeg_pid": ffmpeg_pid, "audio_rms": round(camera_stats.audio_rms.value, 4), "audio_dBFS": round(camera_stats.audio_dBFS.value, 4), + **connection_quality, } - stats["detectors"] = {} - for name, detector in stats_tracking["detectors"].items(): - pid = detector.detect_process.pid if detector.detect_process else None - stats["detectors"][name] = { - "inference_speed": round(detector.avg_inference_speed.value * 1000, 2), # type: ignore[attr-defined] - # issue https://github.com/python/typeshed/issues/8799 - # from mypy 0.981 onwards - "detection_start": detector.detection_start.value, # type: ignore[attr-defined] - # issue https://github.com/python/typeshed/issues/8799 - # from mypy 0.981 onwards - "pid": pid, - } + stats["detectors"] = get_detector_stats(stats_tracking) stats["camera_fps"] = round(total_camera_fps, 2) stats["process_fps"] = round(total_process_fps, 2) stats["skipped_fps"] = round(total_skipped_fps, 2) @@ -388,7 +477,6 @@ def stats_snapshot( "version": VERSION, "latest_version": stats_tracking["latest_frigate_version"], "storage": {}, - "temperatures": get_temperatures(), "last_updated": int(time.time()), } diff --git a/frigate/test/http_api/test_http_latest_frame.py b/frigate/test/http_api/test_http_latest_frame.py new file mode 100644 index 000000000..755ee6eb1 --- /dev/null +++ b/frigate/test/http_api/test_http_latest_frame.py @@ -0,0 +1,107 @@ +import os +import shutil +from unittest.mock 
import MagicMock + +import cv2 +import numpy as np + +from frigate.output.preview import PREVIEW_CACHE_DIR, PREVIEW_FRAME_TYPE +from frigate.test.http_api.base_http_test import AuthTestClient, BaseTestHttp + + +class TestHttpLatestFrame(BaseTestHttp): + def setUp(self): + super().setUp([]) + self.app = super().create_app() + self.app.detected_frames_processor = MagicMock() + + if os.path.exists(PREVIEW_CACHE_DIR): + shutil.rmtree(PREVIEW_CACHE_DIR) + os.makedirs(PREVIEW_CACHE_DIR) + + def tearDown(self): + if os.path.exists(PREVIEW_CACHE_DIR): + shutil.rmtree(PREVIEW_CACHE_DIR) + super().tearDown() + + def test_latest_frame_fallback_to_preview(self): + camera = "front_door" + # 1. Mock frame processor to return None (simulating offline/missing frame) + self.app.detected_frames_processor.get_current_frame.return_value = None + # Return a timestamp that is after our dummy preview frame + self.app.detected_frames_processor.get_current_frame_time.return_value = ( + 1234567891.0 + ) + + # 2. Create a dummy preview file + dummy_frame = np.zeros((180, 320, 3), np.uint8) + cv2.putText( + dummy_frame, + "PREVIEW", + (50, 50), + cv2.FONT_HERSHEY_SIMPLEX, + 1, + (255, 255, 255), + 2, + ) + preview_path = os.path.join( + PREVIEW_CACHE_DIR, f"preview_{camera}-1234567890.0.{PREVIEW_FRAME_TYPE}" + ) + cv2.imwrite(preview_path, dummy_frame) + + with AuthTestClient(self.app) as client: + response = client.get(f"/{camera}/latest.webp") + assert response.status_code == 200 + assert response.headers.get("X-Frigate-Offline") == "true" + # Verify we got an image (webp) + assert response.headers.get("content-type") == "image/webp" + + def test_latest_frame_no_fallback_when_live(self): + camera = "front_door" + # 1. 
Mock frame processor to return a live frame + dummy_frame = np.zeros((180, 320, 3), np.uint8) + self.app.detected_frames_processor.get_current_frame.return_value = dummy_frame + self.app.detected_frames_processor.get_current_frame_time.return_value = ( + 2000000000.0 # Way in the future + ) + + with AuthTestClient(self.app) as client: + response = client.get(f"/{camera}/latest.webp") + assert response.status_code == 200 + assert "X-Frigate-Offline" not in response.headers + + def test_latest_frame_stale_falls_back_to_preview(self): + camera = "front_door" + # 1. Mock frame processor to return a stale frame + dummy_frame = np.zeros((180, 320, 3), np.uint8) + self.app.detected_frames_processor.get_current_frame.return_value = dummy_frame + # Return a timestamp that is after our dummy preview frame, but way in the past + self.app.detected_frames_processor.get_current_frame_time.return_value = 1000.0 + + # 2. Create a dummy preview file + preview_path = os.path.join( + PREVIEW_CACHE_DIR, f"preview_{camera}-999.0.{PREVIEW_FRAME_TYPE}" + ) + cv2.imwrite(preview_path, dummy_frame) + + with AuthTestClient(self.app) as client: + response = client.get(f"/{camera}/latest.webp") + assert response.status_code == 200 + assert response.headers.get("X-Frigate-Offline") == "true" + + def test_latest_frame_no_preview_found(self): + camera = "front_door" + # 1. Mock frame processor to return None + self.app.detected_frames_processor.get_current_frame.return_value = None + + # 2. No preview file created + + with AuthTestClient(self.app) as client: + response = client.get(f"/{camera}/latest.webp") + # Should fall back to camera-error.jpg (which might not exist in test env, but let's see) + # If camera-error.jpg is not found, it returns 500 "Unable to get valid frame" in latest_frame + # OR it uses request.app.camera_error_image if already loaded. + + # Since we didn't provide camera-error.jpg, it might 500 if glob fails or return 500 if frame is None. 
+ assert response.status_code in [200, 500] + assert "X-Frigate-Offline" not in response.headers diff --git a/frigate/test/test_preview_loader.py b/frigate/test/test_preview_loader.py new file mode 100644 index 000000000..e2062fce1 --- /dev/null +++ b/frigate/test/test_preview_loader.py @@ -0,0 +1,80 @@ +import os +import shutil +import unittest + +from frigate.output.preview import ( + PREVIEW_CACHE_DIR, + PREVIEW_FRAME_TYPE, + get_most_recent_preview_frame, +) + + +class TestPreviewLoader(unittest.TestCase): + def setUp(self): + if os.path.exists(PREVIEW_CACHE_DIR): + shutil.rmtree(PREVIEW_CACHE_DIR) + os.makedirs(PREVIEW_CACHE_DIR) + + def tearDown(self): + if os.path.exists(PREVIEW_CACHE_DIR): + shutil.rmtree(PREVIEW_CACHE_DIR) + + def test_get_most_recent_preview_frame_missing(self): + self.assertIsNone(get_most_recent_preview_frame("test_camera")) + + def test_get_most_recent_preview_frame_exists(self): + camera = "test_camera" + # create dummy preview files + for ts in ["1000.0", "2000.0", "1500.0"]: + with open( + os.path.join( + PREVIEW_CACHE_DIR, f"preview_{camera}-{ts}.{PREVIEW_FRAME_TYPE}" + ), + "w", + ) as f: + f.write(f"test_{ts}") + + expected_path = os.path.join( + PREVIEW_CACHE_DIR, f"preview_{camera}-2000.0.{PREVIEW_FRAME_TYPE}" + ) + self.assertEqual(get_most_recent_preview_frame(camera), expected_path) + + def test_get_most_recent_preview_frame_before(self): + camera = "test_camera" + # create dummy preview files + for ts in ["1000.0", "2000.0"]: + with open( + os.path.join( + PREVIEW_CACHE_DIR, f"preview_{camera}-{ts}.{PREVIEW_FRAME_TYPE}" + ), + "w", + ) as f: + f.write(f"test_{ts}") + + # Test finding frame before or at 1500 + expected_path = os.path.join( + PREVIEW_CACHE_DIR, f"preview_{camera}-1000.0.{PREVIEW_FRAME_TYPE}" + ) + self.assertEqual( + get_most_recent_preview_frame(camera, before=1500.0), expected_path + ) + + # Test finding frame before or at 999 + self.assertIsNone(get_most_recent_preview_frame(camera, before=999.0)) + + def 
test_get_most_recent_preview_frame_other_camera(self): + camera = "test_camera" + other_camera = "other_camera" + with open( + os.path.join( + PREVIEW_CACHE_DIR, f"preview_{other_camera}-3000.0.{PREVIEW_FRAME_TYPE}" + ), + "w", + ) as f: + f.write("test") + + self.assertIsNone(get_most_recent_preview_frame(camera)) + + def test_get_most_recent_preview_frame_no_directory(self): + shutil.rmtree(PREVIEW_CACHE_DIR) + self.assertIsNone(get_most_recent_preview_frame("test_camera")) diff --git a/frigate/track/object_processing.py b/frigate/track/object_processing.py index e0ee74228..9ac04b42a 100644 --- a/frigate/track/object_processing.py +++ b/frigate/track/object_processing.py @@ -185,7 +185,7 @@ class TrackedObjectProcessor(threading.Thread): def snapshot(camera: str, obj: TrackedObject) -> bool: mqtt_config: CameraMqttConfig = self.config.cameras[camera].mqtt if mqtt_config.enabled and self.should_mqtt_snapshot(camera, obj): - jpg_bytes = obj.get_img_bytes( + jpg_bytes, _ = obj.get_img_bytes( ext="jpg", timestamp=mqtt_config.timestamp, bounding_box=mqtt_config.bounding_box, @@ -515,6 +515,7 @@ class TrackedObjectProcessor(threading.Thread): duration, source_type, draw, + pre_capture, ) = payload # save the snapshot image @@ -522,6 +523,11 @@ class TrackedObjectProcessor(threading.Thread): None, event_id, label, draw ) end_time = frame_time + duration if duration is not None else None + start_time = ( + frame_time - self.config.cameras[camera_name].record.event_pre_capture + if pre_capture is None + else frame_time - pre_capture + ) # send event to event maintainer self.event_sender.publish( @@ -536,8 +542,7 @@ class TrackedObjectProcessor(threading.Thread): "sub_label": sub_label, "score": score, "camera": camera_name, - "start_time": frame_time - - self.config.cameras[camera_name].record.event_pre_capture, + "start_time": start_time, "end_time": end_time, "has_clip": self.config.cameras[camera_name].record.enabled and include_recording, diff --git 
a/frigate/track/tracked_object.py b/frigate/track/tracked_object.py index a95221bbd..f435de7b6 100644 --- a/frigate/track/tracked_object.py +++ b/frigate/track/tracked_object.py @@ -434,7 +434,7 @@ class TrackedObject: return count > (self.camera_config.detect.stationary.threshold or 50) def get_thumbnail(self, ext: str) -> bytes | None: - img_bytes = self.get_img_bytes( + img_bytes, _ = self.get_img_bytes( ext, timestamp=False, bounding_box=False, crop=True, height=175 ) @@ -475,20 +475,21 @@ class TrackedObject: crop: bool = False, height: int | None = None, quality: int | None = None, - ) -> bytes | None: + ) -> tuple[bytes | None, float | None]: if self.thumbnail_data is None: - return None + return None, None try: + frame_time = self.thumbnail_data["frame_time"] best_frame = cv2.cvtColor( - self.frame_cache[self.thumbnail_data["frame_time"]]["frame"], + self.frame_cache[frame_time]["frame"], cv2.COLOR_YUV2BGR_I420, ) except KeyError: logger.warning( - f"Unable to create jpg because frame {self.thumbnail_data['frame_time']} is not in the cache" + f"Unable to create jpg because frame {frame_time} is not in the cache" ) - return None + return None, None if bounding_box: thickness = 2 @@ -570,13 +571,13 @@ class TrackedObject: ret, jpg = cv2.imencode(f".{ext}", best_frame, quality_params) if ret: - return jpg.tobytes() + return jpg.tobytes(), frame_time else: - return None + return None, None def write_snapshot_to_disk(self) -> None: snapshot_config: SnapshotsConfig = self.camera_config.snapshots - jpg_bytes = self.get_img_bytes( + jpg_bytes, _ = self.get_img_bytes( ext="jpg", timestamp=snapshot_config.timestamp, bounding_box=snapshot_config.bounding_box, diff --git a/frigate/types.py b/frigate/types.py index 6c5135616..77bb50845 100644 --- a/frigate/types.py +++ b/frigate/types.py @@ -26,6 +26,15 @@ class ModelStatusTypesEnum(str, Enum): failed = "failed" +class JobStatusTypesEnum(str, Enum): + pending = "pending" + queued = "queued" + running = "running" + 
success = "success" + failed = "failed" + cancelled = "cancelled" + + class TrackedObjectUpdateTypesEnum(str, Enum): description = "description" face = "face" diff --git a/frigate/util/config.py b/frigate/util/config.py index c3d796397..1af5c8e4e 100644 --- a/frigate/util/config.py +++ b/frigate/util/config.py @@ -13,7 +13,7 @@ from frigate.util.services import get_video_properties logger = logging.getLogger(__name__) -CURRENT_CONFIG_VERSION = "0.17-0" +CURRENT_CONFIG_VERSION = "0.18-0" DEFAULT_CONFIG_FILE = os.path.join(CONFIG_DIR, "config.yml") @@ -98,6 +98,13 @@ def migrate_frigate_config(config_file: str): yaml.dump(new_config, f) previous_version = "0.17-0" + if previous_version < "0.18-0": + logger.info(f"Migrating frigate config from {previous_version} to 0.18-0...") + new_config = migrate_018_0(config) + with open(config_file, "w") as f: + yaml.dump(new_config, f) + previous_version = "0.18-0" + logger.info("Finished frigate config migration...") @@ -427,6 +434,49 @@ def migrate_017_0(config: dict[str, dict[str, Any]]) -> dict[str, dict[str, Any] return new_config +def migrate_018_0(config: dict[str, dict[str, Any]]) -> dict[str, dict[str, Any]]: + """Handle migrating frigate config to 0.18-0""" + new_config = config.copy() + + # Remove deprecated sync_recordings from global record config + if new_config.get("record", {}).get("sync_recordings") is not None: + del new_config["record"]["sync_recordings"] + + # Remove deprecated timelapse_args from global record export config + if new_config.get("record", {}).get("export", {}).get("timelapse_args") is not None: + del new_config["record"]["export"]["timelapse_args"] + # Remove export section if empty + if not new_config.get("record", {}).get("export"): + del new_config["record"]["export"] + # Remove record section if empty + if not new_config.get("record"): + del new_config["record"] + + # Remove deprecated sync_recordings and timelapse_args from camera-specific record configs + for name, camera in 
config.get("cameras", {}).items(): + camera_config: dict[str, dict[str, Any]] = camera.copy() + + if camera_config.get("record", {}).get("sync_recordings") is not None: + del camera_config["record"]["sync_recordings"] + + if ( + camera_config.get("record", {}).get("export", {}).get("timelapse_args") + is not None + ): + del camera_config["record"]["export"]["timelapse_args"] + # Remove export section if empty + if not camera_config.get("record", {}).get("export"): + del camera_config["record"]["export"] + # Remove record section if empty + if not camera_config.get("record"): + del camera_config["record"] + + new_config["cameras"][name] = camera_config + + new_config["version"] = "0.18-0" + return new_config + + def get_relative_coordinates( mask: Optional[Union[str, list]], frame_shape: tuple[int, int] ) -> Union[str, list]: diff --git a/frigate/util/media.py b/frigate/util/media.py new file mode 100644 index 000000000..c7de85c9f --- /dev/null +++ b/frigate/util/media.py @@ -0,0 +1,808 @@ +"""Recordings Utilities.""" + +import datetime +import errno +import logging +import os +from dataclasses import dataclass, field +from pathlib import Path +from typing import Iterable + +from peewee import DatabaseError, chunked + +from frigate.const import CLIPS_DIR, EXPORT_DIR, RECORD_DIR, THUMB_DIR +from frigate.models import ( + Event, + Export, + Previews, + Recordings, + RecordingsToDelete, + ReviewSegment, +) + +logger = logging.getLogger(__name__) + + +# Safety threshold - abort if more than 50% of files would be deleted +SAFETY_THRESHOLD = 0.5 + + +@dataclass +class SyncResult: + """Result of a sync operation.""" + + media_type: str + files_checked: int = 0 + orphans_found: int = 0 + orphans_deleted: int = 0 + orphan_paths: list[str] = field(default_factory=list) + aborted: bool = False + error: str | None = None + + def to_dict(self) -> dict: + return { + "media_type": self.media_type, + "files_checked": self.files_checked, + "orphans_found": self.orphans_found, + 
"orphans_deleted": self.orphans_deleted, + "aborted": self.aborted, + "error": self.error, + } + + +def remove_empty_directories(root: Path, paths: Iterable[Path]) -> None: + """ + Remove directories if they exist and are empty. + Silently ignores non-existent and non-empty directories. + Attempts to remove parent directories as well, stopping at the given root. + """ + count = 0 + while True: + parents = set() + for path in paths: + if path == root: + continue + + try: + path.rmdir() + count += 1 + except FileNotFoundError: + pass + except OSError as e: + if e.errno == errno.ENOTEMPTY: + continue + raise + + parents.add(path.parent) + + if not parents: + break + + paths = parents + + logger.debug("Removed {count} empty directories") + + +def sync_recordings( + limited: bool = False, dry_run: bool = False, force: bool = False +) -> SyncResult: + """Sync recordings between the database and disk using the SyncResult format.""" + + result = SyncResult(media_type="recordings") + + try: + logger.debug("Start sync recordings.") + + # start checking on the hour 36 hours ago + check_point = datetime.datetime.now().replace( + minute=0, second=0, microsecond=0 + ).astimezone(datetime.timezone.utc) - datetime.timedelta(hours=36) + + # Gather DB recordings to inspect + if limited: + recordings_query = Recordings.select(Recordings.id, Recordings.path).where( + Recordings.start_time >= check_point.timestamp() + ) + else: + recordings_query = Recordings.select(Recordings.id, Recordings.path) + + recordings_count = recordings_query.count() + page_size = 1000 + num_pages = (recordings_count + page_size - 1) // page_size + recordings_to_delete: list[dict] = [] + + for page in range(num_pages): + for recording in recordings_query.paginate(page, page_size): + if not os.path.exists(recording.path): + recordings_to_delete.append( + {"id": recording.id, "path": recording.path} + ) + + result.orphans_found += len(recordings_to_delete) + result.orphan_paths.extend( + [ + recording["path"] 
+ for recording in recordings_to_delete + if recording.get("path") + ] + ) + + if ( + recordings_count + and len(recordings_to_delete) / recordings_count > SAFETY_THRESHOLD + ): + if force: + logger.warning( + f"Deleting {(len(recordings_to_delete) / max(1, recordings_count) * 100):.2f}% of recordings DB entries (force=True, bypassing safety threshold)" + ) + else: + logger.warning( + f"Deleting {(len(recordings_to_delete) / max(1, recordings_count) * 100):.2f}% of recordings DB entries, could be due to configuration error. Aborting..." + ) + result.aborted = True + return result + + if recordings_to_delete and not dry_run: + logger.info( + f"Deleting {len(recordings_to_delete)} recording DB entries with missing files" + ) + + RecordingsToDelete.create_table(temporary=True) + + max_inserts = 1000 + for batch in chunked(recordings_to_delete, max_inserts): + RecordingsToDelete.insert_many(batch).execute() + + try: + deleted = ( + Recordings.delete() + .where( + Recordings.id.in_( + RecordingsToDelete.select(RecordingsToDelete.id) + ) + ) + .execute() + ) + result.orphans_deleted += int(deleted) + except DatabaseError as e: + logger.error(f"Database error during recordings db cleanup: {e}") + result.error = str(e) + result.aborted = True + return result + + if result.aborted: + logger.warning("Recording DB sync aborted; skipping file cleanup.") + return result + + # Only try to cleanup files if db cleanup was successful or dry_run + if limited: + # get recording files from last 36 hours + hour_check = f"{RECORD_DIR}/{check_point.strftime('%Y-%m-%d/%H')}" + files_on_disk = { + os.path.join(root, file) + for root, _, files in os.walk(RECORD_DIR) + for file in files + if root > hour_check + } + else: + # get all recordings files on disk and put them in a set + files_on_disk = { + os.path.join(root, file) + for root, _, files in os.walk(RECORD_DIR) + for file in files + } + + result.files_checked = len(files_on_disk) + + files_to_delete: list[str] = [] + for file in 
files_on_disk: + if not Recordings.select().where(Recordings.path == file).exists(): + files_to_delete.append(file) + + result.orphans_found += len(files_to_delete) + result.orphan_paths.extend(files_to_delete) + + if ( + files_on_disk + and len(files_to_delete) / len(files_on_disk) > SAFETY_THRESHOLD + ): + if force: + logger.warning( + f"Deleting {(len(files_to_delete) / max(1, len(files_on_disk)) * 100):.2f}% of recordings files (force=True, bypassing safety threshold)" + ) + else: + logger.warning( + f"Deleting {(len(files_to_delete) / max(1, len(files_on_disk)) * 100):.2f}% of recordings files, could be due to configuration error. Aborting..." + ) + result.aborted = True + return result + + if dry_run: + logger.info( + f"Recordings sync (dry run): Found {len(files_to_delete)} orphaned files" + ) + return result + + # Delete orphans + logger.info(f"Deleting {len(files_to_delete)} orphaned recordings files") + for file in files_to_delete: + try: + os.unlink(file) + result.orphans_deleted += 1 + except OSError as e: + logger.error(f"Failed to delete {file}: {e}") + + logger.debug("End sync recordings.") + + except Exception as e: + logger.error(f"Error syncing recordings: {e}") + result.error = str(e) + + return result + + +def sync_event_snapshots(dry_run: bool = False, force: bool = False) -> SyncResult: + """Sync event snapshots - delete files not referenced by any event. 
+ + Event snapshots are stored at: CLIPS_DIR/{camera}-{event_id}.jpg + Also checks for clean variants: {camera}-{event_id}-clean.webp and -clean.png + """ + result = SyncResult(media_type="event_snapshots") + + try: + # Get all event IDs with snapshots from DB + events_with_snapshots = set( + f"{e.camera}-{e.id}" + for e in Event.select(Event.id, Event.camera).where( + Event.has_snapshot == True + ) + ) + + # Find snapshot files on disk (directly in CLIPS_DIR, not subdirectories) + snapshot_files: list[tuple[str, str]] = [] # (full_path, base_name) + if os.path.isdir(CLIPS_DIR): + for file in os.listdir(CLIPS_DIR): + file_path = os.path.join(CLIPS_DIR, file) + if os.path.isfile(file_path) and file.endswith( + (".jpg", "-clean.webp", "-clean.png") + ): + # Extract base name (camera-event_id) from filename + base_name = file + for suffix in ["-clean.webp", "-clean.png", ".jpg"]: + if file.endswith(suffix): + base_name = file[: -len(suffix)] + break + snapshot_files.append((file_path, base_name)) + + result.files_checked = len(snapshot_files) + + # Find orphans + orphans: list[str] = [] + for file_path, base_name in snapshot_files: + if base_name not in events_with_snapshots: + orphans.append(file_path) + + result.orphans_found = len(orphans) + result.orphan_paths = orphans + + if len(orphans) == 0: + return result + + # Safety check + if ( + result.files_checked > 0 + and len(orphans) / result.files_checked > SAFETY_THRESHOLD + ): + if force: + logger.warning( + f"Event snapshots sync: Would delete {len(orphans)}/{result.files_checked} " + f"({len(orphans) / result.files_checked * 100:.2f}%) files (force=True, bypassing safety threshold)." + ) + else: + logger.warning( + f"Event snapshots sync: Would delete {len(orphans)}/{result.files_checked} " + f"({len(orphans) / result.files_checked * 100:.2f}%) files. " + "Aborting due to safety threshold." 
+ ) + result.aborted = True + return result + + if dry_run: + logger.info( + f"Event snapshots sync (dry run): Found {len(orphans)} orphaned files" + ) + return result + + # Delete orphans + logger.info(f"Deleting {len(orphans)} orphaned event snapshot files") + for file_path in orphans: + try: + os.unlink(file_path) + result.orphans_deleted += 1 + except OSError as e: + logger.error(f"Failed to delete {file_path}: {e}") + + except Exception as e: + logger.error(f"Error syncing event snapshots: {e}") + result.error = str(e) + + return result + + +def sync_event_thumbnails(dry_run: bool = False, force: bool = False) -> SyncResult: + """Sync event thumbnails - delete files not referenced by any event. + + Event thumbnails are stored at: THUMB_DIR/{camera}/{event_id}.webp + Only events without inline thumbnail (thumbnail field is None/empty) use files. + """ + result = SyncResult(media_type="event_thumbnails") + + try: + # Get all events that use file-based thumbnails + # Events with thumbnail field populated don't need files + events_with_file_thumbs = set( + (e.camera, e.id) + for e in Event.select(Event.id, Event.camera, Event.thumbnail).where( + (Event.thumbnail.is_null(True)) | (Event.thumbnail == "") + ) + ) + + # Find thumbnail files on disk + thumbnail_files: list[ + tuple[str, str, str] + ] = [] # (full_path, camera, event_id) + if os.path.isdir(THUMB_DIR): + for camera_dir in os.listdir(THUMB_DIR): + camera_path = os.path.join(THUMB_DIR, camera_dir) + if not os.path.isdir(camera_path): + continue + for file in os.listdir(camera_path): + if file.endswith(".webp"): + event_id = file[:-5] # Remove .webp + file_path = os.path.join(camera_path, file) + thumbnail_files.append((file_path, camera_dir, event_id)) + + result.files_checked = len(thumbnail_files) + + # Find orphans - files where event doesn't exist or event has inline thumbnail + orphans: list[str] = [] + for file_path, camera, event_id in thumbnail_files: + if (camera, event_id) not in 
events_with_file_thumbs: + # Check if event exists with inline thumbnail + event_exists = Event.select().where(Event.id == event_id).exists() + if not event_exists: + orphans.append(file_path) + # If event exists with inline thumbnail, the file is also orphaned + elif event_exists: + event = Event.get_or_none(Event.id == event_id) + if event and event.thumbnail: + orphans.append(file_path) + + result.orphans_found = len(orphans) + result.orphan_paths = orphans + + if len(orphans) == 0: + return result + + # Safety check + if ( + result.files_checked > 0 + and len(orphans) / result.files_checked > SAFETY_THRESHOLD + ): + if force: + logger.warning( + f"Event thumbnails sync: Would delete {len(orphans)}/{result.files_checked} " + f"({len(orphans) / result.files_checked * 100:.2f}%) files (force=True, bypassing safety threshold)." + ) + else: + logger.warning( + f"Event thumbnails sync: Would delete {len(orphans)}/{result.files_checked} " + f"({len(orphans) / result.files_checked * 100:.2f}%) files. " + "Aborting due to safety threshold." + ) + result.aborted = True + return result + + if dry_run: + logger.info( + f"Event thumbnails sync (dry run): Found {len(orphans)} orphaned files" + ) + return result + + # Delete orphans + logger.info(f"Deleting {len(orphans)} orphaned event thumbnail files") + for file_path in orphans: + try: + os.unlink(file_path) + result.orphans_deleted += 1 + except OSError as e: + logger.error(f"Failed to delete {file_path}: {e}") + + except Exception as e: + logger.error(f"Error syncing event thumbnails: {e}") + result.error = str(e) + + return result + + +def sync_review_thumbnails(dry_run: bool = False, force: bool = False) -> SyncResult: + """Sync review segment thumbnails - delete files not referenced by any review segment. 
+ + Review thumbnails are stored at: CLIPS_DIR/review/thumb-{camera}-{review_id}.webp + The full path is stored in ReviewSegment.thumb_path + """ + result = SyncResult(media_type="review_thumbnails") + + try: + # Get all thumb paths from DB + review_thumb_paths = set( + r.thumb_path + for r in ReviewSegment.select(ReviewSegment.thumb_path) + if r.thumb_path + ) + + # Find review thumbnail files on disk + review_dir = os.path.join(CLIPS_DIR, "review") + thumbnail_files: list[str] = [] + if os.path.isdir(review_dir): + for file in os.listdir(review_dir): + if file.startswith("thumb-") and file.endswith(".webp"): + file_path = os.path.join(review_dir, file) + thumbnail_files.append(file_path) + + result.files_checked = len(thumbnail_files) + + # Find orphans + orphans: list[str] = [] + for file_path in thumbnail_files: + if file_path not in review_thumb_paths: + orphans.append(file_path) + + result.orphans_found = len(orphans) + result.orphan_paths = orphans + + if len(orphans) == 0: + return result + + # Safety check + if ( + result.files_checked > 0 + and len(orphans) / result.files_checked > SAFETY_THRESHOLD + ): + if force: + logger.warning( + f"Review thumbnails sync: Would delete {len(orphans)}/{result.files_checked} " + f"({len(orphans) / result.files_checked * 100:.2f}%) files (force=True, bypassing safety threshold)." + ) + else: + logger.warning( + f"Review thumbnails sync: Would delete {len(orphans)}/{result.files_checked} " + f"({len(orphans) / result.files_checked * 100:.2f}%) files. " + "Aborting due to safety threshold." 
+ ) + result.aborted = True + return result + + if dry_run: + logger.info( + f"Review thumbnails sync (dry run): Found {len(orphans)} orphaned files" + ) + return result + + # Delete orphans + logger.info(f"Deleting {len(orphans)} orphaned review thumbnail files") + for file_path in orphans: + try: + os.unlink(file_path) + result.orphans_deleted += 1 + except OSError as e: + logger.error(f"Failed to delete {file_path}: {e}") + + except Exception as e: + logger.error(f"Error syncing review thumbnails: {e}") + result.error = str(e) + + return result + + +def sync_previews(dry_run: bool = False, force: bool = False) -> SyncResult: + """Sync preview files - delete files not referenced by any preview record. + + Previews are stored at: CLIPS_DIR/previews/{camera}/*.mp4 + The full path is stored in Previews.path + """ + result = SyncResult(media_type="previews") + + try: + # Get all preview paths from DB + preview_paths = set(p.path for p in Previews.select(Previews.path) if p.path) + + # Find preview files on disk + previews_dir = os.path.join(CLIPS_DIR, "previews") + preview_files: list[str] = [] + if os.path.isdir(previews_dir): + for camera_dir in os.listdir(previews_dir): + camera_path = os.path.join(previews_dir, camera_dir) + if not os.path.isdir(camera_path): + continue + for file in os.listdir(camera_path): + if file.endswith(".mp4"): + file_path = os.path.join(camera_path, file) + preview_files.append(file_path) + + result.files_checked = len(preview_files) + + # Find orphans + orphans: list[str] = [] + for file_path in preview_files: + if file_path not in preview_paths: + orphans.append(file_path) + + result.orphans_found = len(orphans) + result.orphan_paths = orphans + + if len(orphans) == 0: + return result + + # Safety check + if ( + result.files_checked > 0 + and len(orphans) / result.files_checked > SAFETY_THRESHOLD + ): + if force: + logger.warning( + f"Previews sync: Would delete {len(orphans)}/{result.files_checked} " + f"({len(orphans) / 
result.files_checked * 100:.2f}%) files (force=True, bypassing safety threshold)." + ) + else: + logger.warning( + f"Previews sync: Would delete {len(orphans)}/{result.files_checked} " + f"({len(orphans) / result.files_checked * 100:.2f}%) files. " + "Aborting due to safety threshold." + ) + result.aborted = True + return result + + if dry_run: + logger.info(f"Previews sync (dry run): Found {len(orphans)} orphaned files") + return result + + # Delete orphans + logger.info(f"Deleting {len(orphans)} orphaned preview files") + for file_path in orphans: + try: + os.unlink(file_path) + result.orphans_deleted += 1 + except OSError as e: + logger.error(f"Failed to delete {file_path}: {e}") + + except Exception as e: + logger.error(f"Error syncing previews: {e}") + result.error = str(e) + + return result + + +def sync_exports(dry_run: bool = False, force: bool = False) -> SyncResult: + """Sync export files - delete files not referenced by any export record. + + Export videos are stored at: EXPORT_DIR/*.mp4 + Export thumbnails are stored at: CLIPS_DIR/export/*.jpg + The paths are stored in Export.video_path and Export.thumb_path + """ + result = SyncResult(media_type="exports") + + try: + # Get all export paths from DB + export_video_paths = set() + export_thumb_paths = set() + for e in Export.select(Export.video_path, Export.thumb_path): + if e.video_path: + export_video_paths.add(e.video_path) + if e.thumb_path: + export_thumb_paths.add(e.thumb_path) + + # Find export video files on disk + export_files: list[str] = [] + if os.path.isdir(EXPORT_DIR): + for file in os.listdir(EXPORT_DIR): + if file.endswith(".mp4"): + file_path = os.path.join(EXPORT_DIR, file) + export_files.append(file_path) + + # Find export thumbnail files on disk + export_thumb_dir = os.path.join(CLIPS_DIR, "export") + thumb_files: list[str] = [] + if os.path.isdir(export_thumb_dir): + for file in os.listdir(export_thumb_dir): + if file.endswith(".jpg"): + file_path = os.path.join(export_thumb_dir, 
file) + thumb_files.append(file_path) + + result.files_checked = len(export_files) + len(thumb_files) + + # Find orphans + orphans: list[str] = [] + for file_path in export_files: + if file_path not in export_video_paths: + orphans.append(file_path) + for file_path in thumb_files: + if file_path not in export_thumb_paths: + orphans.append(file_path) + + result.orphans_found = len(orphans) + result.orphan_paths = orphans + + if len(orphans) == 0: + return result + + # Safety check + if ( + result.files_checked > 0 + and len(orphans) / result.files_checked > SAFETY_THRESHOLD + ): + if force: + logger.warning( + f"Exports sync: Would delete {len(orphans)}/{result.files_checked} " + f"({len(orphans) / result.files_checked * 100:.2f}%) files (force=True, bypassing safety threshold)." + ) + else: + logger.warning( + f"Exports sync: Would delete {len(orphans)}/{result.files_checked} " + f"({len(orphans) / result.files_checked * 100:.2f}%) files. " + "Aborting due to safety threshold." + ) + result.aborted = True + return result + + if dry_run: + logger.info(f"Exports sync (dry run): Found {len(orphans)} orphaned files") + return result + + # Delete orphans + logger.info(f"Deleting {len(orphans)} orphaned export files") + for file_path in orphans: + try: + os.unlink(file_path) + result.orphans_deleted += 1 + except OSError as e: + logger.error(f"Failed to delete {file_path}: {e}") + + except Exception as e: + logger.error(f"Error syncing exports: {e}") + result.error = str(e) + + return result + + +@dataclass +class MediaSyncResults: + """Combined results from all media sync operations.""" + + event_snapshots: SyncResult | None = None + event_thumbnails: SyncResult | None = None + review_thumbnails: SyncResult | None = None + previews: SyncResult | None = None + exports: SyncResult | None = None + recordings: SyncResult | None = None + + @property + def total_files_checked(self) -> int: + total = 0 + for result in [ + self.event_snapshots, + self.event_thumbnails, + 
self.review_thumbnails, + self.previews, + self.exports, + self.recordings, + ]: + if result: + total += result.files_checked + return total + + @property + def total_orphans_found(self) -> int: + total = 0 + for result in [ + self.event_snapshots, + self.event_thumbnails, + self.review_thumbnails, + self.previews, + self.exports, + self.recordings, + ]: + if result: + total += result.orphans_found + return total + + @property + def total_orphans_deleted(self) -> int: + total = 0 + for result in [ + self.event_snapshots, + self.event_thumbnails, + self.review_thumbnails, + self.previews, + self.exports, + self.recordings, + ]: + if result: + total += result.orphans_deleted + return total + + def to_dict(self) -> dict: + """Convert results to dictionary for API response.""" + results = {} + for name, result in [ + ("event_snapshots", self.event_snapshots), + ("event_thumbnails", self.event_thumbnails), + ("review_thumbnails", self.review_thumbnails), + ("previews", self.previews), + ("exports", self.exports), + ("recordings", self.recordings), + ]: + if result: + results[name] = { + "files_checked": result.files_checked, + "orphans_found": result.orphans_found, + "orphans_deleted": result.orphans_deleted, + "aborted": result.aborted, + "error": result.error, + } + results["totals"] = { + "files_checked": self.total_files_checked, + "orphans_found": self.total_orphans_found, + "orphans_deleted": self.total_orphans_deleted, + } + return results + + +def sync_all_media( + dry_run: bool = False, media_types: list[str] = ["all"], force: bool = False +) -> MediaSyncResults: + """Sync specified media types with the database. + + Args: + dry_run: If True, only report orphans without deleting them. + media_types: List of media types to sync. Can include: 'all', 'event_snapshots', + 'event_thumbnails', 'review_thumbnails', 'previews', 'exports', 'recordings' + force: If True, bypass safety threshold checks. + + Returns: + MediaSyncResults with details of each sync operation. 
+ """ + logger.debug( + f"Starting media sync (dry_run={dry_run}, media_types={media_types}, force={force})" + ) + + results = MediaSyncResults() + + # Determine which media types to sync + sync_all = "all" in media_types + + if sync_all or "event_snapshots" in media_types: + results.event_snapshots = sync_event_snapshots(dry_run=dry_run, force=force) + + if sync_all or "event_thumbnails" in media_types: + results.event_thumbnails = sync_event_thumbnails(dry_run=dry_run, force=force) + + if sync_all or "review_thumbnails" in media_types: + results.review_thumbnails = sync_review_thumbnails(dry_run=dry_run, force=force) + + if sync_all or "previews" in media_types: + results.previews = sync_previews(dry_run=dry_run, force=force) + + if sync_all or "exports" in media_types: + results.exports = sync_exports(dry_run=dry_run, force=force) + + if sync_all or "recordings" in media_types: + results.recordings = sync_recordings(dry_run=dry_run, force=force) + + logger.info( + f"Media sync complete: checked {results.total_files_checked} files, " + f"found {results.total_orphans_found} orphans, " + f"deleted {results.total_orphans_deleted}" + ) + + return results diff --git a/frigate/util/services.py b/frigate/util/services.py index 64d83833d..19ec4efdf 100644 --- a/frigate/util/services.py +++ b/frigate/util/services.py @@ -417,12 +417,12 @@ def get_openvino_npu_stats() -> Optional[dict[str, str]]: else: usage = 0.0 - return {"npu": f"{round(usage, 2)}", "mem": "-"} + return {"npu": f"{round(usage, 2)}", "mem": "-%"} except (FileNotFoundError, PermissionError, ValueError): return None -def get_rockchip_gpu_stats() -> Optional[dict[str, str]]: +def get_rockchip_gpu_stats() -> Optional[dict[str, str | float]]: """Get GPU stats using rk.""" try: with open("/sys/kernel/debug/rkrga/load", "r") as f: @@ -440,7 +440,16 @@ def get_rockchip_gpu_stats() -> Optional[dict[str, str]]: return None average_load = f"{round(sum(load_values) / len(load_values), 2)}%" - return {"gpu": 
average_load, "mem": "-"} + stats: dict[str, str | float] = {"gpu": average_load, "mem": "-%"} + + try: + with open("/sys/class/thermal/thermal_zone5/temp", "r") as f: + line = f.readline().strip() + stats["temp"] = round(int(line) / 1000, 1) + except (FileNotFoundError, OSError, ValueError): + pass + + return stats def get_rockchip_npu_stats() -> Optional[dict[str, float | str]]: @@ -463,13 +472,25 @@ def get_rockchip_npu_stats() -> Optional[dict[str, float | str]]: percentages = [int(load) for load in core_loads] mean = round(sum(percentages) / len(percentages), 2) - return {"npu": mean, "mem": "-"} + stats: dict[str, float | str] = {"npu": mean, "mem": "-%"} + + try: + with open("/sys/class/thermal/thermal_zone6/temp", "r") as f: + line = f.readline().strip() + stats["temp"] = round(int(line) / 1000, 1) + except (FileNotFoundError, OSError, ValueError): + pass + + return stats -def try_get_info(f, h, default="N/A"): +def try_get_info(f, h, default="N/A", sensor=None): try: if h: - v = f(h) + if sensor is not None: + v = f(h, sensor) + else: + v = f(h) else: v = f() except nvml.NVMLError_NotSupported: @@ -498,6 +519,9 @@ def get_nvidia_gpu_stats() -> dict[int, dict]: util = try_get_info(nvml.nvmlDeviceGetUtilizationRates, handle) enc = try_get_info(nvml.nvmlDeviceGetEncoderUtilization, handle) dec = try_get_info(nvml.nvmlDeviceGetDecoderUtilization, handle) + temp = try_get_info( + nvml.nvmlDeviceGetTemperature, handle, default=None, sensor=0 + ) pstate = try_get_info(nvml.nvmlDeviceGetPowerState, handle, default=None) if util != "N/A": @@ -510,6 +534,11 @@ def get_nvidia_gpu_stats() -> dict[int, dict]: else: gpu_mem_util = -1 + if temp != "N/A" and temp is not None: + temp = float(temp) + else: + temp = None + if enc != "N/A": enc_util = enc[0] else: @@ -527,6 +556,7 @@ def get_nvidia_gpu_stats() -> dict[int, dict]: "enc": enc_util, "dec": dec_util, "pstate": pstate or "unknown", + "temp": temp, } except Exception: pass @@ -556,6 +586,53 @@ def 
get_jetson_stats() -> Optional[dict[int, dict]]: return results +def get_hailo_temps() -> dict[str, float]: + """Get temperatures for Hailo devices.""" + try: + from hailo_platform import Device + except ModuleNotFoundError: + return {} + + temps = {} + + try: + device_ids = Device.scan() + for i, device_id in enumerate(device_ids): + try: + with Device(device_id) as device: + temp_info = device.control.get_chip_temperature() + + # Get board name and normalise it + identity = device.control.identify() + board_name = None + for line in str(identity).split("\n"): + if line.startswith("Board Name:"): + board_name = ( + line.split(":", 1)[1].strip().lower().replace("-", "") + ) + break + + if not board_name: + board_name = f"hailo{i}" + + # Use indexed name if multiple devices, otherwise just the board name + device_name = ( + f"{board_name}-{i}" if len(device_ids) > 1 else board_name + ) + + # ts1_temperature is also available, but appeared to be the same as ts0 in testing. + temps[device_name] = round(temp_info.ts0_temperature, 1) + except Exception as e: + logger.debug( + f"Failed to get temperature for Hailo device {device_id}: {e}" + ) + continue + except Exception as e: + logger.debug(f"Failed to scan for Hailo devices: {e}") + + return temps + + def ffprobe_stream(ffmpeg, path: str, detailed: bool = False) -> sp.CompletedProcess: """Run ffprobe on stream.""" clean_path = escape_special_characters(path) @@ -591,12 +668,17 @@ def ffprobe_stream(ffmpeg, path: str, detailed: bool = False) -> sp.CompletedPro def vainfo_hwaccel(device_name: Optional[str] = None) -> sp.CompletedProcess: """Run vainfo.""" - ffprobe_cmd = ( - ["vainfo"] - if not device_name - else ["vainfo", "--display", "drm", "--device", f"/dev/dri/{device_name}"] - ) - return sp.run(ffprobe_cmd, capture_output=True) + if not device_name: + cmd = ["vainfo"] + else: + if os.path.isabs(device_name) and device_name.startswith("/dev/dri/"): + device_path = device_name + else: + device_path = 
f"/dev/dri/{device_name}" + + cmd = ["vainfo", "--display", "drm", "--device", device_path] + + return sp.run(cmd, capture_output=True) def get_nvidia_driver_info() -> dict[str, Any]: diff --git a/frigate/video.py b/frigate/video.py index 112844543..5e42619dd 100755 --- a/frigate/video.py +++ b/frigate/video.py @@ -3,6 +3,7 @@ import queue import subprocess as sp import threading import time +from collections import deque from datetime import datetime, timedelta, timezone from multiprocessing import Queue, Value from multiprocessing.synchronize import Event as MpEvent @@ -117,6 +118,7 @@ def capture_frames( frame_rate.start() skipped_eps = EventsPerSecond() skipped_eps.start() + config_subscriber = CameraConfigUpdateSubscriber( None, {config.name: config}, [CameraConfigUpdateEnum.enabled] ) @@ -181,6 +183,9 @@ class CameraWatchdog(threading.Thread): camera_fps, skipped_fps, ffmpeg_pid, + stalls, + reconnects, + detection_frame, stop_event, ): threading.Thread.__init__(self) @@ -201,6 +206,10 @@ class CameraWatchdog(threading.Thread): self.frame_index = 0 self.stop_event = stop_event self.sleeptime = self.config.ffmpeg.retry_interval + self.reconnect_timestamps = deque() + self.stalls = stalls + self.reconnects = reconnects + self.detection_frame = detection_frame self.config_subscriber = CameraConfigUpdateSubscriber( None, @@ -216,6 +225,35 @@ class CameraWatchdog(threading.Thread): self.latest_cache_segment_time: float = 0 self.record_enable_time: datetime | None = None + # Stall tracking (based on last processed frame) + self._stall_timestamps: deque[float] = deque() + self._stall_active: bool = False + + # Status caching to reduce message volume + self._last_detect_status: str | None = None + self._last_record_status: str | None = None + self._last_status_update_time: float = 0.0 + + def _send_detect_status(self, status: str, now: float) -> None: + """Send detect status only if changed or retry_interval has elapsed.""" + if ( + status != self._last_detect_status 
+ or (now - self._last_status_update_time) >= self.sleeptime + ): + self.requestor.send_data(f"{self.config.name}/status/detect", status) + self._last_detect_status = status + self._last_status_update_time = now + + def _send_record_status(self, status: str, now: float) -> None: + """Send record status only if changed or retry_interval has elapsed.""" + if ( + status != self._last_record_status + or (now - self._last_status_update_time) >= self.sleeptime + ): + self.requestor.send_data(f"{self.config.name}/status/record", status) + self._last_record_status = status + self._last_status_update_time = now + def _update_enabled_state(self) -> bool: """Fetch the latest config and update enabled state.""" self.config_subscriber.check_for_updates() @@ -242,6 +280,14 @@ class CameraWatchdog(threading.Thread): else: self.ffmpeg_detect_process.wait() + # Update reconnects + now = datetime.now().timestamp() + self.reconnect_timestamps.append(now) + while self.reconnect_timestamps and self.reconnect_timestamps[0] < now - 3600: + self.reconnect_timestamps.popleft() + if self.reconnects: + self.reconnects.value = len(self.reconnect_timestamps) + # Wait for old capture thread to fully exit before starting a new one if self.capture_thread is not None and self.capture_thread.is_alive(): self.logger.info("Waiting for capture thread to exit...") @@ -267,7 +313,10 @@ class CameraWatchdog(threading.Thread): self.record_enable_time = datetime.now().astimezone(timezone.utc) time.sleep(self.sleeptime) - while not self.stop_event.wait(self.sleeptime): + last_restart_time = datetime.now().timestamp() + + # 1 second watchdog loop + while not self.stop_event.wait(1): enabled = self._update_enabled_state() if enabled != self.was_enabled: if enabled: @@ -285,12 +334,9 @@ class CameraWatchdog(threading.Thread): self.record_enable_time = None # update camera status - self.requestor.send_data( - f"{self.config.name}/status/detect", "disabled" - ) - self.requestor.send_data( - 
f"{self.config.name}/status/record", "disabled" - ) + now = datetime.now().timestamp() + self._send_detect_status("disabled", now) + self._send_record_status("disabled", now) self.was_enabled = enabled continue @@ -329,36 +375,44 @@ class CameraWatchdog(threading.Thread): now = datetime.now().timestamp() + # Check if enough time has passed to allow ffmpeg restart (backoff pacing) + time_since_last_restart = now - last_restart_time + can_restart = time_since_last_restart >= self.sleeptime + if not self.capture_thread.is_alive(): - self.requestor.send_data(f"{self.config.name}/status/detect", "offline") + self._send_detect_status("offline", now) self.camera_fps.value = 0 self.logger.error( f"Ffmpeg process crashed unexpectedly for {self.config.name}." ) - self.reset_capture_thread(terminate=False) + if can_restart: + self.reset_capture_thread(terminate=False) + last_restart_time = now elif self.camera_fps.value >= (self.config.detect.fps + 10): self.fps_overflow_count += 1 if self.fps_overflow_count == 3: - self.requestor.send_data( - f"{self.config.name}/status/detect", "offline" - ) + self._send_detect_status("offline", now) self.fps_overflow_count = 0 self.camera_fps.value = 0 self.logger.info( f"{self.config.name} exceeded fps limit. Exiting ffmpeg..." ) - self.reset_capture_thread(drain_output=False) + if can_restart: + self.reset_capture_thread(drain_output=False) + last_restart_time = now elif now - self.capture_thread.current_frame.value > 20: - self.requestor.send_data(f"{self.config.name}/status/detect", "offline") + self._send_detect_status("offline", now) self.camera_fps.value = 0 self.logger.info( f"No frames received from {self.config.name} in 20 seconds. Exiting ffmpeg..." 
) - self.reset_capture_thread() + if can_restart: + self.reset_capture_thread() + last_restart_time = now else: # process is running normally - self.requestor.send_data(f"{self.config.name}/status/detect", "online") + self._send_detect_status("online", now) self.fps_overflow_count = 0 for p in self.ffmpeg_other_processes: @@ -441,9 +495,7 @@ class CameraWatchdog(threading.Thread): continue else: - self.requestor.send_data( - f"{self.config.name}/status/record", "online" - ) + self._send_record_status("online", now) p["latest_segment_time"] = self.latest_cache_segment_time if poll is None: @@ -459,6 +511,34 @@ class CameraWatchdog(threading.Thread): p["cmd"], self.logger, p["logpipe"], ffmpeg_process=p["process"] ) + # Update stall metrics based on last processed frame timestamp + now = datetime.now().timestamp() + processed_ts = ( + float(self.detection_frame.value) if self.detection_frame else 0.0 + ) + if processed_ts > 0: + delta = now - processed_ts + observed_fps = ( + self.camera_fps.value + if self.camera_fps.value > 0 + else self.config.detect.fps + ) + interval = 1.0 / max(observed_fps, 0.1) + stall_threshold = max(2.0 * interval, 2.0) + + if delta > stall_threshold: + if not self._stall_active: + self._stall_timestamps.append(now) + self._stall_active = True + else: + self._stall_active = False + + while self._stall_timestamps and self._stall_timestamps[0] < now - 3600: + self._stall_timestamps.popleft() + + if self.stalls: + self.stalls.value = len(self._stall_timestamps) + self.stop_all_ffmpeg() self.logpipe.close() self.config_subscriber.stop() @@ -596,6 +676,9 @@ class CameraCapture(FrigateProcess): self.camera_metrics.camera_fps, self.camera_metrics.skipped_fps, self.camera_metrics.ffmpeg_pid, + self.camera_metrics.stalls_last_hour, + self.camera_metrics.reconnects_last_hour, + self.camera_metrics.detection_frame, self.stop_event, ) camera_watchdog.start() diff --git a/migrations/033_create_export_case_table.py 
b/migrations/033_create_export_case_table.py new file mode 100644 index 000000000..08edcbc32 --- /dev/null +++ b/migrations/033_create_export_case_table.py @@ -0,0 +1,50 @@ +"""Peewee migrations -- 033_create_export_case_table.py. + +Some examples (model - class or model name):: + + > Model = migrator.orm['model_name'] # Return model in current state by name + + > migrator.sql(sql) # Run custom SQL + > migrator.python(func, *args, **kwargs) # Run python code + > migrator.create_model(Model) # Create a model (could be used as decorator) + > migrator.remove_model(model, cascade=True) # Remove a model + > migrator.add_fields(model, **fields) # Add fields to a model + > migrator.change_fields(model, **fields) # Change fields + > migrator.remove_fields(model, *field_names, cascade=True) + > migrator.rename_field(model, old_field_name, new_field_name) + > migrator.rename_table(model, new_table_name) + > migrator.add_index(model, *col_names, unique=False) + > migrator.drop_index(model, *col_names) + > migrator.add_not_null(model, *field_names) + > migrator.drop_not_null(model, *field_names) + > migrator.add_default(model, field_name, default) + +""" + +import peewee as pw + +SQL = pw.SQL + + +def migrate(migrator, database, fake=False, **kwargs): + migrator.sql( + """ + CREATE TABLE IF NOT EXISTS "exportcase" ( + "id" VARCHAR(30) NOT NULL PRIMARY KEY, + "name" VARCHAR(100) NOT NULL, + "description" TEXT NULL, + "created_at" DATETIME NOT NULL, + "updated_at" DATETIME NOT NULL + ) + """ + ) + migrator.sql( + 'CREATE INDEX IF NOT EXISTS "exportcase_name" ON "exportcase" ("name")' + ) + migrator.sql( + 'CREATE INDEX IF NOT EXISTS "exportcase_created_at" ON "exportcase" ("created_at")' + ) + + +def rollback(migrator, database, fake=False, **kwargs): + pass diff --git a/migrations/034_add_export_case_to_exports.py b/migrations/034_add_export_case_to_exports.py new file mode 100644 index 000000000..da9e1d4ac --- /dev/null +++ b/migrations/034_add_export_case_to_exports.py @@ 
-0,0 +1,40 @@ +"""Peewee migrations -- 034_add_export_case_to_exports.py. + +Some examples (model - class or model name):: + + > Model = migrator.orm['model_name'] # Return model in current state by name + + > migrator.sql(sql) # Run custom SQL + > migrator.python(func, *args, **kwargs) # Run python code + > migrator.create_model(Model) # Create a model (could be used as decorator) + > migrator.remove_model(model, cascade=True) # Remove a model + > migrator.add_fields(model, **fields) # Add fields to a model + > migrator.change_fields(model, **fields) # Change fields + > migrator.remove_fields(model, *field_names, cascade=True) + > migrator.rename_field(model, old_field_name, new_field_name) + > migrator.rename_table(model, new_table_name) + > migrator.add_index(model, *col_names, unique=False) + > migrator.drop_index(model, *col_names) + > migrator.add_not_null(model, *field_names) + > migrator.drop_not_null(model, *field_names) + > migrator.add_default(model, field_name, default) + +""" + +import peewee as pw + +SQL = pw.SQL + + +def migrate(migrator, database, fake=False, **kwargs): + # Add nullable export_case_id column to export table + migrator.sql('ALTER TABLE "export" ADD COLUMN "export_case_id" VARCHAR(30) NULL') + + # Index for faster case-based queries + migrator.sql( + 'CREATE INDEX IF NOT EXISTS "export_export_case_id" ON "export" ("export_case_id")' + ) + + +def rollback(migrator, database, fake=False, **kwargs): + pass diff --git a/web/public/locales/en/components/dialog.json b/web/public/locales/en/components/dialog.json index 91ff38d82..9a6f68daf 100644 --- a/web/public/locales/en/components/dialog.json +++ b/web/public/locales/en/components/dialog.json @@ -49,6 +49,10 @@ "name": { "placeholder": "Name the Export" }, + "case": { + "label": "Case", + "placeholder": "Select a case" + }, "select": "Select", "export": "Export", "selectOrExport": "Select or Export", diff --git a/web/public/locales/en/config/cameras.json 
b/web/public/locales/en/config/cameras.json index 67015bde5..d2c74dc54 100644 --- a/web/public/locales/en/config/cameras.json +++ b/web/public/locales/en/config/cameras.json @@ -324,9 +324,6 @@ "enabled": { "label": "Enable record on all cameras." }, - "sync_recordings": { - "label": "Sync recordings with disk on startup and once a day." - }, "expire_interval": { "label": "Number of minutes to wait between cleanup runs." }, @@ -758,4 +755,4 @@ "label": "Keep track of original state of camera." } } -} \ No newline at end of file +} diff --git a/web/public/locales/en/config/networking.json b/web/public/locales/en/config/networking.json index 0f8d9cc54..592ea9477 100644 --- a/web/public/locales/en/config/networking.json +++ b/web/public/locales/en/config/networking.json @@ -2,12 +2,23 @@ "label": "Networking configuration", "properties": { "ipv6": { - "label": "Network configuration", + "label": "IPv6 configuration", "properties": { "enabled": { "label": "Enable IPv6 for port 5000 and/or 8971" } } + }, + "listen": { + "label": "Listening ports configuration", + "properties": { + "internal": { + "label": "Internal listening port for Frigate" + }, + "external": { + "label": "External listening port for Frigate" + } + } } } -} \ No newline at end of file +} diff --git a/web/public/locales/en/config/record.json b/web/public/locales/en/config/record.json index 81139084e..0c4a5fc42 100644 --- a/web/public/locales/en/config/record.json +++ b/web/public/locales/en/config/record.json @@ -4,9 +4,6 @@ "enabled": { "label": "Enable record on all cameras." }, - "sync_recordings": { - "label": "Sync recordings with disk on startup and once a day." - }, "expire_interval": { "label": "Number of minutes to wait between cleanup runs." }, @@ -90,4 +87,4 @@ "label": "Keep track of original state of recording." 
} } -} \ No newline at end of file +} diff --git a/web/public/locales/en/views/exports.json b/web/public/locales/en/views/exports.json index 4a79d20e1..8f9e8205e 100644 --- a/web/public/locales/en/views/exports.json +++ b/web/public/locales/en/views/exports.json @@ -2,6 +2,10 @@ "documentTitle": "Export - Frigate", "search": "Search", "noExports": "No exports found", + "headings": { + "cases": "Cases", + "uncategorizedExports": "Uncategorized Exports" + }, "deleteExport": "Delete Export", "deleteExport.desc": "Are you sure you want to delete {{exportName}}?", "editExport": { @@ -13,11 +17,21 @@ "shareExport": "Share export", "downloadVideo": "Download video", "editName": "Edit name", - "deleteExport": "Delete export" + "deleteExport": "Delete export", + "assignToCase": "Add to case" }, "toast": { "error": { - "renameExportFailed": "Failed to rename export: {{errorMessage}}" + "renameExportFailed": "Failed to rename export: {{errorMessage}}", + "assignCaseFailed": "Failed to update case assignment: {{errorMessage}}" } + }, + "caseDialog": { + "title": "Add to case", + "description": "Choose an existing case or create a new one.", + "selectLabel": "Case", + "newCaseOption": "Create new case", + "nameLabel": "Case name", + "descriptionLabel": "Description" } } diff --git a/web/public/locales/en/views/settings.json b/web/public/locales/en/views/settings.json index ea2869986..a84c15619 100644 --- a/web/public/locales/en/views/settings.json +++ b/web/public/locales/en/views/settings.json @@ -1067,5 +1067,53 @@ "deleteTriggerFailed": "Failed to delete trigger: {{errorMessage}}" } } + }, + "maintenance": { + "title": "Maintenance", + "sync": { + "title": "Media Sync", + "desc": "Frigate will periodically clean up media on a regular schedule according to your retention configuration. It is normal to see a few orphaned files as Frigate runs. 
Use this feature to remove orphaned media files from disk that are no longer referenced in the database.", + "started": "Media sync started.", + "alreadyRunning": "A sync job is already running", + "error": "Failed to start sync", + "currentStatus": "Status", + "jobId": "Job ID", + "startTime": "Start Time", + "endTime": "End Time", + "statusLabel": "Status", + "results": "Results", + "errorLabel": "Error", + "mediaTypes": "Media Types", + "allMedia": "All Media", + "dryRun": "Dry Run", + "dryRunEnabled": "No files will be deleted", + "dryRunDisabled": "Files will be deleted", + "force": "Force", + "forceDesc": "Bypass safety threshold and complete sync even if more than 50% of the files would be deleted.", + "running": "Sync Running...", + "start": "Start Sync", + "inProgress": "Sync is in progress. This page is disabled.", + "status": { + "queued": "Queued", + "running": "Running", + "completed": "Completed", + "failed": "Failed", + "notRunning": "Not Running" + }, + "resultsFields": { + "filesChecked": "Files Checked", + "orphansFound": "Orphans Found", + "orphansDeleted": "Orphans Deleted", + "aborted": "Aborted. 
Deletion would exceed safety threshold.", + "error": "Error", + "totals": "Totals" + }, + "event_snapshots": "Tracked Object Snapshots", + "event_thumbnails": "Tracked Object Thumbnails", + "review_thumbnails": "Review Thumbnails", + "previews": "Previews", + "exports": "Exports", + "recordings": "Recordings" + } } } diff --git a/web/public/locales/en/views/system.json b/web/public/locales/en/views/system.json index da774e302..8ddbc03e1 100644 --- a/web/public/locales/en/views/system.json +++ b/web/public/locales/en/views/system.json @@ -51,6 +51,7 @@ "gpuMemory": "GPU Memory", "gpuEncoder": "GPU Encoder", "gpuDecoder": "GPU Decoder", + "gpuTemperature": "GPU Temperature", "gpuInfo": { "vainfoOutput": { "title": "Vainfo Output", @@ -77,6 +78,7 @@ }, "npuUsage": "NPU Usage", "npuMemory": "NPU Memory", + "npuTemperature": "NPU Temperature", "intelGpuWarning": { "title": "Intel GPU Stats Warning", "message": "GPU stats unavailable", @@ -158,6 +160,17 @@ "cameraDetectionsPerSecond": "{{camName}} detections per second", "cameraSkippedDetectionsPerSecond": "{{camName}} skipped detections per second" }, + "connectionQuality": { + "title": "Connection Quality", + "excellent": "Excellent", + "fair": "Fair", + "poor": "Poor", + "unusable": "Unusable", + "fps": "FPS", + "expectedFps": "Expected FPS", + "reconnectsLastHour": "Reconnects (last hour)", + "stallsLastHour": "Stalls (last hour)" + }, "toast": { "success": { "copyToClipboard": "Copied probe data to clipboard." 
diff --git a/web/src/api/ws.tsx b/web/src/api/ws.tsx index 44d45ea2f..6bb2fdc32 100644 --- a/web/src/api/ws.tsx +++ b/web/src/api/ws.tsx @@ -11,6 +11,7 @@ import { TrackedObjectUpdateReturnType, TriggerStatus, FrigateAudioDetections, + Job, } from "@/types/ws"; import { FrigateStats } from "@/types/stats"; import { createContainer } from "react-tracked"; @@ -651,3 +652,40 @@ export function useTriggers(): { payload: TriggerStatus } { : { name: "", camera: "", event_id: "", type: "", score: 0 }; return { payload: useDeepMemo(parsed) }; } + +export function useJobStatus( + jobType: string, + revalidateOnFocus: boolean = true, +): { payload: Job | null } { + const { + value: { payload }, + send: sendCommand, + } = useWs("job_state", "jobState"); + + const jobData = useDeepMemo( + payload && typeof payload === "string" ? JSON.parse(payload) : {}, + ); + const currentJob = jobData[jobType] || null; + + useEffect(() => { + let listener: (() => void) | undefined; + if (revalidateOnFocus) { + sendCommand("jobState"); + listener = () => { + if (document.visibilityState === "visible") { + sendCommand("jobState"); + } + }; + addEventListener("visibilitychange", listener); + } + + return () => { + if (listener) { + removeEventListener("visibilitychange", listener); + } + }; + // eslint-disable-next-line react-hooks/exhaustive-deps + }, [revalidateOnFocus]); + + return { payload: currentJob as Job | null }; +} diff --git a/web/src/components/auth/ProtectedRoute.tsx b/web/src/components/auth/ProtectedRoute.tsx index cedf5a15a..a7d1b3596 100644 --- a/web/src/components/auth/ProtectedRoute.tsx +++ b/web/src/components/auth/ProtectedRoute.tsx @@ -47,7 +47,7 @@ export default function ProtectedRoute({ return ; } - // Authenticated mode (8971): require login + // Authenticated mode (external port): require login if (!auth.user) { return ( diff --git a/web/src/components/camera/ConnectionQualityIndicator.tsx b/web/src/components/camera/ConnectionQualityIndicator.tsx new file mode 
100644 index 000000000..3ea3c4f19 --- /dev/null +++ b/web/src/components/camera/ConnectionQualityIndicator.tsx @@ -0,0 +1,76 @@ +import { useTranslation } from "react-i18next"; +import { + Tooltip, + TooltipContent, + TooltipTrigger, +} from "@/components/ui/tooltip"; +import { cn } from "@/lib/utils"; + +type ConnectionQualityIndicatorProps = { + quality: "excellent" | "fair" | "poor" | "unusable"; + expectedFps: number; + reconnects: number; + stalls: number; +}; + +export function ConnectionQualityIndicator({ + quality, + expectedFps, + reconnects, + stalls, +}: ConnectionQualityIndicatorProps) { + const { t } = useTranslation(["views/system"]); + + const getColorClass = (quality: string): string => { + switch (quality) { + case "excellent": + return "bg-success"; + case "fair": + return "bg-yellow-500"; + case "poor": + return "bg-orange-500"; + case "unusable": + return "bg-destructive"; + default: + return "bg-gray-500"; + } + }; + + const qualityLabel = t(`cameras.connectionQuality.${quality}`); + + return ( + + +
+ + +
+
+ {t("cameras.connectionQuality.title")} +
+
+
{qualityLabel}
+
+
+ {t("cameras.connectionQuality.expectedFps")}:{" "} + {expectedFps.toFixed(1)} {t("cameras.connectionQuality.fps")} +
+
+ {t("cameras.connectionQuality.reconnectsLastHour")}:{" "} + {reconnects} +
+
+ {t("cameras.connectionQuality.stallsLastHour")}: {stalls} +
+
+
+
+
+ + ); +} diff --git a/web/src/components/card/ExportCard.tsx b/web/src/components/card/ExportCard.tsx index 021524532..c8d9c4c65 100644 --- a/web/src/components/card/ExportCard.tsx +++ b/web/src/components/card/ExportCard.tsx @@ -1,9 +1,8 @@ import ActivityIndicator from "../indicators/activity-indicator"; -import { LuTrash } from "react-icons/lu"; import { Button } from "../ui/button"; -import { useCallback, useState } from "react"; -import { isDesktop, isMobile } from "react-device-detect"; -import { FaDownload, FaPlay, FaShareAlt } from "react-icons/fa"; +import { useCallback, useMemo, useState } from "react"; +import { isMobile } from "react-device-detect"; +import { FiMoreVertical } from "react-icons/fi"; import { Skeleton } from "../ui/skeleton"; import { Dialog, @@ -14,35 +13,81 @@ import { } from "../ui/dialog"; import { Input } from "../ui/input"; import useKeyboardListener from "@/hooks/use-keyboard-listener"; -import { DeleteClipType, Export } from "@/types/export"; -import { MdEditSquare } from "react-icons/md"; +import { DeleteClipType, Export, ExportCase } from "@/types/export"; import { baseUrl } from "@/api/baseUrl"; import { cn } from "@/lib/utils"; import { shareOrCopy } from "@/utils/browserUtil"; import { useTranslation } from "react-i18next"; import { ImageShadowOverlay } from "../overlay/ImageShadowOverlay"; import BlurredIconButton from "../button/BlurredIconButton"; -import { Tooltip, TooltipContent, TooltipTrigger } from "../ui/tooltip"; import { useIsAdmin } from "@/hooks/use-is-admin"; +import { + DropdownMenu, + DropdownMenuContent, + DropdownMenuItem, + DropdownMenuTrigger, +} from "../ui/dropdown-menu"; +import { FaFolder } from "react-icons/fa"; -type ExportProps = { +type CaseCardProps = { + className: string; + exportCase: ExportCase; + exports: Export[]; + onSelect: () => void; +}; +export function CaseCard({ + className, + exportCase, + exports, + onSelect, +}: CaseCardProps) { + const firstExport = useMemo( + () => 
exports.find((exp) => exp.thumb_path && exp.thumb_path.length > 0), + [exports], + ); + + return ( +
onSelect()} + > + {firstExport && ( + + )} +
+
+ +
{exportCase.name}
+
+
+ ); +} + +type ExportCardProps = { className: string; exportedRecording: Export; onSelect: (selected: Export) => void; onRename: (original: string, update: string) => void; onDelete: ({ file, exportName }: DeleteClipType) => void; + onAssignToCase?: (selected: Export) => void; }; - -export default function ExportCard({ +export function ExportCard({ className, exportedRecording, onSelect, onRename, onDelete, -}: ExportProps) { + onAssignToCase, +}: ExportCardProps) { const { t } = useTranslation(["views/exports"]); const isAdmin = useIsAdmin(); - const [hovered, setHovered] = useState(false); const [loading, setLoading] = useState( exportedRecording.thumb_path.length > 0, ); @@ -136,12 +181,14 @@ export default function ExportCard({
setHovered(true) : undefined} - onMouseLeave={isDesktop ? () => setHovered(false) : undefined} - onClick={isDesktop ? undefined : () => setHovered(!hovered)} + onClick={() => { + if (!exportedRecording.in_progress) { + onSelect(exportedRecording); + } + }} > {exportedRecording.in_progress ? ( @@ -158,95 +205,88 @@ export default function ExportCard({ )} )} - {hovered && ( - <> -
-
-
- {!exportedRecording.in_progress && ( - - - - shareOrCopy( - `${baseUrl}export?id=${exportedRecording.id}`, - exportedRecording.name.replaceAll("_", " "), - ) - } - > - - - - {t("tooltip.shareExport")} - - )} - {!exportedRecording.in_progress && ( + {!exportedRecording.in_progress && ( +
+ + + e.stopPropagation()} + > + + + + + { + e.stopPropagation(); + shareOrCopy( + `${baseUrl}export?id=${exportedRecording.id}`, + exportedRecording.name.replaceAll("_", " "), + ); + }} + > + {t("tooltip.shareExport")} + + e.stopPropagation()} > - - - - - - - - {t("tooltip.downloadVideo")} - - + {t("tooltip.downloadVideo")} - )} - {isAdmin && !exportedRecording.in_progress && ( - - - - setEditName({ - original: exportedRecording.name, - update: undefined, - }) - } - > - - - - {t("tooltip.editName")} - + + {isAdmin && onAssignToCase && ( + { + e.stopPropagation(); + onAssignToCase(exportedRecording); + }} + > + {t("tooltip.assignToCase")} + )} {isAdmin && ( - - - - onDelete({ - file: exportedRecording.id, - exportName: exportedRecording.name, - }) - } - > - - - - {t("tooltip.deleteExport")} - + { + e.stopPropagation(); + setEditName({ + original: exportedRecording.name, + update: undefined, + }); + }} + > + {t("tooltip.editName")} + )} -
-
- - {!exportedRecording.in_progress && ( - - )} - + {isAdmin && ( + { + e.stopPropagation(); + onDelete({ + file: exportedRecording.id, + exportName: exportedRecording.name, + }); + }} + > + {t("tooltip.deleteExport")} + + )} + + +
)} {loading && ( diff --git a/web/src/components/filter/ExportFilterGroup.tsx b/web/src/components/filter/ExportFilterGroup.tsx new file mode 100644 index 000000000..c5fe4f33c --- /dev/null +++ b/web/src/components/filter/ExportFilterGroup.tsx @@ -0,0 +1,67 @@ +import { cn } from "@/lib/utils"; +import { + DEFAULT_EXPORT_FILTERS, + ExportFilter, + ExportFilters, +} from "@/types/export"; +import { CamerasFilterButton } from "./CamerasFilterButton"; +import { useAllowedCameras } from "@/hooks/use-allowed-cameras"; +import { useMemo } from "react"; +import { FrigateConfig } from "@/types/frigateConfig"; +import useSWR from "swr"; + +type ExportFilterGroupProps = { + className: string; + filters?: ExportFilters[]; + filter?: ExportFilter; + onUpdateFilter: (filter: ExportFilter) => void; +}; +export default function ExportFilterGroup({ + className, + filter, + filters = DEFAULT_EXPORT_FILTERS, + onUpdateFilter, +}: ExportFilterGroupProps) { + const { data: config } = useSWR("config", { + revalidateOnFocus: false, + }); + const allowedCameras = useAllowedCameras(); + + const filterValues = useMemo( + () => ({ + cameras: allowedCameras, + }), + [allowedCameras], + ); + + const groups = useMemo(() => { + if (!config) { + return []; + } + + return Object.entries(config.camera_groups).sort( + (a, b) => a[1].order - b[1].order, + ); + }, [config]); + + return ( +
+ {filters.includes("cameras") && ( + { + onUpdateFilter({ ...filter, cameras: newCameras }); + }} + /> + )} +
+ ); +} diff --git a/web/src/components/overlay/ExportDialog.tsx b/web/src/components/overlay/ExportDialog.tsx index b8b5b9911..738aa689e 100644 --- a/web/src/components/overlay/ExportDialog.tsx +++ b/web/src/components/overlay/ExportDialog.tsx @@ -22,7 +22,14 @@ import useSWR from "swr"; import { FrigateConfig } from "@/types/frigateConfig"; import { Popover, PopoverContent, PopoverTrigger } from "../ui/popover"; import { TimezoneAwareCalendar } from "./ReviewActivityCalendar"; -import { SelectSeparator } from "../ui/select"; +import { + Select, + SelectContent, + SelectItem, + SelectSeparator, + SelectTrigger, + SelectValue, +} from "../ui/select"; import { isDesktop, isIOS, isMobile } from "react-device-detect"; import { Drawer, DrawerContent, DrawerTrigger } from "../ui/drawer"; import SaveExportOverlay from "./SaveExportOverlay"; @@ -31,6 +38,7 @@ import { baseUrl } from "@/api/baseUrl"; import { cn } from "@/lib/utils"; import { GenericVideoPlayer } from "../player/GenericVideoPlayer"; import { useTranslation } from "react-i18next"; +import { ExportCase } from "@/types/export"; const EXPORT_OPTIONS = [ "1", @@ -67,6 +75,9 @@ export default function ExportDialog({ }: ExportDialogProps) { const { t } = useTranslation(["components/dialog"]); const [name, setName] = useState(""); + const [selectedCaseId, setSelectedCaseId] = useState( + undefined, + ); const onStartExport = useCallback(() => { if (!range) { @@ -89,6 +100,7 @@ export default function ExportDialog({ { playback: "realtime", name, + export_case_id: selectedCaseId || undefined, }, ) .then((response) => { @@ -102,6 +114,7 @@ export default function ExportDialog({ ), }); setName(""); + setSelectedCaseId(undefined); setRange(undefined); setMode("none"); } @@ -118,10 +131,11 @@ export default function ExportDialog({ { position: "top-center" }, ); }); - }, [camera, name, range, setRange, setName, setMode, t]); + }, [camera, name, range, selectedCaseId, setRange, setName, setMode, t]); const handleCancel = 
useCallback(() => { setName(""); + setSelectedCaseId(undefined); setMode("none"); setRange(undefined); }, [setMode, setRange]); @@ -190,8 +204,10 @@ export default function ExportDialog({ currentTime={currentTime} range={range} name={name} + selectedCaseId={selectedCaseId} onStartExport={onStartExport} setName={setName} + setSelectedCaseId={setSelectedCaseId} setRange={setRange} setMode={setMode} onCancel={handleCancel} @@ -207,8 +223,10 @@ type ExportContentProps = { currentTime: number; range?: TimeRange; name: string; + selectedCaseId?: string; onStartExport: () => void; setName: (name: string) => void; + setSelectedCaseId: (caseId: string | undefined) => void; setRange: (range: TimeRange | undefined) => void; setMode: (mode: ExportMode) => void; onCancel: () => void; @@ -218,14 +236,17 @@ export function ExportContent({ currentTime, range, name, + selectedCaseId, onStartExport, setName, + setSelectedCaseId, setRange, setMode, onCancel, }: ExportContentProps) { const { t } = useTranslation(["components/dialog"]); const [selectedOption, setSelectedOption] = useState("1"); + const { data: cases } = useSWR("cases"); const onSelectTime = useCallback( (option: ExportOption) => { @@ -320,6 +341,44 @@ export function ExportContent({ value={name} onChange={(e) => setName(e.target.value)} /> +
+ + +
{isDesktop && } ( + undefined, + ); const onStartExport = useCallback(() => { if (!range) { toast.error(t("toast.error.noValidTimeSelected"), { @@ -96,6 +99,7 @@ export default function MobileReviewSettingsDrawer({ { playback: "realtime", name, + export_case_id: selectedCaseId || undefined, }, ) .then((response) => { @@ -114,6 +118,7 @@ export default function MobileReviewSettingsDrawer({ }, ); setName(""); + setSelectedCaseId(undefined); setRange(undefined); setMode("none"); } @@ -133,7 +138,7 @@ export default function MobileReviewSettingsDrawer({ }, ); }); - }, [camera, name, range, setRange, setName, setMode, t]); + }, [camera, name, range, selectedCaseId, setRange, setName, setMode, t]); // filters @@ -200,8 +205,10 @@ export default function MobileReviewSettingsDrawer({ currentTime={currentTime} range={range} name={name} + selectedCaseId={selectedCaseId} onStartExport={onStartExport} setName={setName} + setSelectedCaseId={setSelectedCaseId} setRange={setRange} setMode={(mode) => { setMode(mode); @@ -213,6 +220,7 @@ export default function MobileReviewSettingsDrawer({ onCancel={() => { setMode("none"); setRange(undefined); + setSelectedCaseId(undefined); setDrawerMode("select"); }} /> diff --git a/web/src/components/overlay/dialog/OptionAndInputDialog.tsx b/web/src/components/overlay/dialog/OptionAndInputDialog.tsx new file mode 100644 index 000000000..cb6b23907 --- /dev/null +++ b/web/src/components/overlay/dialog/OptionAndInputDialog.tsx @@ -0,0 +1,166 @@ +import { Button } from "@/components/ui/button"; +import { + Dialog, + DialogContent, + DialogDescription, + DialogFooter, + DialogHeader, + DialogTitle, +} from "@/components/ui/dialog"; +import { Input } from "@/components/ui/input"; +import { + Select, + SelectContent, + SelectItem, + SelectTrigger, + SelectValue, +} from "@/components/ui/select"; +import { cn } from "@/lib/utils"; +import { isMobile } from "react-device-detect"; +import { useEffect, useMemo, useState } from "react"; +import { 
useTranslation } from "react-i18next"; + +type Option = { + value: string; + label: string; +}; + +type OptionAndInputDialogProps = { + open: boolean; + title: string; + description?: string; + options: Option[]; + newValueKey: string; + initialValue?: string; + nameLabel: string; + descriptionLabel: string; + setOpen: (open: boolean) => void; + onSave: (value: string) => void; + onCreateNew: (name: string, description: string) => void; +}; + +export default function OptionAndInputDialog({ + open, + title, + description, + options, + newValueKey, + initialValue, + nameLabel, + descriptionLabel, + setOpen, + onSave, + onCreateNew, +}: OptionAndInputDialogProps) { + const { t } = useTranslation("common"); + const firstOption = useMemo(() => options[0]?.value, [options]); + + const [selectedValue, setSelectedValue] = useState( + initialValue ?? firstOption, + ); + const [name, setName] = useState(""); + const [descriptionValue, setDescriptionValue] = useState(""); + + useEffect(() => { + if (open) { + setSelectedValue(initialValue ?? firstOption); + setName(""); + setDescriptionValue(""); + } + }, [open, initialValue, firstOption]); + + const isNew = selectedValue === newValueKey; + const disableSave = !selectedValue || (isNew && name.trim().length === 0); + + const handleSave = () => { + if (!selectedValue) { + return; + } + + const trimmedName = name.trim(); + const trimmedDescription = descriptionValue.trim(); + + if (isNew) { + onCreateNew(trimmedName, trimmedDescription); + } else { + onSave(selectedValue); + } + setOpen(false); + }; + + return ( + + { + if (isMobile) { + e.preventDefault(); + } + }} + > + + {title} + {description && {description}} + + +
+ +
+ + {isNew && ( +
+
+ + setName(e.target.value)} /> +
+
+ + setDescriptionValue(e.target.value)} + /> +
+
+ )} + + + + + +
+
+ ); +} diff --git a/web/src/components/player/JSMpegPlayer.tsx b/web/src/components/player/JSMpegPlayer.tsx index f85535013..c522ff0a8 100644 --- a/web/src/components/player/JSMpegPlayer.tsx +++ b/web/src/components/player/JSMpegPlayer.tsx @@ -118,6 +118,8 @@ export default function JSMpegPlayer({ const videoWrapper = videoRef.current; const canvas = canvasRef.current; let videoElement: JSMpeg.VideoElement | null = null; + let socket: WebSocket | null = null; + let socketMessageHandler: ((event: MessageEvent) => void) | null = null; let frameCount = 0; @@ -152,12 +154,14 @@ export default function JSMpegPlayer({ videoElement.player.source && videoElement.player.source.socket ) { - const socket = videoElement.player.source.socket; - socket.addEventListener("message", (event: MessageEvent) => { + socket = videoElement.player.source.socket as WebSocket; + socketMessageHandler = (event: MessageEvent) => { if (event.data instanceof ArrayBuffer) { bytesReceivedRef.current += event.data.byteLength; } - }); + }; + + socket.addEventListener("message", socketMessageHandler); } // Update stats every second @@ -197,11 +201,23 @@ export default function JSMpegPlayer({ } if (videoElement) { try { - // this causes issues in react strict mode - // https://stackoverflow.com/questions/76822128/issue-with-cycjimmy-jsmpeg-player-in-react-18-cannot-read-properties-of-null-o - videoElement.destroy(); + videoElement.player?.destroy(); // eslint-disable-next-line no-empty } catch (e) {} + + if (videoWrapper) { + videoWrapper.innerHTML = ""; + // @ts-expect-error playerInstance is set by jsmpeg + videoWrapper.playerInstance = null; + } + } + if (socket) { + if (socketMessageHandler) { + socket.removeEventListener("message", socketMessageHandler); + } + + socket = null; + socketMessageHandler = null; } }; } diff --git a/web/src/components/player/LivePlayer.tsx b/web/src/components/player/LivePlayer.tsx index dbbc289c5..f48a7d475 100644 --- a/web/src/components/player/LivePlayer.tsx +++ 
b/web/src/components/player/LivePlayer.tsx @@ -82,6 +82,11 @@ export default function LivePlayer({ const internalContainerRef = useRef(null); const cameraName = useCameraFriendlyName(cameraConfig); + + // player is showing on a dashboard if containerRef is not provided + + const inDashboard = containerRef?.current == null; + // stats const [stats, setStats] = useState({ @@ -416,6 +421,28 @@ export default function LivePlayer({ />
+ {offline && inDashboard && ( + <> +
+
+
+
{t("streamOffline.title")}
+ +

+ + streamOffline.desc + +

+
+
+ + )} + {offline && !showStillWithoutActivity && cameraEnabled && (
diff --git a/web/src/hooks/use-allowed-cameras.ts b/web/src/hooks/use-allowed-cameras.ts index 9eae59fc2..05941922a 100644 --- a/web/src/hooks/use-allowed-cameras.ts +++ b/web/src/hooks/use-allowed-cameras.ts @@ -12,7 +12,7 @@ export function useAllowedCameras() { if ( auth.user?.role === "viewer" || auth.user?.role === "admin" || - !auth.isAuthenticated // anonymous port 5000 + !auth.isAuthenticated // anonymous internal port ) { // return all cameras return config?.cameras ? Object.keys(config.cameras) : []; diff --git a/web/src/pages/Exports.tsx b/web/src/pages/Exports.tsx index 26a75801a..5b05439c6 100644 --- a/web/src/pages/Exports.tsx +++ b/web/src/pages/Exports.tsx @@ -1,5 +1,5 @@ import { baseUrl } from "@/api/baseUrl"; -import ExportCard from "@/components/card/ExportCard"; +import { CaseCard, ExportCard } from "@/components/card/ExportCard"; import { AlertDialog, AlertDialogCancel, @@ -11,64 +11,144 @@ import { } from "@/components/ui/alert-dialog"; import { Button } from "@/components/ui/button"; import { Dialog, DialogContent, DialogTitle } from "@/components/ui/dialog"; +import Heading from "@/components/ui/heading"; import { Input } from "@/components/ui/input"; import { Toaster } from "@/components/ui/sonner"; import useKeyboardListener from "@/hooks/use-keyboard-listener"; import { useSearchEffect } from "@/hooks/use-overlay-state"; +import { useHistoryBack } from "@/hooks/use-history-back"; +import { useApiFilterArgs } from "@/hooks/use-api-filter"; import { cn } from "@/lib/utils"; -import { DeleteClipType, Export } from "@/types/export"; +import { + DeleteClipType, + Export, + ExportCase, + ExportFilter, +} from "@/types/export"; +import OptionAndInputDialog from "@/components/overlay/dialog/OptionAndInputDialog"; import axios from "axios"; -import { useCallback, useEffect, useMemo, useRef, useState } from "react"; -import { isMobile } from "react-device-detect"; +import { + MutableRefObject, + useCallback, + useEffect, + useMemo, + useRef, + 
useState, +} from "react"; +import { isMobile, isMobileOnly } from "react-device-detect"; import { useTranslation } from "react-i18next"; import { LuFolderX } from "react-icons/lu"; import { toast } from "sonner"; import useSWR from "swr"; +import ExportFilterGroup from "@/components/filter/ExportFilterGroup"; + +// always parse these as string arrays +const EXPORT_FILTER_ARRAY_KEYS = ["cameras"]; function Exports() { const { t } = useTranslation(["views/exports"]); - const { data: exports, mutate } = useSWR("exports"); useEffect(() => { document.title = t("documentTitle"); }, [t]); + // Filters + + const [exportFilter, setExportFilter, exportSearchParams] = + useApiFilterArgs(EXPORT_FILTER_ARRAY_KEYS); + + // Data + + const { data: cases, mutate: updateCases } = useSWR("cases"); + const { data: rawExports, mutate: updateExports } = useSWR( + exportSearchParams && Object.keys(exportSearchParams).length > 0 + ? ["exports", exportSearchParams] + : "exports", + ); + + const exportsByCase = useMemo<{ [caseId: string]: Export[] }>(() => { + const grouped: { [caseId: string]: Export[] } = {}; + (rawExports ?? 
[]).forEach((exp) => { + const caseId = exp.export_case || "none"; + if (!grouped[caseId]) { + grouped[caseId] = []; + } + + grouped[caseId].push(exp); + }); + return grouped; + }, [rawExports]); + + const filteredCases = useMemo(() => { + if (!cases) { + return []; + } + + return cases.filter((caseItem) => { + const caseExports = exportsByCase[caseItem.id]; + return caseExports?.length; + }); + }, [cases, exportsByCase]); + + const exports = useMemo( + () => exportsByCase["none"] || [], + [exportsByCase], + ); + + const mutate = useCallback(() => { + updateExports(); + updateCases(); + }, [updateExports, updateCases]); + // Search const [search, setSearch] = useState(""); - const filteredExports = useMemo(() => { - if (!search || !exports) { - return exports; - } - - return exports.filter((exp) => - exp.name - .toLowerCase() - .replaceAll("_", " ") - .includes(search.toLowerCase()), - ); - }, [exports, search]); - // Viewing const [selected, setSelected] = useState(); + const [selectedCaseId, setSelectedCaseId] = useState( + undefined, + ); const [selectedAspect, setSelectedAspect] = useState(0.0); + // Handle browser back button to deselect case before navigating away + useHistoryBack({ + enabled: true, + open: selectedCaseId !== undefined, + onClose: () => setSelectedCaseId(undefined), + }); + useSearchEffect("id", (id) => { - if (!exports) { + if (!rawExports) { return false; } - setSelected(exports.find((exp) => exp.id == id)); + setSelected(rawExports.find((exp) => exp.id == id)); return true; }); - // Deleting + useSearchEffect("caseId", (caseId: string) => { + if (!filteredCases) { + return false; + } + + const exists = filteredCases.some((c) => c.id === caseId); + + if (!exists) { + return false; + } + + setSelectedCaseId(caseId); + return true; + }); + + // Modifying const [deleteClip, setDeleteClip] = useState(); + const [exportToAssign, setExportToAssign] = useState(); const onHandleDelete = useCallback(() => { if (!deleteClip) { @@ -83,8 +163,6 @@ 
function Exports() { }); }, [deleteClip, mutate]); - // Renaming - const onHandleRename = useCallback( (id: string, update: string) => { axios @@ -107,7 +185,7 @@ function Exports() { }); }); }, - [mutate, t], + [mutate, setDeleteClip, t], ); // Keyboard Listener @@ -115,10 +193,27 @@ function Exports() { const contentRef = useRef(null); useKeyboardListener([], undefined, contentRef); + const selectedCase = useMemo( + () => filteredCases?.find((c) => c.id === selectedCaseId), + [filteredCases, selectedCaseId], + ); + + const resetCaseDialog = useCallback(() => { + setExportToAssign(undefined); + }, []); + return (
+ + setDeleteClip(undefined)} @@ -187,47 +282,364 @@ function Exports() { - {exports && ( -
+
+
setSearch(e.target.value)} />
- )} + +
-
- {exports && filteredExports && filteredExports.length > 0 ? ( -
- {Object.values(exports).map((item) => ( - - setDeleteClip({ file, exportName }) - } - /> - ))} -
- ) : exports !== undefined ? ( -
- - {t("noExports")} -
- ) : null} + {selectedCase ? ( + + ) : ( + + )} +
+ ); +} + +type AllExportsViewProps = { + contentRef: MutableRefObject; + search: string; + cases?: ExportCase[]; + exports: Export[]; + exportsByCase: { [caseId: string]: Export[] }; + setSelectedCaseId: (id: string) => void; + setSelected: (e: Export) => void; + renameClip: (id: string, update: string) => void; + setDeleteClip: (d: DeleteClipType | undefined) => void; + onAssignToCase: (e: Export) => void; +}; +function AllExportsView({ + contentRef, + search, + cases, + exports, + exportsByCase, + setSelectedCaseId, + setSelected, + renameClip, + setDeleteClip, + onAssignToCase, +}: AllExportsViewProps) { + const { t } = useTranslation(["views/exports"]); + + // Filter + + const filteredCases = useMemo(() => { + if (!search || !cases) { + return cases || []; + } + + return cases.filter( + (caseItem) => + caseItem.name.toLowerCase().includes(search.toLowerCase()) || + (caseItem.description && + caseItem.description.toLowerCase().includes(search.toLowerCase())), + ); + }, [search, cases]); + + const filteredExports = useMemo(() => { + if (!search) { + return exports; + } + + return exports.filter((exp) => + exp.name + .toLowerCase() + .replaceAll("_", " ") + .includes(search.toLowerCase()), + ); + }, [exports, search]); + + return ( +
+ {filteredCases?.length || filteredExports.length ? ( +
+ {filteredCases.length > 0 && ( +
+ {t("headings.cases")} +
+ {cases?.map((item) => ( + { + setSelectedCaseId(item.id); + }} + /> + ))} +
+
+ )} + + {filteredExports.length > 0 && ( +
+ {t("headings.uncategorizedExports")} +
+ {exports.map((item) => ( + + setDeleteClip({ file, exportName }) + } + onAssignToCase={onAssignToCase} + /> + ))} +
+
+ )} +
+ ) : ( +
+ + {t("noExports")} +
+ )} +
+ ); +} + +type CaseViewProps = { + contentRef: MutableRefObject; + selectedCase: ExportCase; + exports?: Export[]; + search: string; + setSelected: (e: Export) => void; + renameClip: (id: string, update: string) => void; + setDeleteClip: (d: DeleteClipType | undefined) => void; + onAssignToCase: (e: Export) => void; +}; +function CaseView({ + contentRef, + selectedCase, + exports, + search, + setSelected, + renameClip, + setDeleteClip, + onAssignToCase, +}: CaseViewProps) { + const filteredExports = useMemo(() => { + const caseExports = (exports || []).filter( + (e) => e.export_case == selectedCase.id, + ); + + if (!search) { + return caseExports; + } + + return caseExports.filter((exp) => + exp.name + .toLowerCase() + .replaceAll("_", " ") + .includes(search.toLowerCase()), + ); + }, [selectedCase, exports, search]); + + return ( +
+
+ + {selectedCase.name} + +
+ {selectedCase.description} +
+
+
+ {exports?.map((item) => ( + + setDeleteClip({ file, exportName }) + } + onAssignToCase={onAssignToCase} + /> + ))}
); } -export default Exports; +type CaseAssignmentDialogProps = { + exportToAssign?: Export; + cases?: ExportCase[]; + selectedCaseId?: string; + onClose: () => void; + mutate: () => void; +}; +function CaseAssignmentDialog({ + exportToAssign, + cases, + selectedCaseId, + onClose, + mutate, +}: CaseAssignmentDialogProps) { + const { t } = useTranslation(["views/exports"]); + const caseOptions = useMemo( + () => [ + ...(cases ?? []) + .map((c) => ({ + value: c.id, + label: c.name, + })) + .sort((cA, cB) => cA.label.localeCompare(cB.label)), + { + value: "new", + label: t("caseDialog.newCaseOption"), + }, + ], + [cases, t], + ); + + const handleSave = useCallback( + async (caseId: string) => { + if (!exportToAssign) return; + + try { + await axios.patch(`export/${exportToAssign.id}/case`, { + export_case_id: caseId, + }); + mutate(); + onClose(); + } catch (error: unknown) { + const apiError = error as { + response?: { data?: { message?: string; detail?: string } }; + }; + const errorMessage = + apiError.response?.data?.message || + apiError.response?.data?.detail || + "Unknown error"; + toast.error(t("toast.error.assignCaseFailed", { errorMessage }), { + position: "top-center", + }); + } + }, + [exportToAssign, mutate, onClose, t], + ); + + const handleCreateNew = useCallback( + async (name: string, description: string) => { + if (!exportToAssign) return; + + try { + const createResp = await axios.post("cases", { + name, + description, + }); + + const newCaseId: string | undefined = createResp.data?.id; + + if (newCaseId) { + await axios.patch(`export/${exportToAssign.id}/case`, { + export_case_id: newCaseId, + }); + } + + mutate(); + onClose(); + } catch (error: unknown) { + const apiError = error as { + response?: { data?: { message?: string; detail?: string } }; + }; + const errorMessage = + apiError.response?.data?.message || + apiError.response?.data?.detail || + "Unknown error"; + toast.error(t("toast.error.assignCaseFailed", { errorMessage }), { + position: 
"top-center", + }); + } + }, + [exportToAssign, mutate, onClose, t], + ); + + if (!exportToAssign) { + return null; + } + + return ( + { + if (!open) { + onClose(); + } + }} + options={caseOptions} + nameLabel={t("caseDialog.nameLabel")} + descriptionLabel={t("caseDialog.descriptionLabel")} + initialValue={selectedCaseId} + newValueKey="new" + onSave={handleSave} + onCreateNew={handleCreateNew} + /> + ); +} + +export default Exports; \ No newline at end of file diff --git a/web/src/pages/Settings.tsx b/web/src/pages/Settings.tsx index 1d44125cb..50b72ab80 100644 --- a/web/src/pages/Settings.tsx +++ b/web/src/pages/Settings.tsx @@ -36,6 +36,7 @@ import NotificationView from "@/views/settings/NotificationsSettingsView"; import EnrichmentsSettingsView from "@/views/settings/EnrichmentsSettingsView"; import UiSettingsView from "@/views/settings/UiSettingsView"; import FrigatePlusSettingsView from "@/views/settings/FrigatePlusSettingsView"; +import MaintenanceSettingsView from "@/views/settings/MaintenanceSettingsView"; import { useSearchEffect } from "@/hooks/use-overlay-state"; import { useNavigate, useSearchParams } from "react-router-dom"; import { useInitialCameraState } from "@/api/ws"; @@ -81,6 +82,7 @@ const allSettingsViews = [ "roles", "notifications", "frigateplus", + "maintenance", ] as const; type SettingsType = (typeof allSettingsViews)[number]; @@ -120,6 +122,10 @@ const settingsGroups = [ label: "frigateplus", items: [{ key: "frigateplus", component: FrigatePlusSettingsView }], }, + { + label: "maintenance", + items: [{ key: "maintenance", component: MaintenanceSettingsView }], + }, ]; const CAMERA_SELECT_BUTTON_PAGES = [ diff --git a/web/src/types/export.ts b/web/src/types/export.ts index fc62bbeec..c606855f2 100644 --- a/web/src/types/export.ts +++ b/web/src/types/export.ts @@ -6,9 +6,28 @@ export type Export = { video_path: string; thumb_path: string; in_progress: boolean; + export_case?: string; +}; + +export type ExportCase = { + id: string; + name: 
string; + description: string; + created_at: number; + updated_at: number; }; export type DeleteClipType = { file: string; exportName: string; }; + +// filtering + +const EXPORT_FILTERS = ["cameras"] as const; +export type ExportFilters = (typeof EXPORT_FILTERS)[number]; +export const DEFAULT_EXPORT_FILTERS: ExportFilters[] = ["cameras"]; + +export type ExportFilter = { + cameras?: string[]; +}; diff --git a/web/src/types/frigateConfig.ts b/web/src/types/frigateConfig.ts index 94c9ba6e9..7c69ef808 100644 --- a/web/src/types/frigateConfig.ts +++ b/web/src/types/frigateConfig.ts @@ -197,7 +197,6 @@ export interface CameraConfig { days: number; mode: string; }; - sync_recordings: boolean; }; review: { alerts: { @@ -542,7 +541,6 @@ export interface FrigateConfig { days: number; mode: string; }; - sync_recordings: boolean; }; rtmp: { diff --git a/web/src/types/stats.ts b/web/src/types/stats.ts index c98ebe80f..8b22849be 100644 --- a/web/src/types/stats.ts +++ b/web/src/types/stats.ts @@ -24,6 +24,10 @@ export type CameraStats = { pid: number; process_fps: number; skipped_fps: number; + connection_quality: "excellent" | "fair" | "poor" | "unusable"; + expected_fps: number; + reconnects_last_hour: number; + stalls_last_hour: number; }; export type CpuStats = { @@ -37,6 +41,7 @@ export type DetectorStats = { detection_start: number; inference_speed: number; pid: number; + temperature?: number; }; export type EmbeddingsStats = { @@ -56,11 +61,13 @@ export type GpuStats = { enc?: string; dec?: string; pstate?: string; + temp?: number; }; export type NpuStats = { npu: number; mem: string; + temp?: number; }; export type GpuInfo = "vainfo" | "nvinfo"; @@ -68,7 +75,6 @@ export type GpuInfo = "vainfo" | "nvinfo"; export type ServiceStats = { last_updated: number; storage: { [path: string]: StorageStats }; - temperatures: { [apex: string]: number }; uptime: number; latest_version: string; version: string; diff --git a/web/src/types/ws.ts b/web/src/types/ws.ts index 
1d98b7b01..6e22345eb 100644 --- a/web/src/types/ws.ts +++ b/web/src/types/ws.ts @@ -126,3 +126,32 @@ export type TriggerStatus = { type: string; score: number; }; + +export type MediaSyncStats = { + files_checked: number; + orphans_found: number; + orphans_deleted: number; + aborted: boolean; + error: string | null; +}; + +export type MediaSyncTotals = { + files_checked: number; + orphans_found: number; + orphans_deleted: number; +}; + +export type MediaSyncResults = { + [mediaType: string]: MediaSyncStats | MediaSyncTotals; + totals: MediaSyncTotals; +}; + +export type Job = { + id: string; + job_type: string; + status: string; + results?: MediaSyncResults; + start_time?: number; + end_time?: number; + error_message?: string; +}; diff --git a/web/src/views/settings/MaintenanceSettingsView.tsx b/web/src/views/settings/MaintenanceSettingsView.tsx new file mode 100644 index 000000000..f2d1bad30 --- /dev/null +++ b/web/src/views/settings/MaintenanceSettingsView.tsx @@ -0,0 +1,442 @@ +import Heading from "@/components/ui/heading"; +import { Button } from "@/components/ui/button"; +import { Label } from "@/components/ui/label"; +import { Separator } from "@/components/ui/separator"; +import { Toaster } from "@/components/ui/sonner"; +import ActivityIndicator from "@/components/indicators/activity-indicator"; +import { useCallback, useState } from "react"; +import { useTranslation } from "react-i18next"; +import axios from "axios"; +import { toast } from "sonner"; +import { useJobStatus } from "@/api/ws"; +import { Switch } from "@/components/ui/switch"; +import { LuCheck, LuX } from "react-icons/lu"; +import { cn } from "@/lib/utils"; +import { formatUnixTimestampToDateTime } from "@/utils/dateUtil"; +import { MediaSyncStats } from "@/types/ws"; + +export default function MaintenanceSettingsView() { + const { t } = useTranslation("views/settings"); + const [selectedMediaTypes, setSelectedMediaTypes] = useState([ + "all", + ]); + const [dryRun, setDryRun] = 
useState(true); + const [force, setForce] = useState(false); + const [isSubmitting, setIsSubmitting] = useState(false); + + const MEDIA_TYPES = [ + { id: "event_snapshots", label: t("maintenance.sync.event_snapshots") }, + { id: "event_thumbnails", label: t("maintenance.sync.event_thumbnails") }, + { id: "review_thumbnails", label: t("maintenance.sync.review_thumbnails") }, + { id: "previews", label: t("maintenance.sync.previews") }, + { id: "exports", label: t("maintenance.sync.exports") }, + { id: "recordings", label: t("maintenance.sync.recordings") }, + ]; + + // Subscribe to media sync status via WebSocket + const { payload: currentJob } = useJobStatus("media_sync"); + + const isJobRunning = Boolean( + currentJob && + (currentJob.status === "queued" || currentJob.status === "running"), + ); + + const handleMediaTypeChange = useCallback((id: string, checked: boolean) => { + setSelectedMediaTypes((prev) => { + if (id === "all") { + return checked ? ["all"] : []; + } + + let next = prev.filter((t) => t !== "all"); + if (checked) { + next.push(id); + } else { + next = next.filter((t) => t !== id); + } + return next.length === 0 ? ["all"] : next; + }); + }, []); + + const handleStartSync = useCallback(async () => { + setIsSubmitting(true); + + try { + const response = await axios.post( + "/media/sync", + { + dry_run: dryRun, + media_types: selectedMediaTypes, + force: force, + }, + { + headers: { + "Content-Type": "application/json", + }, + }, + ); + + if (response.status === 202) { + toast.success(t("maintenance.sync.started"), { + position: "top-center", + closeButton: true, + }); + } else if (response.status === 409) { + toast.error(t("maintenance.sync.alreadyRunning"), { + position: "top-center", + closeButton: true, + }); + } + } catch { + toast.error(t("maintenance.sync.error"), { + position: "top-center", + closeButton: true, + }); + } finally { + setIsSubmitting(false); + } + }, [selectedMediaTypes, dryRun, force, t]); + + return ( + <> +
+ +
+
+
+ + {t("maintenance.sync.title")} + + +
+
+

{t("maintenance.sync.desc")}

+
+
+ +
+ {/* Media Types Selection */} +
+ +
+
+ + + handleMediaTypeChange("all", checked) + } + disabled={isJobRunning} + /> +
+
+ {MEDIA_TYPES.map((type) => ( +
+ + + handleMediaTypeChange(type.id, checked) + } + disabled={ + isJobRunning || selectedMediaTypes.includes("all") + } + /> +
+ ))} +
+
+
+ + {/* Options */} +
+
+
+ +
+ +

+ {dryRun + ? t("maintenance.sync.dryRunEnabled") + : t("maintenance.sync.dryRunDisabled")} +

+
+
+
+ +
+
+ +
+ +

+ {t("maintenance.sync.forceDesc")} +

+
+
+
+
+ + {/* Action Buttons */} +
+ +
+
+
+ +
+
+ +
+ + {t("maintenance.sync.currentStatus")} + +
+ {currentJob?.status === "success" && ( + + )} + {currentJob?.status === "failed" && ( + + )} + {(currentJob?.status === "running" || + currentJob?.status === "queued") && ( + + )} + {t( + `maintenance.sync.status.${currentJob?.status ?? "notRunning"}`, + )} +
+
+ + {/* Current Job Status */} +
+ {currentJob?.start_time && ( +
+ + {t("maintenance.sync.startTime")}: + + + {formatUnixTimestampToDateTime( + currentJob?.start_time ?? "-", + )} + +
+ )} + {currentJob?.end_time && ( +
+ + {t("maintenance.sync.endTime")}: + + + {formatUnixTimestampToDateTime(currentJob?.end_time)} + +
+ )} + {currentJob?.results && ( +
+

+ {t("maintenance.sync.results")} +

+
+ {/* Individual media type results */} +
+ {Object.entries(currentJob.results) + .filter(([key]) => key !== "totals") + .map(([mediaType, stats]) => { + const mediaStats = stats as MediaSyncStats; + return ( +
+

+ {t(`maintenance.sync.${mediaType}`)} +

+
+
+ + {t( + "maintenance.sync.resultsFields.filesChecked", + )} + + {mediaStats.files_checked} +
+
+ + {t( + "maintenance.sync.resultsFields.orphansFound", + )} + + 0 + ? "text-yellow-500" + : "" + } + > + {mediaStats.orphans_found} + +
+
+ + {t( + "maintenance.sync.resultsFields.orphansDeleted", + )} + + 0 && + "text-success", + mediaStats.orphans_deleted === 0 && + mediaStats.aborted && + "text-destructive", + )} + > + {mediaStats.orphans_deleted} + +
+ {mediaStats.aborted && ( +
+ + + {t( + "maintenance.sync.resultsFields.aborted", + )} +
+ )} + {mediaStats.error && ( +
+ {t( + "maintenance.sync.resultsFields.error", + )} + {": "} + {mediaStats.error} +
+ )} +
+
+ ); + })} +
+ {/* Totals */} + {currentJob.results.totals && ( +
+

+ {t("maintenance.sync.resultsFields.totals")} +

+
+
+ + {t( + "maintenance.sync.resultsFields.filesChecked", + )} + + + {currentJob.results.totals.files_checked} + +
+
+ + {t( + "maintenance.sync.resultsFields.orphansFound", + )} + + 0 + ? "font-medium text-yellow-500" + : "font-medium" + } + > + {currentJob.results.totals.orphans_found} + +
+
+ + {t( + "maintenance.sync.resultsFields.orphansDeleted", + )} + + + 0 + ? "text-success" + : "text-muted-foreground", + )} + > + {currentJob.results.totals.orphans_deleted} + +
+
+
+ )} +
+
+ )} + {currentJob?.error_message && ( +
+

+ {t("maintenance.sync.errorLabel")} +

+

{currentJob?.error_message}

+
+ )} +
+
+
+
+
+
+ + ); +} diff --git a/web/src/views/system/CameraMetrics.tsx b/web/src/views/system/CameraMetrics.tsx index 6e24ef5d0..b6c5be4fa 100644 --- a/web/src/views/system/CameraMetrics.tsx +++ b/web/src/views/system/CameraMetrics.tsx @@ -1,6 +1,7 @@ import { useFrigateStats } from "@/api/ws"; import { CameraLineGraph } from "@/components/graph/LineGraph"; import CameraInfoDialog from "@/components/overlay/CameraInfoDialog"; +import { ConnectionQualityIndicator } from "@/components/camera/ConnectionQualityIndicator"; import { Skeleton } from "@/components/ui/skeleton"; import { FrigateConfig } from "@/types/frigateConfig"; import { FrigateStats } from "@/types/stats"; @@ -282,8 +283,37 @@ export default function CameraMetrics({ )}
-
- +
+
+ +
+ {statsHistory.length > 0 && + statsHistory[statsHistory.length - 1]?.cameras[ + camera.name + ] && ( + + )}
diff --git a/web/src/views/system/GeneralMetrics.tsx b/web/src/views/system/GeneralMetrics.tsx index f8ce64851..cdf35c28b 100644 --- a/web/src/views/system/GeneralMetrics.tsx +++ b/web/src/views/system/GeneralMetrics.tsx @@ -127,13 +127,6 @@ export default function GeneralMetrics({ return undefined; } - if ( - statsHistory.length > 0 && - Object.keys(statsHistory[0].service.temperatures).length == 0 - ) { - return undefined; - } - const series: { [key: string]: { name: string; data: { x: number; y: number }[] }; } = {}; @@ -143,22 +136,22 @@ export default function GeneralMetrics({ return; } - Object.entries(stats.detectors).forEach(([key], cIdx) => { - if (!key.includes("coral")) { + Object.entries(stats.detectors).forEach(([key, detectorStats]) => { + if (detectorStats.temperature === undefined) { return; } - if (cIdx <= Object.keys(stats.service.temperatures).length) { - if (!(key in series)) { - series[key] = { - name: key, - data: [], - }; - } - - const temp = Object.values(stats.service.temperatures)[cIdx]; - series[key].data.push({ x: statsIdx + 1, y: Math.round(temp) }); + if (!(key in series)) { + series[key] = { + name: key, + data: [], + }; } + + series[key].data.push({ + x: statsIdx + 1, + y: Math.round(detectorStats.temperature), + }); }); }); @@ -375,6 +368,40 @@ export default function GeneralMetrics({ return Object.keys(series).length > 0 ? 
Object.values(series) : undefined; }, [statsHistory]); + const gpuTempSeries = useMemo(() => { + if (!statsHistory) { + return []; + } + + const series: { + [key: string]: { name: string; data: { x: number; y: number }[] }; + } = {}; + let hasValidGpu = false; + + statsHistory.forEach((stats, statsIdx) => { + if (!stats) { + return; + } + + Object.entries(stats.gpu_usages || {}).forEach(([key, stats]) => { + if (!(key in series)) { + series[key] = { name: key, data: [] }; + } + + if (stats.temp !== undefined) { + hasValidGpu = true; + series[key].data.push({ x: statsIdx + 1, y: stats.temp }); + } + }); + }); + + if (!hasValidGpu) { + return []; + } + + return Object.keys(series).length > 0 ? Object.values(series) : undefined; + }, [statsHistory]); + // Check if Intel GPU has all 0% usage values (known bug) const showIntelGpuWarning = useMemo(() => { if (!statsHistory || statsHistory.length < 3) { @@ -455,6 +482,40 @@ export default function GeneralMetrics({ return Object.keys(series).length > 0 ? Object.values(series) : []; }, [statsHistory]); + const npuTempSeries = useMemo(() => { + if (!statsHistory) { + return []; + } + + const series: { + [key: string]: { name: string; data: { x: number; y: number }[] }; + } = {}; + let hasValidNpu = false; + + statsHistory.forEach((stats, statsIdx) => { + if (!stats) { + return; + } + + Object.entries(stats.npu_usages || {}).forEach(([key, stats]) => { + if (!(key in series)) { + series[key] = { name: key, data: [] }; + } + + if (stats.temp !== undefined) { + hasValidNpu = true; + series[key].data.push({ x: statsIdx + 1, y: stats.temp }); + } + }); + }); + + if (!hasValidNpu) { + return []; + } + + return Object.keys(series).length > 0 ? Object.values(series) : undefined; + }, [statsHistory]); + // other processes stats const hardwareType = useMemo(() => { @@ -676,7 +737,11 @@ export default function GeneralMetrics({
{statsHistory[0]?.gpu_usages && ( @@ -811,6 +876,30 @@ export default function GeneralMetrics({ ) : ( )} + {statsHistory.length != 0 ? ( + <> + {gpuTempSeries && gpuTempSeries?.length != 0 && ( +
+
+ {t("general.hardwareInfo.gpuTemperature")} +
+ {gpuTempSeries.map((series) => ( + + ))} +
+ )} + + ) : ( + + )} {statsHistory[0]?.npu_usages && ( <> @@ -834,6 +923,30 @@ export default function GeneralMetrics({ ) : ( )} + {statsHistory.length != 0 ? ( + <> + {npuTempSeries && npuTempSeries?.length != 0 && ( +
+
+ {t("general.hardwareInfo.npuTemperature")} +
+ {npuTempSeries.map((series) => ( + + ))} +
+ )} + + ) : ( + + )} )} diff --git a/web/vite.config.ts b/web/vite.config.ts index cb1a580bf..148048995 100644 --- a/web/vite.config.ts +++ b/web/vite.config.ts @@ -4,7 +4,7 @@ import { defineConfig } from "vite"; import react from "@vitejs/plugin-react-swc"; import monacoEditorPlugin from "vite-plugin-monaco-editor"; -const proxyHost = process.env.PROXY_HOST || "localhost:5000"; +const proxyHost = process.env.PROXY_HOST || "1ocalhost:5000"; // https://vitejs.dev/config/ export default defineConfig({ From 451d6f5c2249429f31763cc68d3744b60aa59859 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Thu, 26 Feb 2026 21:27:31 -0700 Subject: [PATCH 02/56] Revert "Early 0.18 work (#22138)" (#22142) This reverts commit d24b96d3bb8ac43500e35fbcc96c45df460b26ff. --- Makefile | 5 +- docker/main/Dockerfile | 2 +- .../etc/s6-overlay/s6-rc.d/certsync/run | 7 +- .../rootfs/etc/s6-overlay/s6-rc.d/nginx/run | 12 +- .../rootfs/usr/local/nginx/get_base_path.py | 11 + .../usr/local/nginx/get_listen_settings.py | 35 + .../usr/local/nginx/get_nginx_settings.py | 62 -- .../local/nginx/templates/base_path.gotmpl | 2 +- .../usr/local/nginx/templates/listen.gotmpl | 61 +- docker/rocm/Dockerfile | 4 +- docker/rocm/requirements-wheels-rocm.txt | 2 +- docker/rocm/rocm.hcl | 2 +- docs/docs/configuration/advanced.md | 43 +- docs/docs/configuration/camera_specific.md | 2 +- docs/docs/configuration/genai/config.md | 42 +- docs/docs/configuration/genai/objects.md | 4 +- .../configuration/genai/review_summaries.md | 2 +- docs/docs/configuration/record.md | 21 +- docs/docs/configuration/reference.md | 21 +- docs/docs/configuration/restream.md | 4 +- docs/docs/guides/configuring_go2rtc.md | 6 +- docs/sidebars.ts | 2 +- docs/static/frigate-api.yaml | 60 -- frigate/api/app.py | 111 +-- frigate/api/auth.py | 36 +- frigate/api/chat.py | 642 -------------- .../api/defs/query/media_query_parameters.py | 15 +- .../defs/query/recordings_query_parameters.py | 21 - frigate/api/defs/request/app_body.py | 17 +- 
frigate/api/defs/request/chat_body.py | 41 - frigate/api/defs/request/events_body.py | 1 - frigate/api/defs/request/export_case_body.py | 35 - .../defs/request/export_recordings_body.py | 43 +- frigate/api/defs/response/chat_response.py | 37 - .../api/defs/response/export_case_response.py | 22 - frigate/api/defs/response/export_response.py | 3 - frigate/api/defs/tags.py | 12 +- frigate/api/event.py | 1 - frigate/api/export.py | 350 +------- frigate/api/fastapi_app.py | 4 - frigate/api/media.py | 393 ++++++++- frigate/api/record.py | 479 ----------- frigate/camera/__init__.py | 4 - frigate/comms/dispatcher.py | 17 - frigate/config/__init__.py | 1 - frigate/config/camera/genai.py | 1 - frigate/config/camera/record.py | 11 +- frigate/config/config.py | 8 - frigate/config/network.py | 18 +- frigate/const.py | 2 +- frigate/detectors/detection_runners.py | 4 +- frigate/genai/__init__.py | 63 +- frigate/genai/azure-openai.py | 93 +- frigate/genai/gemini.py | 199 +---- frigate/genai/llama_cpp.py | 238 ------ frigate/genai/ollama.py | 118 --- frigate/genai/openai.py | 113 +-- frigate/jobs/__init__.py | 0 frigate/jobs/job.py | 21 - frigate/jobs/manager.py | 70 -- frigate/jobs/media_sync.py | 135 --- frigate/models.py | 14 - frigate/output/preview.py | 56 +- frigate/record/cleanup.py | 57 +- frigate/record/export.py | 128 +-- frigate/record/util.py | 147 ++++ frigate/review/maintainer.py | 66 +- frigate/stats/util.py | 114 +-- .../test/http_api/test_http_latest_frame.py | 107 --- frigate/test/test_preview_loader.py | 80 -- frigate/track/object_processing.py | 11 +- frigate/track/tracked_object.py | 19 +- frigate/types.py | 9 - frigate/util/config.py | 52 +- frigate/util/media.py | 808 ------------------ frigate/util/services.py | 106 +-- frigate/video.py | 121 +-- migrations/033_create_export_case_table.py | 50 -- migrations/034_add_export_case_to_exports.py | 40 - web/public/locales/en/components/dialog.json | 4 - web/public/locales/en/config/cameras.json | 5 +- 
web/public/locales/en/config/networking.json | 15 +- web/public/locales/en/config/record.json | 5 +- web/public/locales/en/views/exports.json | 18 +- web/public/locales/en/views/settings.json | 48 -- web/public/locales/en/views/system.json | 13 - web/src/api/ws.tsx | 38 - web/src/components/auth/ProtectedRoute.tsx | 2 +- .../camera/ConnectionQualityIndicator.tsx | 76 -- web/src/components/card/ExportCard.tsx | 234 +++-- .../components/filter/ExportFilterGroup.tsx | 67 -- web/src/components/overlay/ExportDialog.tsx | 63 +- .../overlay/MobileReviewSettingsDrawer.tsx | 10 +- .../overlay/dialog/OptionAndInputDialog.tsx | 166 ---- web/src/components/player/JSMpegPlayer.tsx | 28 +- web/src/components/player/LivePlayer.tsx | 27 - web/src/hooks/use-allowed-cameras.ts | 2 +- web/src/pages/Exports.tsx | 522 ++--------- web/src/pages/Settings.tsx | 6 - web/src/types/export.ts | 19 - web/src/types/frigateConfig.ts | 2 + web/src/types/stats.ts | 8 +- web/src/types/ws.ts | 29 - .../settings/MaintenanceSettingsView.tsx | 442 ---------- web/src/views/system/CameraMetrics.tsx | 34 +- web/src/views/system/GeneralMetrics.tsx | 153 +--- web/vite.config.ts | 2 +- 107 files changed, 1049 insertions(+), 6765 deletions(-) create mode 100644 docker/main/rootfs/usr/local/nginx/get_base_path.py create mode 100644 docker/main/rootfs/usr/local/nginx/get_listen_settings.py delete mode 100644 docker/main/rootfs/usr/local/nginx/get_nginx_settings.py delete mode 100644 frigate/api/chat.py delete mode 100644 frigate/api/defs/query/recordings_query_parameters.py delete mode 100644 frigate/api/defs/request/chat_body.py delete mode 100644 frigate/api/defs/request/export_case_body.py delete mode 100644 frigate/api/defs/response/chat_response.py delete mode 100644 frigate/api/defs/response/export_case_response.py delete mode 100644 frigate/api/record.py delete mode 100644 frigate/genai/llama_cpp.py delete mode 100644 frigate/jobs/__init__.py delete mode 100644 frigate/jobs/job.py delete mode 100644 
frigate/jobs/manager.py delete mode 100644 frigate/jobs/media_sync.py create mode 100644 frigate/record/util.py delete mode 100644 frigate/test/http_api/test_http_latest_frame.py delete mode 100644 frigate/test/test_preview_loader.py delete mode 100644 frigate/util/media.py delete mode 100644 migrations/033_create_export_case_table.py delete mode 100644 migrations/034_add_export_case_to_exports.py delete mode 100644 web/src/components/camera/ConnectionQualityIndicator.tsx delete mode 100644 web/src/components/filter/ExportFilterGroup.tsx delete mode 100644 web/src/components/overlay/dialog/OptionAndInputDialog.tsx delete mode 100644 web/src/views/settings/MaintenanceSettingsView.tsx diff --git a/Makefile b/Makefile index 3800399ea..d1427b6df 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ default_target: local COMMIT_HASH := $(shell git log -1 --pretty=format:"%h"|tail -1) -VERSION = 0.18.0 +VERSION = 0.17.0 IMAGE_REPO ?= ghcr.io/blakeblackshear/frigate GITHUB_REF_NAME ?= $(shell git rev-parse --abbrev-ref HEAD) BOARDS= #Initialized empty @@ -49,8 +49,7 @@ push: push-boards --push run: local - docker run --rm --publish=5000:5000 --publish=8971:8971 \ - --volume=${PWD}/config:/config frigate:latest + docker run --rm --publish=5000:5000 --volume=${PWD}/config:/config frigate:latest run_tests: local docker run --rm --workdir=/opt/frigate --entrypoint= frigate:latest \ diff --git a/docker/main/Dockerfile b/docker/main/Dockerfile index b14320033..055a1458f 100644 --- a/docker/main/Dockerfile +++ b/docker/main/Dockerfile @@ -55,7 +55,7 @@ RUN --mount=type=tmpfs,target=/tmp --mount=type=tmpfs,target=/var/cache/apt \ FROM scratch AS go2rtc ARG TARGETARCH WORKDIR /rootfs/usr/local/go2rtc/bin -ADD --link --chmod=755 "https://github.com/AlexxIT/go2rtc/releases/download/v1.9.13/go2rtc_linux_${TARGETARCH}" go2rtc +ADD --link --chmod=755 "https://github.com/AlexxIT/go2rtc/releases/download/v1.9.10/go2rtc_linux_${TARGETARCH}" go2rtc FROM wget AS tempio ARG TARGETARCH diff 
--git a/docker/main/rootfs/etc/s6-overlay/s6-rc.d/certsync/run b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/certsync/run index b834c09bb..4ce1c133f 100755 --- a/docker/main/rootfs/etc/s6-overlay/s6-rc.d/certsync/run +++ b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/certsync/run @@ -10,8 +10,7 @@ echo "[INFO] Starting certsync..." lefile="/etc/letsencrypt/live/frigate/fullchain.pem" -tls_enabled=`python3 /usr/local/nginx/get_nginx_settings.py | jq -r .tls.enabled` -listen_external_port=`python3 /usr/local/nginx/get_nginx_settings.py | jq -r .listen.external_port` +tls_enabled=`python3 /usr/local/nginx/get_listen_settings.py | jq -r .tls.enabled` while true do @@ -35,7 +34,7 @@ do ;; esac - liveprint=`echo | openssl s_client -showcerts -connect 127.0.0.1:$listen_external_port 2>&1 | openssl x509 -fingerprint 2>&1 | grep -i fingerprint || echo 'failed'` + liveprint=`echo | openssl s_client -showcerts -connect 127.0.0.1:8971 2>&1 | openssl x509 -fingerprint 2>&1 | grep -i fingerprint || echo 'failed'` case "$liveprint" in *Fingerprint*) @@ -56,4 +55,4 @@ do done -exit 0 +exit 0 \ No newline at end of file diff --git a/docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx/run b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx/run index a3c7b3248..8bd9b5250 100755 --- a/docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx/run +++ b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx/run @@ -80,14 +80,14 @@ if [ ! \( -f "$letsencrypt_path/privkey.pem" -a -f "$letsencrypt_path/fullchain. 
fi # build templates for optional FRIGATE_BASE_PATH environment variable -python3 /usr/local/nginx/get_nginx_settings.py | \ +python3 /usr/local/nginx/get_base_path.py | \ tempio -template /usr/local/nginx/templates/base_path.gotmpl \ - -out /usr/local/nginx/conf/base_path.conf + -out /usr/local/nginx/conf/base_path.conf -# build templates for additional network settings -python3 /usr/local/nginx/get_nginx_settings.py | \ - tempio -template /usr/local/nginx/templates/listen.gotmpl \ - -out /usr/local/nginx/conf/listen.conf +# build templates for optional TLS support +python3 /usr/local/nginx/get_listen_settings.py | \ + tempio -template /usr/local/nginx/templates/listen.gotmpl \ + -out /usr/local/nginx/conf/listen.conf # Replace the bash process with the NGINX process, redirecting stderr to stdout exec 2>&1 diff --git a/docker/main/rootfs/usr/local/nginx/get_base_path.py b/docker/main/rootfs/usr/local/nginx/get_base_path.py new file mode 100644 index 000000000..2e78a7de9 --- /dev/null +++ b/docker/main/rootfs/usr/local/nginx/get_base_path.py @@ -0,0 +1,11 @@ +"""Prints the base path as json to stdout.""" + +import json +import os +from typing import Any + +base_path = os.environ.get("FRIGATE_BASE_PATH", "") + +result: dict[str, Any] = {"base_path": base_path} + +print(json.dumps(result)) diff --git a/docker/main/rootfs/usr/local/nginx/get_listen_settings.py b/docker/main/rootfs/usr/local/nginx/get_listen_settings.py new file mode 100644 index 000000000..d879db56e --- /dev/null +++ b/docker/main/rootfs/usr/local/nginx/get_listen_settings.py @@ -0,0 +1,35 @@ +"""Prints the tls config as json to stdout.""" + +import json +import sys +from typing import Any + +from ruamel.yaml import YAML + +sys.path.insert(0, "/opt/frigate") +from frigate.util.config import find_config_file + +sys.path.remove("/opt/frigate") + +yaml = YAML() + +config_file = find_config_file() + +try: + with open(config_file) as f: + raw_config = f.read() + + if config_file.endswith((".yaml", 
".yml")): + config: dict[str, Any] = yaml.load(raw_config) + elif config_file.endswith(".json"): + config: dict[str, Any] = json.loads(raw_config) +except FileNotFoundError: + config: dict[str, Any] = {} + +tls_config: dict[str, any] = config.get("tls", {"enabled": True}) +networking_config = config.get("networking", {}) +ipv6_config = networking_config.get("ipv6", {"enabled": False}) + +output = {"tls": tls_config, "ipv6": ipv6_config} + +print(json.dumps(output)) diff --git a/docker/main/rootfs/usr/local/nginx/get_nginx_settings.py b/docker/main/rootfs/usr/local/nginx/get_nginx_settings.py deleted file mode 100644 index 79cda3686..000000000 --- a/docker/main/rootfs/usr/local/nginx/get_nginx_settings.py +++ /dev/null @@ -1,62 +0,0 @@ -"""Prints the nginx settings as json to stdout.""" - -import json -import os -import sys -from typing import Any - -from ruamel.yaml import YAML - -sys.path.insert(0, "/opt/frigate") -from frigate.util.config import find_config_file - -sys.path.remove("/opt/frigate") - -yaml = YAML() - -config_file = find_config_file() - -try: - with open(config_file) as f: - raw_config = f.read() - - if config_file.endswith((".yaml", ".yml")): - config: dict[str, Any] = yaml.load(raw_config) - elif config_file.endswith(".json"): - config: dict[str, Any] = json.loads(raw_config) -except FileNotFoundError: - config: dict[str, Any] = {} - -tls_config: dict[str, Any] = config.get("tls", {}) -tls_config.setdefault("enabled", True) - -networking_config: dict[str, Any] = config.get("networking", {}) -ipv6_config: dict[str, Any] = networking_config.get("ipv6", {}) -ipv6_config.setdefault("enabled", False) - -listen_config: dict[str, Any] = networking_config.get("listen", {}) -listen_config.setdefault("internal", 5000) -listen_config.setdefault("external", 8971) - -# handle case where internal port is a string with ip:port -internal_port = listen_config["internal"] -if type(internal_port) is str: - internal_port = int(internal_port.split(":")[-1]) 
-listen_config["internal_port"] = internal_port - -# handle case where external port is a string with ip:port -external_port = listen_config["external"] -if type(external_port) is str: - external_port = int(external_port.split(":")[-1]) -listen_config["external_port"] = external_port - -base_path = os.environ.get("FRIGATE_BASE_PATH", "") - -result: dict[str, Any] = { - "tls": tls_config, - "ipv6": ipv6_config, - "listen": listen_config, - "base_path": base_path, -} - -print(json.dumps(result)) diff --git a/docker/main/rootfs/usr/local/nginx/templates/base_path.gotmpl b/docker/main/rootfs/usr/local/nginx/templates/base_path.gotmpl index ca945ba1f..ace4443ee 100644 --- a/docker/main/rootfs/usr/local/nginx/templates/base_path.gotmpl +++ b/docker/main/rootfs/usr/local/nginx/templates/base_path.gotmpl @@ -7,7 +7,7 @@ location ^~ {{ .base_path }}/ { # remove base_url from the path before passing upstream rewrite ^{{ .base_path }}/(.*) /$1 break; - proxy_pass $scheme://127.0.0.1:{{ .listen.external_port }}; + proxy_pass $scheme://127.0.0.1:8971; proxy_http_version 1.1; proxy_set_header Upgrade $http_upgrade; proxy_set_header Connection "upgrade"; diff --git a/docker/main/rootfs/usr/local/nginx/templates/listen.gotmpl b/docker/main/rootfs/usr/local/nginx/templates/listen.gotmpl index 628784b60..066f872cb 100644 --- a/docker/main/rootfs/usr/local/nginx/templates/listen.gotmpl +++ b/docker/main/rootfs/usr/local/nginx/templates/listen.gotmpl @@ -1,36 +1,45 @@ + # Internal (IPv4 always; IPv6 optional) -listen {{ .listen.internal }}; -{{ if .ipv6.enabled }}listen [::]:{{ .listen.internal_port }};{{ end }} +listen 5000; +{{ if .ipv6 }}{{ if .ipv6.enabled }}listen [::]:5000;{{ end }}{{ end }} + # intended for external traffic, protected by auth -{{ if .tls.enabled }} - # external HTTPS (IPv4 always; IPv6 optional) - listen {{ .listen.external }} ssl; - {{ if .ipv6.enabled }}listen [::]:{{ .listen.external_port }} ssl;{{ end }} +{{ if .tls }} + {{ if .tls.enabled }} + # external 
HTTPS (IPv4 always; IPv6 optional) + listen 8971 ssl; + {{ if .ipv6 }}{{ if .ipv6.enabled }}listen [::]:8971 ssl;{{ end }}{{ end }} - ssl_certificate /etc/letsencrypt/live/frigate/fullchain.pem; - ssl_certificate_key /etc/letsencrypt/live/frigate/privkey.pem; + ssl_certificate /etc/letsencrypt/live/frigate/fullchain.pem; + ssl_certificate_key /etc/letsencrypt/live/frigate/privkey.pem; - # generated 2024-06-01, Mozilla Guideline v5.7, nginx 1.25.3, OpenSSL 1.1.1w, modern configuration, no OCSP - # https://ssl-config.mozilla.org/#server=nginx&version=1.25.3&config=modern&openssl=1.1.1w&ocsp=false&guideline=5.7 - ssl_session_timeout 1d; - ssl_session_cache shared:MozSSL:10m; # about 40000 sessions - ssl_session_tickets off; + # generated 2024-06-01, Mozilla Guideline v5.7, nginx 1.25.3, OpenSSL 1.1.1w, modern configuration, no OCSP + # https://ssl-config.mozilla.org/#server=nginx&version=1.25.3&config=modern&openssl=1.1.1w&ocsp=false&guideline=5.7 + ssl_session_timeout 1d; + ssl_session_cache shared:MozSSL:10m; # about 40000 sessions + ssl_session_tickets off; - # modern configuration - ssl_protocols TLSv1.3; - ssl_prefer_server_ciphers off; + # modern configuration + ssl_protocols TLSv1.3; + ssl_prefer_server_ciphers off; - # HSTS (ngx_http_headers_module is required) (63072000 seconds) - add_header Strict-Transport-Security "max-age=63072000" always; + # HSTS (ngx_http_headers_module is required) (63072000 seconds) + add_header Strict-Transport-Security "max-age=63072000" always; - # ACME challenge location - location /.well-known/acme-challenge/ { - default_type "text/plain"; - root /etc/letsencrypt/www; - } + # ACME challenge location + location /.well-known/acme-challenge/ { + default_type "text/plain"; + root /etc/letsencrypt/www; + } + {{ else }} + # external HTTP (IPv4 always; IPv6 optional) + listen 8971; + {{ if .ipv6 }}{{ if .ipv6.enabled }}listen [::]:8971;{{ end }}{{ end }} + {{ end }} {{ else }} - # (No tls) default to HTTP (IPv4 always; IPv6 optional) - 
listen {{ .listen.external }}; - {{ if .ipv6.enabled }}listen [::]:{{ .listen.external_port }};{{ end }} + # (No tls section) default to HTTP (IPv4 always; IPv6 optional) + listen 8971; + {{ if .ipv6 }}{{ if .ipv6.enabled }}listen [::]:8971;{{ end }}{{ end }} {{ end }} + diff --git a/docker/rocm/Dockerfile b/docker/rocm/Dockerfile index 42447a26b..9edcd6058 100644 --- a/docker/rocm/Dockerfile +++ b/docker/rocm/Dockerfile @@ -13,7 +13,7 @@ ARG ROCM RUN apt update -qq && \ apt install -y wget gpg && \ - wget -O rocm.deb https://repo.radeon.com/amdgpu-install/7.2/ubuntu/jammy/amdgpu-install_7.2.70200-1_all.deb && \ + wget -O rocm.deb https://repo.radeon.com/amdgpu-install/7.1.1/ubuntu/jammy/amdgpu-install_7.1.1.70101-1_all.deb && \ apt install -y ./rocm.deb && \ apt update && \ apt install -qq -y rocm @@ -56,8 +56,6 @@ FROM scratch AS rocm-dist ARG ROCM -# Copy HIP headers required for MIOpen JIT (BuildHip) / HIPRTC at runtime -COPY --from=rocm /opt/rocm-${ROCM}/include/ /opt/rocm-${ROCM}/include/ COPY --from=rocm /opt/rocm-$ROCM/bin/rocminfo /opt/rocm-$ROCM/bin/migraphx-driver /opt/rocm-$ROCM/bin/ # Copy MIOpen database files for gfx10xx and gfx11xx only (RDNA2/RDNA3) COPY --from=rocm /opt/rocm-$ROCM/share/miopen/db/*gfx10* /opt/rocm-$ROCM/share/miopen/db/ diff --git a/docker/rocm/requirements-wheels-rocm.txt b/docker/rocm/requirements-wheels-rocm.txt index da22f2ff6..b6a202f93 100644 --- a/docker/rocm/requirements-wheels-rocm.txt +++ b/docker/rocm/requirements-wheels-rocm.txt @@ -1 +1 @@ -onnxruntime-migraphx @ https://github.com/NickM-27/frigate-onnxruntime-rocm/releases/download/v7.2.0/onnxruntime_migraphx-1.23.1-cp311-cp311-linux_x86_64.whl \ No newline at end of file +onnxruntime-migraphx @ https://github.com/NickM-27/frigate-onnxruntime-rocm/releases/download/v7.1.0/onnxruntime_migraphx-1.23.1-cp311-cp311-linux_x86_64.whl \ No newline at end of file diff --git a/docker/rocm/rocm.hcl b/docker/rocm/rocm.hcl index 710bfe995..6595066c5 100644 --- 
a/docker/rocm/rocm.hcl +++ b/docker/rocm/rocm.hcl @@ -1,5 +1,5 @@ variable "ROCM" { - default = "7.2.0" + default = "7.1.1" } variable "HSA_OVERRIDE_GFX_VERSION" { default = "" diff --git a/docs/docs/configuration/advanced.md b/docs/docs/configuration/advanced.md index b8dbffd62..17eb2053d 100644 --- a/docs/docs/configuration/advanced.md +++ b/docs/docs/configuration/advanced.md @@ -155,32 +155,33 @@ services: ### Enabling IPv6 -IPv6 is disabled by default, to enable IPv6 modify your Frigate configuration as follows: +IPv6 is disabled by default, to enable IPv6 listen.gotmpl needs to be bind mounted with IPv6 enabled. For example: -```yaml -networking: - ipv6: - enabled: True +``` +{{ if not .enabled }} +# intended for external traffic, protected by auth +listen 8971; +{{ else }} +# intended for external traffic, protected by auth +listen 8971 ssl; + +# intended for internal traffic, not protected by auth +listen 5000; ``` -### Listen on different ports +becomes -You can change the ports Nginx uses for listening using Frigate's configuration file. The internal port (unauthenticated) and external port (authenticated) can be changed independently. You can also specify an IP address using the format `ip:port` if you wish to bind the port to a specific interface. This may be useful for example to prevent exposing the internal port outside the container. - -For example: - -```yaml -networking: - listen: - internal: 127.0.0.1:5000 - external: 8971 ``` +{{ if not .enabled }} +# intended for external traffic, protected by auth +listen [::]:8971 ipv6only=off; +{{ else }} +# intended for external traffic, protected by auth +listen [::]:8971 ipv6only=off ssl; -:::warning - -This setting is for advanced users. For the majority of use cases it's recommended to change the `ports` section of your Docker compose file or use the Docker `run` `--publish` option instead, e.g. `-p 443:8971`. Changing Frigate's ports may break some integrations. 
- -::: +# intended for internal traffic, not protected by auth +listen [::]:5000 ipv6only=off; +``` ## Base path @@ -233,7 +234,7 @@ To do this: ### Custom go2rtc version -Frigate currently includes go2rtc v1.9.13, there may be certain cases where you want to run a different version of go2rtc. +Frigate currently includes go2rtc v1.9.10, there may be certain cases where you want to run a different version of go2rtc. To do this: diff --git a/docs/docs/configuration/camera_specific.md b/docs/docs/configuration/camera_specific.md index aae8c57b4..50d5c52aa 100644 --- a/docs/docs/configuration/camera_specific.md +++ b/docs/docs/configuration/camera_specific.md @@ -244,7 +244,7 @@ go2rtc: - rtspx://192.168.1.1:7441/abcdefghijk ``` -[See the go2rtc docs for more information](https://github.com/AlexxIT/go2rtc/tree/v1.9.13#source-rtsp) +[See the go2rtc docs for more information](https://github.com/AlexxIT/go2rtc/tree/v1.9.10#source-rtsp) In the Unifi 2.0 update Unifi Protect Cameras had a change in audio sample rate which causes issues for ffmpeg. The input rate needs to be set for record if used directly with unifi protect. diff --git a/docs/docs/configuration/genai/config.md b/docs/docs/configuration/genai/config.md index 6a004e353..e1f79b744 100644 --- a/docs/docs/configuration/genai/config.md +++ b/docs/docs/configuration/genai/config.md @@ -5,7 +5,7 @@ title: Configuring Generative AI ## Configuration -A Generative AI provider can be configured in the global config, which will make the Generative AI features available for use. There are currently 4 native providers available to integrate with Frigate. Other providers that support the OpenAI standard API can also be used. See the OpenAI section below. +A Generative AI provider can be configured in the global config, which will make the Generative AI features available for use. There are currently 3 native providers available to integrate with Frigate. 
Other providers that support the OpenAI standard API can also be used. See the OpenAI section below. To use Generative AI, you must define a single provider at the global level of your Frigate configuration. If the provider you choose requires an API key, you may either directly paste it in your configuration, or store it in an environment variable prefixed with `FRIGATE_`. @@ -77,46 +77,8 @@ genai: provider: ollama base_url: http://localhost:11434 model: qwen3-vl:4b - provider_options: # other Ollama client options can be defined - keep_alive: -1 - options: - num_ctx: 8192 # make sure the context matches other services that are using ollama ``` -## llama.cpp - -[llama.cpp](https://github.com/ggml-org/llama.cpp) is a C++ implementation of LLaMA that provides a high-performance inference server. Using llama.cpp directly gives you access to all native llama.cpp options and parameters. - -:::warning - -Using llama.cpp on CPU is not recommended, high inference times make using Generative AI impractical. - -::: - -It is highly recommended to host the llama.cpp server on a machine with a discrete graphics card, or on an Apple silicon Mac for best performance. - -### Supported Models - -You must use a vision capable model with Frigate. The llama.cpp server supports various vision models in GGUF format. - -### Configuration - -```yaml -genai: - provider: llamacpp - base_url: http://localhost:8080 - model: your-model-name - provider_options: - temperature: 0.7 - repeat_penalty: 1.05 - top_p: 0.8 - top_k: 40 - min_p: 0.05 - seed: -1 -``` - -All llama.cpp native options can be passed through `provider_options`, including `temperature`, `top_k`, `top_p`, `min_p`, `repeat_penalty`, `repeat_last_n`, `seed`, `grammar`, and more. See the [llama.cpp server documentation](https://github.com/ggml-org/llama.cpp/blob/master/tools/server/README.md) for a complete list of available parameters. 
- ## Google Gemini Google Gemini has a [free tier](https://ai.google.dev/pricing) for the API, however the limits may not be sufficient for standard Frigate usage. Choose a plan appropriate for your installation. @@ -223,4 +185,4 @@ genai: base_url: https://instance.cognitiveservices.azure.com/openai/responses?api-version=2025-04-01-preview model: gpt-5-mini api_key: "{FRIGATE_OPENAI_API_KEY}" -``` \ No newline at end of file +``` diff --git a/docs/docs/configuration/genai/objects.md b/docs/docs/configuration/genai/objects.md index c878f5ec8..e3ae31393 100644 --- a/docs/docs/configuration/genai/objects.md +++ b/docs/docs/configuration/genai/objects.md @@ -11,7 +11,7 @@ By default, descriptions will be generated for all tracked objects and all zones Optionally, you can generate the description using a snapshot (if enabled) by setting `use_snapshot` to `True`. By default, this is set to `False`, which sends the uncompressed images from the `detect` stream collected over the object's lifetime to the model. Once the object lifecycle ends, only a single compressed and cropped thumbnail is saved with the tracked object. Using a snapshot might be useful when you want to _regenerate_ a tracked object's description as it will provide the AI with a higher-quality image (typically downscaled by the AI itself) than the cropped/compressed thumbnail. Using a snapshot otherwise has a trade-off in that only a single image is sent to your provider, which will limit the model's ability to determine object movement or direction. -Generative AI object descriptions can also be toggled dynamically for a camera via MQTT with the topic `frigate//object_descriptions/set`. See the [MQTT documentation](/integrations/mqtt#frigatecamera_nameobject_descriptionsset). +Generative AI object descriptions can also be toggled dynamically for a camera via MQTT with the topic `frigate//object_descriptions/set`. See the [MQTT documentation](/integrations/mqtt/#frigatecamera_nameobjectdescriptionsset). 
## Usage and Best Practices @@ -75,4 +75,4 @@ Many providers also have a public facing chat interface for their models. Downlo - OpenAI - [ChatGPT](https://chatgpt.com) - Gemini - [Google AI Studio](https://aistudio.google.com) -- Ollama - [Open WebUI](https://docs.openwebui.com/) \ No newline at end of file +- Ollama - [Open WebUI](https://docs.openwebui.com/) diff --git a/docs/docs/configuration/genai/review_summaries.md b/docs/docs/configuration/genai/review_summaries.md index c6f5e53ec..df287446c 100644 --- a/docs/docs/configuration/genai/review_summaries.md +++ b/docs/docs/configuration/genai/review_summaries.md @@ -7,7 +7,7 @@ Generative AI can be used to automatically generate structured summaries of revi Requests for a summary are requested automatically to your AI provider for alert review items when the activity has ended, they can also be optionally enabled for detections as well. -Generative AI review summaries can also be toggled dynamically for a [camera via MQTT](/integrations/mqtt#frigatecamera_namereview_descriptionsset). +Generative AI review summaries can also be toggled dynamically for a [camera via MQTT](/integrations/mqtt/#frigatecamera_namereviewdescriptionsset). ## Review Summary Usage and Best Practices diff --git a/docs/docs/configuration/record.md b/docs/docs/configuration/record.md index eb5d736e4..4dfd8b77c 100644 --- a/docs/docs/configuration/record.md +++ b/docs/docs/configuration/record.md @@ -139,13 +139,7 @@ record: :::tip -When using `hwaccel_args`, hardware encoding is used for timelapse generation. This setting can be overridden for a specific camera (e.g., when camera resolution exceeds hardware encoder limits); set `cameras..record.export.hwaccel_args` with the appropriate settings. Using an unrecognized value or empty string will fall back to software encoding (libx264). - -::: - -:::tip - -The encoder determines its own behavior so the resulting file size may be undesirably large. 
+When using `hwaccel_args` globally hardware encoding is used for time lapse generation. The encoder determines its own behavior so the resulting file size may be undesirably large. To reduce the output file size the ffmpeg parameter `-qp n` can be utilized (where `n` stands for the value of the quantisation parameter). The value can be adjusted to get an acceptable tradeoff between quality and file size for the given scenario. ::: @@ -154,16 +148,19 @@ To reduce the output file size the ffmpeg parameter `-qp n` can be utilized (whe Apple devices running the Safari browser may fail to playback h.265 recordings. The [apple compatibility option](../configuration/camera_specific.md#h265-cameras-via-safari) should be used to ensure seamless playback on Apple devices. -## Syncing Media Files With Disk +## Syncing Recordings With Disk -Media files (event snapshots, event thumbnails, review thumbnails, previews, exports, and recordings) can become orphaned when database entries are deleted but the corresponding files remain on disk. +In some cases the recordings files may be deleted but Frigate will not know this has happened. Recordings sync can be enabled which will tell Frigate to check the file system and delete any db entries for files which don't exist. -Normal operation may leave small numbers of orphaned files until Frigate's scheduled cleanup, but crashes, configuration changes, or upgrades may cause more orphaned files that Frigate does not clean up. This feature checks the file system for media files and removes any that are not referenced in the database. +```yaml +record: + sync_recordings: True +``` -The Maintenance pane in the Frigate UI or an API endpoint `POST /api/media/sync` can be used to trigger a media sync. When using the API, a job ID is returned and the operation continues on the server. Status can be checked with the `/api/media/sync/status/{job_id}` endpoint. 
+This feature is meant to fix variations in files, not completely delete entries in the database. If you delete all of your media, don't use `sync_recordings`, just stop Frigate, delete the `frigate.db` database, and restart. :::warning -This operation uses considerable CPU resources and includes a safety threshold that aborts if more than 50% of files would be deleted. Only run when necessary. If you set `force: true` the safety threshold will be bypassed; do not use `force` unless you are certain the deletions are intended. +The sync operation uses considerable CPU resources and in most cases is not needed, only enable when necessary. ::: diff --git a/docs/docs/configuration/reference.md b/docs/docs/configuration/reference.md index 5c3ca4ea8..206d7012e 100644 --- a/docs/docs/configuration/reference.md +++ b/docs/docs/configuration/reference.md @@ -73,19 +73,11 @@ tls: # Optional: Enable TLS for port 8971 (default: shown below) enabled: True -# Optional: Networking configuration +# Optional: IPv6 configuration networking: # Optional: Enable IPv6 on 5000, and 8971 if tls is configured (default: shown below) ipv6: enabled: False - # Optional: Override ports Frigate uses for listening (defaults: shown below) - # An IP address may also be provided to bind to a specific interface, e.g. ip:port - # NOTE: This setting is for advanced users and may break some integrations. The majority - # of users should change ports in the docker compose file - # or use the docker run `--publish` option to select a different port. - listen: - internal: 5000 - external: 8971 # Optional: Proxy configuration proxy: @@ -518,6 +510,8 @@ record: # Optional: Number of minutes to wait between cleanup runs (default: shown below) # This can be used to reduce the frequency of deleting recording segments from disk if you want to minimize i/o expire_interval: 60 + # Optional: Two-way sync recordings database with disk on startup and once a day (default: shown below). 
+ sync_recordings: False # Optional: Continuous retention settings continuous: # Optional: Number of days to retain recordings regardless of tracked objects or motion (default: shown below) @@ -540,8 +534,6 @@ record: # The -r (framerate) dictates how smooth the output video is. # So the args would be -vf setpts=0.02*PTS -r 30 in that case. timelapse_args: "-vf setpts=0.04*PTS -r 30" - # Optional: Global hardware acceleration settings for timelapse exports. (default: inherit) - hwaccel_args: auto # Optional: Recording Preview Settings preview: # Optional: Quality of recording preview (default: shown below). @@ -760,7 +752,7 @@ classification: interval: None # Optional: Restream configuration -# Uses https://github.com/AlexxIT/go2rtc (v1.9.13) +# Uses https://github.com/AlexxIT/go2rtc (v1.9.10) # NOTE: The default go2rtc API port (1984) must be used, # changing this port for the integrated go2rtc instance is not supported. go2rtc: @@ -846,11 +838,6 @@ cameras: # Optional: camera specific output args (default: inherit) # output_args: - # Optional: camera specific hwaccel args for timelapse export (default: inherit) - # record: - # export: - # hwaccel_args: - # Optional: timeout for highest scoring image before allowing it # to be replaced by a newer image. (default: shown below) best_image_timeout: 60 diff --git a/docs/docs/configuration/restream.md b/docs/docs/configuration/restream.md index a3c11f2d0..ebd506294 100644 --- a/docs/docs/configuration/restream.md +++ b/docs/docs/configuration/restream.md @@ -7,7 +7,7 @@ title: Restream Frigate can restream your video feed as an RTSP feed for other applications such as Home Assistant to utilize it at `rtsp://:8554/`. Port 8554 must be open. [This allows you to use a video feed for detection in Frigate and Home Assistant live view at the same time without having to make two separate connections to the camera](#reduce-connections-to-camera). 
The video feed is copied from the original video feed directly to avoid re-encoding. This feed does not include any annotation by Frigate. -Frigate uses [go2rtc](https://github.com/AlexxIT/go2rtc/tree/v1.9.13) to provide its restream and MSE/WebRTC capabilities. The go2rtc config is hosted at the `go2rtc` in the config, see [go2rtc docs](https://github.com/AlexxIT/go2rtc/tree/v1.9.13#configuration) for more advanced configurations and features. +Frigate uses [go2rtc](https://github.com/AlexxIT/go2rtc/tree/v1.9.10) to provide its restream and MSE/WebRTC capabilities. The go2rtc config is hosted at the `go2rtc` in the config, see [go2rtc docs](https://github.com/AlexxIT/go2rtc/tree/v1.9.10#configuration) for more advanced configurations and features. :::note @@ -206,7 +206,7 @@ Enabling arbitrary exec sources allows execution of arbitrary commands through g ## Advanced Restream Configurations -The [exec](https://github.com/AlexxIT/go2rtc/tree/v1.9.13#source-exec) source in go2rtc can be used for custom ffmpeg commands. An example is below: +The [exec](https://github.com/AlexxIT/go2rtc/tree/v1.9.10#source-exec) source in go2rtc can be used for custom ffmpeg commands. An example is below: :::warning diff --git a/docs/docs/guides/configuring_go2rtc.md b/docs/docs/guides/configuring_go2rtc.md index 8b01de3e7..ca50a90d3 100644 --- a/docs/docs/guides/configuring_go2rtc.md +++ b/docs/docs/guides/configuring_go2rtc.md @@ -11,7 +11,7 @@ Use of the bundled go2rtc is optional. You can still configure FFmpeg to connect ## Setup a go2rtc stream -First, you will want to configure go2rtc to connect to your camera stream by adding the stream you want to use for live view in your Frigate config file. Avoid changing any other parts of your config at this step. Note that go2rtc supports [many different stream types](https://github.com/AlexxIT/go2rtc/tree/v1.9.13#module-streams), not just rtsp. 
+First, you will want to configure go2rtc to connect to your camera stream by adding the stream you want to use for live view in your Frigate config file. Avoid changing any other parts of your config at this step. Note that go2rtc supports [many different stream types](https://github.com/AlexxIT/go2rtc/tree/v1.9.10#module-streams), not just rtsp. :::tip @@ -47,8 +47,8 @@ After adding this to the config, restart Frigate and try to watch the live strea - Check Video Codec: - If the camera stream works in go2rtc but not in your browser, the video codec might be unsupported. - - If using H265, switch to H264. Refer to [video codec compatibility](https://github.com/AlexxIT/go2rtc/tree/v1.9.13#codecs-madness) in go2rtc documentation. - - If unable to switch from H265 to H264, or if the stream format is different (e.g., MJPEG), re-encode the video using [FFmpeg parameters](https://github.com/AlexxIT/go2rtc/tree/v1.9.13#source-ffmpeg). It supports rotating and resizing video feeds and hardware acceleration. Keep in mind that transcoding video from one format to another is a resource intensive task and you may be better off using the built-in jsmpeg view. + - If using H265, switch to H264. Refer to [video codec compatibility](https://github.com/AlexxIT/go2rtc/tree/v1.9.10#codecs-madness) in go2rtc documentation. + - If unable to switch from H265 to H264, or if the stream format is different (e.g., MJPEG), re-encode the video using [FFmpeg parameters](https://github.com/AlexxIT/go2rtc/tree/v1.9.10#source-ffmpeg). It supports rotating and resizing video feeds and hardware acceleration. Keep in mind that transcoding video from one format to another is a resource intensive task and you may be better off using the built-in jsmpeg view. 
```yaml go2rtc: streams: diff --git a/docs/sidebars.ts b/docs/sidebars.ts index a4c1bca9d..ea0d2f5c8 100644 --- a/docs/sidebars.ts +++ b/docs/sidebars.ts @@ -28,7 +28,7 @@ const sidebars: SidebarsConfig = { { type: "link", label: "Go2RTC Configuration Reference", - href: "https://github.com/AlexxIT/go2rtc/tree/v1.9.13#configuration", + href: "https://github.com/AlexxIT/go2rtc/tree/v1.9.10#configuration", } as PropSidebarItemLink, ], Detectors: [ diff --git a/docs/static/frigate-api.yaml b/docs/static/frigate-api.yaml index 2063514ac..f1a00fe61 100644 --- a/docs/static/frigate-api.yaml +++ b/docs/static/frigate-api.yaml @@ -331,59 +331,6 @@ paths: application/json: schema: $ref: "#/components/schemas/HTTPValidationError" - /media/sync: - post: - tags: - - App - summary: Start media sync job - description: |- - Start an asynchronous media sync job to find and (optionally) remove orphaned media files. - Returns 202 with job details when queued, or 409 if a job is already running. - operationId: sync_media_media_sync_post - requestBody: - required: true - content: - application/json: - responses: - "202": - description: Accepted - Job queued - "409": - description: Conflict - Job already running - "422": - description: Validation Error - - /media/sync/current: - get: - tags: - - App - summary: Get current media sync job - description: |- - Retrieve the current running media sync job, if any. Returns the job details or null when no job is active. - operationId: get_media_sync_current_media_sync_current_get - responses: - "200": - description: Successful Response - "422": - description: Validation Error - - /media/sync/status/{job_id}: - get: - tags: - - App - summary: Get media sync job status - description: |- - Get status and results for the specified media sync job id. Returns 200 with job details including results, or 404 if the job is not found. 
- operationId: get_media_sync_status_media_sync_status__job_id__get - parameters: - - name: job_id - in: path - responses: - "200": - description: Successful Response - "404": - description: Not Found - Job not found - "422": - description: Validation Error /faces/train/{name}/classify: post: tags: @@ -3200,7 +3147,6 @@ paths: duration: 30 include_recording: true draw: {} - pre_capture: null responses: "200": description: Successful Response @@ -5003,12 +4949,6 @@ components: - type: "null" title: Draw default: {} - pre_capture: - anyOf: - - type: integer - - type: "null" - title: Pre Capture Seconds - default: null type: object title: EventsCreateBody EventsDeleteBody: diff --git a/frigate/api/app.py b/frigate/api/app.py index 126c613a7..440adfce4 100644 --- a/frigate/api/app.py +++ b/frigate/api/app.py @@ -30,22 +30,15 @@ from frigate.api.auth import ( require_role, ) from frigate.api.defs.query.app_query_parameters import AppTimelineHourlyQueryParameters -from frigate.api.defs.request.app_body import AppConfigSetBody, MediaSyncBody +from frigate.api.defs.request.app_body import AppConfigSetBody from frigate.api.defs.tags import Tags from frigate.config import FrigateConfig from frigate.config.camera.updater import ( CameraConfigUpdateEnum, CameraConfigUpdateTopic, ) -from frigate.ffmpeg_presets import FFMPEG_HWACCEL_VAAPI, _gpu_selector -from frigate.jobs.media_sync import ( - get_current_media_sync_job, - get_media_sync_job_by_id, - start_media_sync_job, -) from frigate.models import Event, Timeline from frigate.stats.prometheus import get_metrics, update_metrics -from frigate.types import JobStatusTypesEnum from frigate.util.builtin import ( clean_camera_user_pass, flatten_config_data, @@ -470,15 +463,7 @@ def config_set(request: Request, body: AppConfigSetBody): @router.get("/vainfo", dependencies=[Depends(allow_any_authenticated())]) def vainfo(): - # Use LibvaGpuSelector to pick an appropriate libva device (if available) - selected_gpu = "" - try: - 
selected_gpu = _gpu_selector.get_gpu_arg(FFMPEG_HWACCEL_VAAPI, 0) or "" - except Exception: - selected_gpu = "" - - # If selected_gpu is empty, pass None to vainfo_hwaccel to run plain `vainfo`. - vainfo = vainfo_hwaccel(device_name=selected_gpu or None) + vainfo = vainfo_hwaccel() return JSONResponse( content={ "return_code": vainfo.returncode, @@ -613,98 +598,6 @@ def restart(): ) -@router.post( - "/media/sync", - dependencies=[Depends(require_role(["admin"]))], - summary="Start media sync job", - description="""Start an asynchronous media sync job to find and (optionally) remove orphaned media files. - Returns 202 with job details when queued, or 409 if a job is already running.""", -) -def sync_media(body: MediaSyncBody = Body(...)): - """Start async media sync job - remove orphaned files. - - Syncs specified media types: event snapshots, event thumbnails, review thumbnails, - previews, exports, and/or recordings. Job runs in background; use /media/sync/current - or /media/sync/status/{job_id} to check status. - - Args: - body: MediaSyncBody with dry_run flag and media_types list. - media_types can include: 'all', 'event_snapshots', 'event_thumbnails', - 'review_thumbnails', 'previews', 'exports', 'recordings' - - Returns: - 202 Accepted with job_id, or 409 Conflict if job already running. 
- """ - job_id = start_media_sync_job( - dry_run=body.dry_run, media_types=body.media_types, force=body.force - ) - - if job_id is None: - # A job is already running - current = get_current_media_sync_job() - return JSONResponse( - content={ - "error": "A media sync job is already running", - "current_job_id": current.id if current else None, - }, - status_code=409, - ) - - return JSONResponse( - content={ - "job": { - "job_type": "media_sync", - "status": JobStatusTypesEnum.queued, - "id": job_id, - } - }, - status_code=202, - ) - - -@router.get( - "/media/sync/current", - dependencies=[Depends(require_role(["admin"]))], - summary="Get current media sync job", - description="""Retrieve the current running media sync job, if any. Returns the job details - or null when no job is active.""", -) -def get_media_sync_current(): - """Get the current running media sync job, if any.""" - job = get_current_media_sync_job() - - if job is None: - return JSONResponse(content={"job": None}, status_code=200) - - return JSONResponse( - content={"job": job.to_dict()}, - status_code=200, - ) - - -@router.get( - "/media/sync/status/{job_id}", - dependencies=[Depends(require_role(["admin"]))], - summary="Get media sync job status", - description="""Get status and results for the specified media sync job id. 
Returns 200 with - job details including results, or 404 if the job is not found.""", -) -def get_media_sync_status(job_id: str): - """Get the status of a specific media sync job.""" - job = get_media_sync_job_by_id(job_id) - - if job is None: - return JSONResponse( - content={"error": "Job not found"}, - status_code=404, - ) - - return JSONResponse( - content={"job": job.to_dict()}, - status_code=200, - ) - - @router.get("/labels", dependencies=[Depends(allow_any_authenticated())]) def get_labels(camera: str = ""): try: diff --git a/frigate/api/auth.py b/frigate/api/auth.py index 04a5bd19a..e0a6ec924 100644 --- a/frigate/api/auth.py +++ b/frigate/api/auth.py @@ -26,7 +26,7 @@ from frigate.api.defs.request.app_body import ( AppPutRoleBody, ) from frigate.api.defs.tags import Tags -from frigate.config import AuthConfig, NetworkingConfig, ProxyConfig +from frigate.config import AuthConfig, ProxyConfig from frigate.const import CONFIG_DIR, JWT_SECRET_ENV_VAR, PASSWORD_HASH_ALGORITHM from frigate.models import User @@ -41,7 +41,7 @@ def require_admin_by_default(): endpoints require admin access unless explicitly overridden with allow_public(), allow_any_authenticated(), or require_role(). - Internal port always has admin role set by the /auth endpoint, + Port 5000 (internal) always has admin role set by the /auth endpoint, so this check passes automatically for internal requests. Certain paths are exempted from the global admin check because they must @@ -130,7 +130,7 @@ def require_admin_by_default(): pass # For all other paths, require admin role - # Internal port requests have admin role set automatically + # Port 5000 (internal) requests have admin role set automatically role = request.headers.get("remote-role") if role == "admin": return @@ -143,17 +143,6 @@ def require_admin_by_default(): return admin_checker -def _is_authenticated(request: Request) -> bool: - """ - Helper to determine if a request is from an authenticated user. 
- - Returns True if the request has a valid authenticated user (not anonymous). - Internal port requests are considered anonymous despite having admin role. - """ - username = request.headers.get("remote-user") - return username is not None and username != "anonymous" - - def allow_public(): """ Override dependency to allow unauthenticated access to an endpoint. @@ -182,7 +171,6 @@ def allow_any_authenticated(): Rejects: - Requests with no remote-user header (did not pass through /auth endpoint) - - External port requests with anonymous user (auth disabled, no proxy auth) Example: @router.get("/authenticated-endpoint", dependencies=[Depends(allow_any_authenticated())]) @@ -191,14 +179,8 @@ def allow_any_authenticated(): async def auth_checker(request: Request): # Ensure a remote-user has been set by the /auth endpoint username = request.headers.get("remote-user") - - # Internal port requests have admin role and should be allowed - role = request.headers.get("remote-role") - - if role != "admin": - if username is None or not _is_authenticated(request): - raise HTTPException(status_code=401, detail="Authentication required") - + if username is None: + raise HTTPException(status_code=401, detail="Authentication required") return return auth_checker @@ -588,18 +570,12 @@ def resolve_role( def auth(request: Request): auth_config: AuthConfig = request.app.frigate_config.auth proxy_config: ProxyConfig = request.app.frigate_config.proxy - networking_config: NetworkingConfig = request.app.frigate_config.networking success_response = Response("", status_code=202) - # handle case where internal port is a string with ip:port - internal_port = networking_config.listen.internal - if type(internal_port) is str: - internal_port = int(internal_port.split(":")[-1]) - # dont require auth if the request is on the internal port # this header is set by Frigate's nginx proxy, so it cant be spoofed - if int(request.headers.get("x-server-port", default=0)) == internal_port: + if 
int(request.headers.get("x-server-port", default=0)) == 5000: success_response.headers["remote-user"] = "anonymous" success_response.headers["remote-role"] = "admin" return success_response diff --git a/frigate/api/chat.py b/frigate/api/chat.py deleted file mode 100644 index 1f5cc2297..000000000 --- a/frigate/api/chat.py +++ /dev/null @@ -1,642 +0,0 @@ -"""Chat and LLM tool calling APIs.""" - -import base64 -import json -import logging -from datetime import datetime, timezone -from typing import Any, Dict, List, Optional - -import cv2 -from fastapi import APIRouter, Body, Depends, Request -from fastapi.responses import JSONResponse -from pydantic import BaseModel - -from frigate.api.auth import ( - allow_any_authenticated, - get_allowed_cameras_for_filter, -) -from frigate.api.defs.query.events_query_parameters import EventsQueryParams -from frigate.api.defs.request.chat_body import ChatCompletionRequest -from frigate.api.defs.response.chat_response import ( - ChatCompletionResponse, - ChatMessageResponse, -) -from frigate.api.defs.tags import Tags -from frigate.api.event import events -from frigate.genai import get_genai_client - -logger = logging.getLogger(__name__) - -router = APIRouter(tags=[Tags.chat]) - - -class ToolExecuteRequest(BaseModel): - """Request model for tool execution.""" - - tool_name: str - arguments: Dict[str, Any] - - -def get_tool_definitions() -> List[Dict[str, Any]]: - """ - Get OpenAI-compatible tool definitions for Frigate. - - Returns a list of tool definitions that can be used with OpenAI-compatible - function calling APIs. - """ - return [ - { - "type": "function", - "function": { - "name": "search_objects", - "description": ( - "Search for detected objects in Frigate by camera, object label, time range, " - "zones, and other filters. Use this to answer questions about when " - "objects were detected, what objects appeared, or to find specific object detections. 
" - "An 'object' in Frigate represents a tracked detection (e.g., a person, package, car)." - ), - "parameters": { - "type": "object", - "properties": { - "camera": { - "type": "string", - "description": "Camera name to filter by (optional). Use 'all' for all cameras.", - }, - "label": { - "type": "string", - "description": "Object label to filter by (e.g., 'person', 'package', 'car').", - }, - "after": { - "type": "string", - "description": "Start time in ISO 8601 format (e.g., '2024-01-01T00:00:00Z').", - }, - "before": { - "type": "string", - "description": "End time in ISO 8601 format (e.g., '2024-01-01T23:59:59Z').", - }, - "zones": { - "type": "array", - "items": {"type": "string"}, - "description": "List of zone names to filter by.", - }, - "limit": { - "type": "integer", - "description": "Maximum number of objects to return (default: 10).", - "default": 10, - }, - }, - }, - "required": [], - }, - }, - { - "type": "function", - "function": { - "name": "get_live_context", - "description": ( - "Get the current detection information for a camera: objects being tracked, " - "zones, timestamps. Use this to understand what is visible in the live view. " - "Call this when the user has included a live image (via include_live_image) or " - "when answering questions about what is happening right now on a specific camera." 
- ), - "parameters": { - "type": "object", - "properties": { - "camera": { - "type": "string", - "description": "Camera name to get live context for.", - }, - }, - "required": ["camera"], - }, - }, - }, - ] - - -@router.get( - "/chat/tools", - dependencies=[Depends(allow_any_authenticated())], - summary="Get available tools", - description="Returns OpenAI-compatible tool definitions for function calling.", -) -def get_tools(request: Request) -> JSONResponse: - """Get list of available tools for LLM function calling.""" - tools = get_tool_definitions() - return JSONResponse(content={"tools": tools}) - - -async def _execute_search_objects( - request: Request, - arguments: Dict[str, Any], - allowed_cameras: List[str], -) -> JSONResponse: - """ - Execute the search_objects tool. - - This searches for detected objects (events) in Frigate using the same - logic as the events API endpoint. - """ - # Parse ISO 8601 timestamps to Unix timestamps if provided - after = arguments.get("after") - before = arguments.get("before") - - if after: - try: - after_dt = datetime.fromisoformat(after.replace("Z", "+00:00")) - after = after_dt.timestamp() - except (ValueError, AttributeError): - logger.warning(f"Invalid 'after' timestamp format: {after}") - after = None - - if before: - try: - before_dt = datetime.fromisoformat(before.replace("Z", "+00:00")) - before = before_dt.timestamp() - except (ValueError, AttributeError): - logger.warning(f"Invalid 'before' timestamp format: {before}") - before = None - - # Convert zones array to comma-separated string if provided - zones = arguments.get("zones") - if isinstance(zones, list): - zones = ",".join(zones) - elif zones is None: - zones = "all" - - # Build query parameters compatible with EventsQueryParams - query_params = EventsQueryParams( - camera=arguments.get("camera", "all"), - cameras=arguments.get("camera", "all"), - label=arguments.get("label", "all"), - labels=arguments.get("label", "all"), - zones=zones, - zone=zones, - 
after=after, - before=before, - limit=arguments.get("limit", 10), - ) - - try: - # Call the events endpoint function directly - # The events function is synchronous and takes params and allowed_cameras - response = events(query_params, allowed_cameras) - - # The response is already a JSONResponse with event data - # Return it as-is for the LLM - return response - except Exception as e: - logger.error(f"Error executing search_objects: {e}", exc_info=True) - return JSONResponse( - content={ - "success": False, - "message": "Error searching objects", - }, - status_code=500, - ) - - -@router.post( - "/chat/execute", - dependencies=[Depends(allow_any_authenticated())], - summary="Execute a tool", - description="Execute a tool function call from an LLM.", -) -async def execute_tool( - request: Request, - body: ToolExecuteRequest = Body(...), - allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter), -) -> JSONResponse: - """ - Execute a tool function call. - - This endpoint receives tool calls from LLMs and executes the corresponding - Frigate operations, returning results in a format the LLM can understand. 
- """ - tool_name = body.tool_name - arguments = body.arguments - - logger.debug(f"Executing tool: {tool_name} with arguments: {arguments}") - - if tool_name == "search_objects": - return await _execute_search_objects(request, arguments, allowed_cameras) - - return JSONResponse( - content={ - "success": False, - "message": f"Unknown tool: {tool_name}", - "tool": tool_name, - }, - status_code=400, - ) - - -async def _execute_get_live_context( - request: Request, - camera: str, - allowed_cameras: List[str], -) -> Dict[str, Any]: - if camera not in allowed_cameras: - return { - "error": f"Camera '{camera}' not found or access denied", - } - - if camera not in request.app.frigate_config.cameras: - return { - "error": f"Camera '{camera}' not found", - } - - try: - frame_processor = request.app.detected_frames_processor - camera_state = frame_processor.camera_states.get(camera) - - if camera_state is None: - return { - "error": f"Camera '{camera}' state not available", - } - - tracked_objects_dict = {} - with camera_state.current_frame_lock: - tracked_objects = camera_state.tracked_objects.copy() - frame_time = camera_state.current_frame_time - - for obj_id, tracked_obj in tracked_objects.items(): - obj_dict = tracked_obj.to_dict() - if obj_dict.get("frame_time") == frame_time: - tracked_objects_dict[obj_id] = { - "label": obj_dict.get("label"), - "zones": obj_dict.get("current_zones", []), - "sub_label": obj_dict.get("sub_label"), - "stationary": obj_dict.get("stationary", False), - } - - return { - "camera": camera, - "timestamp": frame_time, - "detections": list(tracked_objects_dict.values()), - } - - except Exception as e: - logger.error(f"Error executing get_live_context: {e}", exc_info=True) - return { - "error": "Error getting live context", - } - - -async def _get_live_frame_image_url( - request: Request, - camera: str, - allowed_cameras: List[str], -) -> Optional[str]: - """ - Fetch the current live frame for a camera as a base64 data URL. 
- - Returns None if the frame cannot be retrieved. Used when include_live_image - is set to attach the image to the first user message. - """ - if ( - camera not in allowed_cameras - or camera not in request.app.frigate_config.cameras - ): - return None - try: - frame_processor = request.app.detected_frames_processor - if camera not in frame_processor.camera_states: - return None - frame = frame_processor.get_current_frame(camera, {}) - if frame is None: - return None - height, width = frame.shape[:2] - max_dimension = 1024 - if height > max_dimension or width > max_dimension: - scale = max_dimension / max(height, width) - frame = cv2.resize( - frame, - (int(width * scale), int(height * scale)), - interpolation=cv2.INTER_AREA, - ) - _, img_encoded = cv2.imencode(".jpg", frame, [cv2.IMWRITE_JPEG_QUALITY, 85]) - b64 = base64.b64encode(img_encoded.tobytes()).decode("utf-8") - return f"data:image/jpeg;base64,{b64}" - except Exception as e: - logger.debug("Failed to get live frame for %s: %s", camera, e) - return None - - -async def _execute_tool_internal( - tool_name: str, - arguments: Dict[str, Any], - request: Request, - allowed_cameras: List[str], -) -> Dict[str, Any]: - """ - Internal helper to execute a tool and return the result as a dict. - - This is used by the chat completion endpoint to execute tools. 
- """ - if tool_name == "search_objects": - response = await _execute_search_objects(request, arguments, allowed_cameras) - try: - if hasattr(response, "body"): - body_str = response.body.decode("utf-8") - return json.loads(body_str) - elif hasattr(response, "content"): - return response.content - else: - return {} - except (json.JSONDecodeError, AttributeError) as e: - logger.warning(f"Failed to extract tool result: {e}") - return {"error": "Failed to parse tool result"} - elif tool_name == "get_live_context": - camera = arguments.get("camera") - if not camera: - return {"error": "Camera parameter is required"} - return await _execute_get_live_context(request, camera, allowed_cameras) - else: - return {"error": f"Unknown tool: {tool_name}"} - - -@router.post( - "/chat/completion", - response_model=ChatCompletionResponse, - dependencies=[Depends(allow_any_authenticated())], - summary="Chat completion with tool calling", - description=( - "Send a chat message to the configured GenAI provider with tool calling support. " - "The LLM can call Frigate tools to answer questions about your cameras and events." - ), -) -async def chat_completion( - request: Request, - body: ChatCompletionRequest = Body(...), - allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter), -) -> JSONResponse: - """ - Chat completion endpoint with tool calling support. - - This endpoint: - 1. Gets the configured GenAI client - 2. Gets tool definitions - 3. Sends messages + tools to LLM - 4. Handles tool_calls if present - 5. Executes tools and sends results back to LLM - 6. Repeats until final answer - 7. Returns response to user - """ - genai_client = get_genai_client(request.app.frigate_config) - if not genai_client: - return JSONResponse( - content={ - "error": "GenAI is not configured. 
Please configure a GenAI provider in your Frigate config.", - }, - status_code=400, - ) - - tools = get_tool_definitions() - conversation = [] - - current_datetime = datetime.now(timezone.utc) - current_date_str = current_datetime.strftime("%Y-%m-%d") - current_time_str = current_datetime.strftime("%H:%M:%S %Z") - - cameras_info = [] - config = request.app.frigate_config - for camera_id in allowed_cameras: - if camera_id not in config.cameras: - continue - camera_config = config.cameras[camera_id] - friendly_name = ( - camera_config.friendly_name - if camera_config.friendly_name - else camera_id.replace("_", " ").title() - ) - cameras_info.append(f" - {friendly_name} (ID: {camera_id})") - - cameras_section = "" - if cameras_info: - cameras_section = ( - "\n\nAvailable cameras:\n" - + "\n".join(cameras_info) - + "\n\nWhen users refer to cameras by their friendly name (e.g., 'Back Deck Camera'), use the corresponding camera ID (e.g., 'back_deck_cam') in tool calls." - ) - - live_image_note = "" - if body.include_live_image: - live_image_note = ( - f"\n\nThe first user message includes a live image from camera " - f"'{body.include_live_image}'. Use get_live_context for that camera to get " - "current detection details (objects, zones) to aid in understanding the image." - ) - - system_prompt = f"""You are a helpful assistant for Frigate, a security camera NVR system. You help users answer questions about their cameras, detected objects, and events. - -Current date and time: {current_date_str} at {current_time_str} (UTC) - -When users ask questions about "today", "yesterday", "this week", etc., use the current date above as reference. -When searching for objects or events, use ISO 8601 format for dates (e.g., {current_date_str}T00:00:00Z for the start of today). 
-Always be accurate with time calculations based on the current date provided.{cameras_section}{live_image_note}""" - - conversation.append( - { - "role": "system", - "content": system_prompt, - } - ) - - first_user_message_seen = False - for msg in body.messages: - msg_dict = { - "role": msg.role, - "content": msg.content, - } - if msg.tool_call_id: - msg_dict["tool_call_id"] = msg.tool_call_id - if msg.name: - msg_dict["name"] = msg.name - - if ( - msg.role == "user" - and not first_user_message_seen - and body.include_live_image - ): - first_user_message_seen = True - image_url = await _get_live_frame_image_url( - request, body.include_live_image, allowed_cameras - ) - if image_url: - msg_dict["content"] = [ - {"type": "text", "text": msg.content}, - {"type": "image_url", "image_url": {"url": image_url}}, - ] - - conversation.append(msg_dict) - - tool_iterations = 0 - max_iterations = body.max_tool_iterations - - logger.debug( - f"Starting chat completion with {len(conversation)} message(s), " - f"{len(tools)} tool(s) available, max_iterations={max_iterations}" - ) - - try: - while tool_iterations < max_iterations: - logger.debug( - f"Calling LLM (iteration {tool_iterations + 1}/{max_iterations}) " - f"with {len(conversation)} message(s) in conversation" - ) - response = genai_client.chat_with_tools( - messages=conversation, - tools=tools if tools else None, - tool_choice="auto", - ) - - if response.get("finish_reason") == "error": - logger.error("GenAI client returned an error") - return JSONResponse( - content={ - "error": "An error occurred while processing your request.", - }, - status_code=500, - ) - - assistant_message = { - "role": "assistant", - "content": response.get("content"), - } - if response.get("tool_calls"): - assistant_message["tool_calls"] = [ - { - "id": tc["id"], - "type": "function", - "function": { - "name": tc["name"], - "arguments": json.dumps(tc["arguments"]), - }, - } - for tc in response["tool_calls"] - ] - 
conversation.append(assistant_message) - - tool_calls = response.get("tool_calls") - if not tool_calls: - logger.debug( - f"Chat completion finished with final answer (iterations: {tool_iterations})" - ) - return JSONResponse( - content=ChatCompletionResponse( - message=ChatMessageResponse( - role="assistant", - content=response.get("content"), - tool_calls=None, - ), - finish_reason=response.get("finish_reason", "stop"), - tool_iterations=tool_iterations, - ).model_dump(), - ) - - # Execute tools - tool_iterations += 1 - logger.debug( - f"Tool calls detected (iteration {tool_iterations}/{max_iterations}): " - f"{len(tool_calls)} tool(s) to execute" - ) - tool_results = [] - - for tool_call in tool_calls: - tool_name = tool_call["name"] - tool_args = tool_call["arguments"] - tool_call_id = tool_call["id"] - - logger.debug( - f"Executing tool: {tool_name} (id: {tool_call_id}) with arguments: {json.dumps(tool_args, indent=2)}" - ) - - try: - tool_result = await _execute_tool_internal( - tool_name, tool_args, request, allowed_cameras - ) - - if isinstance(tool_result, dict): - result_content = json.dumps(tool_result) - result_summary = tool_result - if isinstance(tool_result, dict) and isinstance( - tool_result.get("content"), list - ): - result_count = len(tool_result.get("content", [])) - result_summary = { - "count": result_count, - "sample": tool_result.get("content", [])[:2] - if result_count > 0 - else [], - } - logger.debug( - f"Tool {tool_name} (id: {tool_call_id}) completed successfully. " - f"Result: {json.dumps(result_summary, indent=2)}" - ) - elif isinstance(tool_result, str): - result_content = tool_result - logger.debug( - f"Tool {tool_name} (id: {tool_call_id}) completed successfully. " - f"Result length: {len(result_content)} characters" - ) - else: - result_content = str(tool_result) - logger.debug( - f"Tool {tool_name} (id: {tool_call_id}) completed successfully. 
" - f"Result type: {type(tool_result).__name__}" - ) - - tool_results.append( - { - "role": "tool", - "tool_call_id": tool_call_id, - "content": result_content, - } - ) - except Exception as e: - logger.error( - f"Error executing tool {tool_name} (id: {tool_call_id}): {e}", - exc_info=True, - ) - error_content = json.dumps({"error": "Tool execution failed"}) - tool_results.append( - { - "role": "tool", - "tool_call_id": tool_call_id, - "content": error_content, - } - ) - logger.debug( - f"Tool {tool_name} (id: {tool_call_id}) failed. Error result added to conversation." - ) - - conversation.extend(tool_results) - logger.debug( - f"Added {len(tool_results)} tool result(s) to conversation. " - f"Continuing with next LLM call..." - ) - - logger.warning( - f"Max tool iterations ({max_iterations}) reached. Returning partial response." - ) - return JSONResponse( - content=ChatCompletionResponse( - message=ChatMessageResponse( - role="assistant", - content="I reached the maximum number of tool call iterations. 
Please try rephrasing your question.", - tool_calls=None, - ), - finish_reason="length", - tool_iterations=tool_iterations, - ).model_dump(), - ) - - except Exception as e: - logger.error(f"Error in chat completion: {e}", exc_info=True) - return JSONResponse( - content={ - "error": "An error occurred while processing your request.", - }, - status_code=500, - ) diff --git a/frigate/api/defs/query/media_query_parameters.py b/frigate/api/defs/query/media_query_parameters.py index 7438f2f2f..a16f0d53f 100644 --- a/frigate/api/defs/query/media_query_parameters.py +++ b/frigate/api/defs/query/media_query_parameters.py @@ -1,7 +1,8 @@ from enum import Enum -from typing import Optional +from typing import Optional, Union from pydantic import BaseModel +from pydantic.json_schema import SkipJsonSchema class Extension(str, Enum): @@ -47,3 +48,15 @@ class MediaMjpegFeedQueryParams(BaseModel): mask: Optional[int] = None motion: Optional[int] = None regions: Optional[int] = None + + +class MediaRecordingsSummaryQueryParams(BaseModel): + timezone: str = "utc" + cameras: Optional[str] = "all" + + +class MediaRecordingsAvailabilityQueryParams(BaseModel): + cameras: str = "all" + before: Union[float, SkipJsonSchema[None]] = None + after: Union[float, SkipJsonSchema[None]] = None + scale: int = 30 diff --git a/frigate/api/defs/query/recordings_query_parameters.py b/frigate/api/defs/query/recordings_query_parameters.py deleted file mode 100644 index d4f1b0a7b..000000000 --- a/frigate/api/defs/query/recordings_query_parameters.py +++ /dev/null @@ -1,21 +0,0 @@ -from typing import Optional, Union - -from pydantic import BaseModel -from pydantic.json_schema import SkipJsonSchema - - -class MediaRecordingsSummaryQueryParams(BaseModel): - timezone: str = "utc" - cameras: Optional[str] = "all" - - -class MediaRecordingsAvailabilityQueryParams(BaseModel): - cameras: str = "all" - before: Union[float, SkipJsonSchema[None]] = None - after: Union[float, SkipJsonSchema[None]] = None - scale: int 
= 30 - - -class RecordingsDeleteQueryParams(BaseModel): - keep: Optional[str] = None - cameras: Optional[str] = "all" diff --git a/frigate/api/defs/request/app_body.py b/frigate/api/defs/request/app_body.py index 6059daf6e..c4129d8da 100644 --- a/frigate/api/defs/request/app_body.py +++ b/frigate/api/defs/request/app_body.py @@ -1,6 +1,6 @@ -from typing import Any, Dict, List, Optional +from typing import Any, Dict, Optional -from pydantic import BaseModel, Field +from pydantic import BaseModel class AppConfigSetBody(BaseModel): @@ -27,16 +27,3 @@ class AppPostLoginBody(BaseModel): class AppPutRoleBody(BaseModel): role: str - - -class MediaSyncBody(BaseModel): - dry_run: bool = Field( - default=True, description="If True, only report orphans without deleting them" - ) - media_types: List[str] = Field( - default=["all"], - description="Types of media to sync: 'all', 'event_snapshots', 'event_thumbnails', 'review_thumbnails', 'previews', 'exports', 'recordings'", - ) - force: bool = Field( - default=False, description="If True, bypass safety threshold checks" - ) diff --git a/frigate/api/defs/request/chat_body.py b/frigate/api/defs/request/chat_body.py deleted file mode 100644 index fa3c3860a..000000000 --- a/frigate/api/defs/request/chat_body.py +++ /dev/null @@ -1,41 +0,0 @@ -"""Chat API request models.""" - -from typing import Optional - -from pydantic import BaseModel, Field - - -class ChatMessage(BaseModel): - """A single message in a chat conversation.""" - - role: str = Field( - description="Message role: 'user', 'assistant', 'system', or 'tool'" - ) - content: str = Field(description="Message content") - tool_call_id: Optional[str] = Field( - default=None, description="For tool messages, the ID of the tool call" - ) - name: Optional[str] = Field( - default=None, description="For tool messages, the tool name" - ) - - -class ChatCompletionRequest(BaseModel): - """Request for chat completion with tool calling.""" - - messages: list[ChatMessage] = Field( - 
description="List of messages in the conversation" - ) - max_tool_iterations: int = Field( - default=5, - ge=1, - le=10, - description="Maximum number of tool call iterations (default: 5)", - ) - include_live_image: Optional[str] = Field( - default=None, - description=( - "If set, the current live frame from this camera is attached to the first " - "user message as multimodal content. Use with get_live_context for detection info." - ), - ) diff --git a/frigate/api/defs/request/events_body.py b/frigate/api/defs/request/events_body.py index d844c31ca..50754e92a 100644 --- a/frigate/api/defs/request/events_body.py +++ b/frigate/api/defs/request/events_body.py @@ -41,7 +41,6 @@ class EventsCreateBody(BaseModel): duration: Optional[int] = 30 include_recording: Optional[bool] = True draw: Optional[dict] = {} - pre_capture: Optional[int] = None class EventsEndBody(BaseModel): diff --git a/frigate/api/defs/request/export_case_body.py b/frigate/api/defs/request/export_case_body.py deleted file mode 100644 index 35cd8ff7f..000000000 --- a/frigate/api/defs/request/export_case_body.py +++ /dev/null @@ -1,35 +0,0 @@ -from typing import Optional - -from pydantic import BaseModel, Field - - -class ExportCaseCreateBody(BaseModel): - """Request body for creating a new export case.""" - - name: str = Field(max_length=100, description="Friendly name of the export case") - description: Optional[str] = Field( - default=None, description="Optional description of the export case" - ) - - -class ExportCaseUpdateBody(BaseModel): - """Request body for updating an existing export case.""" - - name: Optional[str] = Field( - default=None, - max_length=100, - description="Updated friendly name of the export case", - ) - description: Optional[str] = Field( - default=None, description="Updated description of the export case" - ) - - -class ExportCaseAssignBody(BaseModel): - """Request body for assigning or unassigning an export to a case.""" - - export_case_id: Optional[str] = Field( - 
default=None, - max_length=30, - description="Case ID to assign to the export, or null to unassign", - ) diff --git a/frigate/api/defs/request/export_recordings_body.py b/frigate/api/defs/request/export_recordings_body.py index 96ecccaa4..19fc2f019 100644 --- a/frigate/api/defs/request/export_recordings_body.py +++ b/frigate/api/defs/request/export_recordings_body.py @@ -3,47 +3,18 @@ from typing import Optional, Union from pydantic import BaseModel, Field from pydantic.json_schema import SkipJsonSchema -from frigate.record.export import PlaybackSourceEnum +from frigate.record.export import ( + PlaybackFactorEnum, + PlaybackSourceEnum, +) class ExportRecordingsBody(BaseModel): + playback: PlaybackFactorEnum = Field( + default=PlaybackFactorEnum.realtime, title="Playback factor" + ) source: PlaybackSourceEnum = Field( default=PlaybackSourceEnum.recordings, title="Playback source" ) name: Optional[str] = Field(title="Friendly name", default=None, max_length=256) image_path: Union[str, SkipJsonSchema[None]] = None - export_case_id: Optional[str] = Field( - default=None, - title="Export case ID", - max_length=30, - description="ID of the export case to assign this export to", - ) - - -class ExportRecordingsCustomBody(BaseModel): - source: PlaybackSourceEnum = Field( - default=PlaybackSourceEnum.recordings, title="Playback source" - ) - name: str = Field(title="Friendly name", default=None, max_length=256) - image_path: Union[str, SkipJsonSchema[None]] = None - export_case_id: Optional[str] = Field( - default=None, - title="Export case ID", - max_length=30, - description="ID of the export case to assign this export to", - ) - ffmpeg_input_args: Optional[str] = Field( - default=None, - title="FFmpeg input arguments", - description="Custom FFmpeg input arguments. If not provided, defaults to timelapse input args.", - ) - ffmpeg_output_args: Optional[str] = Field( - default=None, - title="FFmpeg output arguments", - description="Custom FFmpeg output arguments. 
If not provided, defaults to timelapse output args.", - ) - cpu_fallback: bool = Field( - default=False, - title="CPU Fallback", - description="If true, retry export without hardware acceleration if the initial export fails.", - ) diff --git a/frigate/api/defs/response/chat_response.py b/frigate/api/defs/response/chat_response.py deleted file mode 100644 index f1cc9194b..000000000 --- a/frigate/api/defs/response/chat_response.py +++ /dev/null @@ -1,37 +0,0 @@ -"""Chat API response models.""" - -from typing import Any, Optional - -from pydantic import BaseModel, Field - - -class ToolCall(BaseModel): - """A tool call from the LLM.""" - - id: str = Field(description="Unique identifier for this tool call") - name: str = Field(description="Tool name to call") - arguments: dict[str, Any] = Field(description="Arguments for the tool call") - - -class ChatMessageResponse(BaseModel): - """A message in the chat response.""" - - role: str = Field(description="Message role") - content: Optional[str] = Field( - default=None, description="Message content (None if tool calls present)" - ) - tool_calls: Optional[list[ToolCall]] = Field( - default=None, description="Tool calls if LLM wants to call tools" - ) - - -class ChatCompletionResponse(BaseModel): - """Response from chat completion.""" - - message: ChatMessageResponse = Field(description="The assistant's message") - finish_reason: str = Field( - description="Reason generation stopped: 'stop', 'tool_calls', 'length', 'error'" - ) - tool_iterations: int = Field( - default=0, description="Number of tool call iterations performed" - ) diff --git a/frigate/api/defs/response/export_case_response.py b/frigate/api/defs/response/export_case_response.py deleted file mode 100644 index 713e16683..000000000 --- a/frigate/api/defs/response/export_case_response.py +++ /dev/null @@ -1,22 +0,0 @@ -from typing import List, Optional - -from pydantic import BaseModel, Field - - -class ExportCaseModel(BaseModel): - """Model representing a single 
export case.""" - - id: str = Field(description="Unique identifier for the export case") - name: str = Field(description="Friendly name of the export case") - description: Optional[str] = Field( - default=None, description="Optional description of the export case" - ) - created_at: float = Field( - description="Unix timestamp when the export case was created" - ) - updated_at: float = Field( - description="Unix timestamp when the export case was last updated" - ) - - -ExportCasesResponse = List[ExportCaseModel] diff --git a/frigate/api/defs/response/export_response.py b/frigate/api/defs/response/export_response.py index 600794f97..63a9e91a1 100644 --- a/frigate/api/defs/response/export_response.py +++ b/frigate/api/defs/response/export_response.py @@ -15,9 +15,6 @@ class ExportModel(BaseModel): in_progress: bool = Field( description="Whether the export is currently being processed" ) - export_case_id: Optional[str] = Field( - default=None, description="ID of the export case this export belongs to" - ) class StartExportResponse(BaseModel): diff --git a/frigate/api/defs/tags.py b/frigate/api/defs/tags.py index 3aaaa59ef..f804385d1 100644 --- a/frigate/api/defs/tags.py +++ b/frigate/api/defs/tags.py @@ -3,15 +3,13 @@ from enum import Enum class Tags(Enum): app = "App" - auth = "Auth" camera = "Camera" - chat = "Chat" - events = "Events" - export = "Export" - classification = "Classification" + preview = "Preview" logs = "Logs" media = "Media" notifications = "Notifications" - preview = "Preview" - recordings = "Recordings" review = "Review" + export = "Export" + events = "Events" + classification = "Classification" + auth = "Auth" diff --git a/frigate/api/event.py b/frigate/api/event.py index b0a749018..c03cfb431 100644 --- a/frigate/api/event.py +++ b/frigate/api/event.py @@ -1782,7 +1782,6 @@ def create_event( body.duration, "api", body.draw, - body.pre_capture, ), EventMetadataTypeEnum.manual_event_create.value, ) diff --git a/frigate/api/export.py 
b/frigate/api/export.py index 23f975618..24fed93b0 100644 --- a/frigate/api/export.py +++ b/frigate/api/export.py @@ -4,10 +4,10 @@ import logging import random import string from pathlib import Path -from typing import List, Optional +from typing import List import psutil -from fastapi import APIRouter, Depends, Query, Request +from fastapi import APIRouter, Depends, Request from fastapi.responses import JSONResponse from pathvalidate import sanitize_filepath from peewee import DoesNotExist @@ -19,20 +19,8 @@ from frigate.api.auth import ( require_camera_access, require_role, ) -from frigate.api.defs.request.export_case_body import ( - ExportCaseAssignBody, - ExportCaseCreateBody, - ExportCaseUpdateBody, -) -from frigate.api.defs.request.export_recordings_body import ( - ExportRecordingsBody, - ExportRecordingsCustomBody, -) +from frigate.api.defs.request.export_recordings_body import ExportRecordingsBody from frigate.api.defs.request.export_rename_body import ExportRenameBody -from frigate.api.defs.response.export_case_response import ( - ExportCaseModel, - ExportCasesResponse, -) from frigate.api.defs.response.export_response import ( ExportModel, ExportsResponse, @@ -41,9 +29,9 @@ from frigate.api.defs.response.export_response import ( from frigate.api.defs.response.generic_response import GenericResponse from frigate.api.defs.tags import Tags from frigate.const import CLIPS_DIR, EXPORT_DIR -from frigate.models import Export, ExportCase, Previews, Recordings +from frigate.models import Export, Previews, Recordings from frigate.record.export import ( - DEFAULT_TIME_LAPSE_FFMPEG_ARGS, + PlaybackFactorEnum, PlaybackSourceEnum, RecordingExporter, ) @@ -64,182 +52,17 @@ router = APIRouter(tags=[Tags.export]) ) def get_exports( allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter), - export_case_id: Optional[str] = None, - cameras: Optional[str] = Query(default="all"), - start_date: Optional[float] = None, - end_date: Optional[float] = None, ): - query 
= Export.select().where(Export.camera << allowed_cameras) - - if export_case_id is not None: - if export_case_id == "unassigned": - query = query.where(Export.export_case.is_null(True)) - else: - query = query.where(Export.export_case == export_case_id) - - if cameras and cameras != "all": - requested = set(cameras.split(",")) - filtered_cameras = list(requested.intersection(allowed_cameras)) - if not filtered_cameras: - return JSONResponse(content=[]) - query = query.where(Export.camera << filtered_cameras) - - if start_date is not None: - query = query.where(Export.date >= start_date) - - if end_date is not None: - query = query.where(Export.date <= end_date) - - exports = query.order_by(Export.date.desc()).dicts().iterator() + exports = ( + Export.select() + .where(Export.camera << allowed_cameras) + .order_by(Export.date.desc()) + .dicts() + .iterator() + ) return JSONResponse(content=[e for e in exports]) -@router.get( - "/cases", - response_model=ExportCasesResponse, - dependencies=[Depends(allow_any_authenticated())], - summary="Get export cases", - description="Gets all export cases from the database.", -) -def get_export_cases(): - cases = ( - ExportCase.select().order_by(ExportCase.created_at.desc()).dicts().iterator() - ) - return JSONResponse(content=[c for c in cases]) - - -@router.post( - "/cases", - response_model=ExportCaseModel, - dependencies=[Depends(require_role(["admin"]))], - summary="Create export case", - description="Creates a new export case.", -) -def create_export_case(body: ExportCaseCreateBody): - case = ExportCase.create( - id="".join(random.choices(string.ascii_lowercase + string.digits, k=12)), - name=body.name, - description=body.description, - created_at=Path().stat().st_mtime, - updated_at=Path().stat().st_mtime, - ) - return JSONResponse(content=model_to_dict(case)) - - -@router.get( - "/cases/{case_id}", - response_model=ExportCaseModel, - dependencies=[Depends(allow_any_authenticated())], - summary="Get a single export case", 
- description="Gets a specific export case by ID.", -) -def get_export_case(case_id: str): - try: - case = ExportCase.get(ExportCase.id == case_id) - return JSONResponse(content=model_to_dict(case)) - except DoesNotExist: - return JSONResponse( - content={"success": False, "message": "Export case not found"}, - status_code=404, - ) - - -@router.patch( - "/cases/{case_id}", - response_model=GenericResponse, - dependencies=[Depends(require_role(["admin"]))], - summary="Update export case", - description="Updates an existing export case.", -) -def update_export_case(case_id: str, body: ExportCaseUpdateBody): - try: - case = ExportCase.get(ExportCase.id == case_id) - except DoesNotExist: - return JSONResponse( - content={"success": False, "message": "Export case not found"}, - status_code=404, - ) - - if body.name is not None: - case.name = body.name - if body.description is not None: - case.description = body.description - - case.save() - - return JSONResponse( - content={"success": True, "message": "Successfully updated export case."} - ) - - -@router.delete( - "/cases/{case_id}", - response_model=GenericResponse, - dependencies=[Depends(require_role(["admin"]))], - summary="Delete export case", - description="""Deletes an export case.\n Exports that reference this case will have their export_case set to null.\n """, -) -def delete_export_case(case_id: str): - try: - case = ExportCase.get(ExportCase.id == case_id) - except DoesNotExist: - return JSONResponse( - content={"success": False, "message": "Export case not found"}, - status_code=404, - ) - - # Unassign exports from this case but keep the exports themselves - Export.update(export_case=None).where(Export.export_case == case).execute() - - case.delete_instance() - - return JSONResponse( - content={"success": True, "message": "Successfully deleted export case."} - ) - - -@router.patch( - "/export/{export_id}/case", - response_model=GenericResponse, - dependencies=[Depends(require_role(["admin"]))], - 
summary="Assign export to case", - description=( - "Assigns an export to a case, or unassigns it if export_case_id is null." - ), -) -async def assign_export_case( - export_id: str, - body: ExportCaseAssignBody, - request: Request, -): - try: - export: Export = Export.get(Export.id == export_id) - await require_camera_access(export.camera, request=request) - except DoesNotExist: - return JSONResponse( - content={"success": False, "message": "Export not found."}, - status_code=404, - ) - - if body.export_case_id is not None: - try: - ExportCase.get(ExportCase.id == body.export_case_id) - except DoesNotExist: - return JSONResponse( - content={"success": False, "message": "Export case not found."}, - status_code=404, - ) - export.export_case = body.export_case_id - else: - export.export_case = None - - export.save() - - return JSONResponse( - content={"success": True, "message": "Successfully updated export case."} - ) - - @router.post( "/export/{camera_name}/start/{start_time}/end/{end_time}", response_model=StartExportResponse, @@ -265,20 +88,11 @@ def export_recording( status_code=404, ) + playback_factor = body.playback playback_source = body.source friendly_name = body.name existing_image = sanitize_filepath(body.image_path) if body.image_path else None - export_case_id = body.export_case_id - if export_case_id is not None: - try: - ExportCase.get(ExportCase.id == export_case_id) - except DoesNotExist: - return JSONResponse( - content={"success": False, "message": "Export case not found"}, - status_code=404, - ) - # Ensure that existing_image is a valid path if existing_image and not existing_image.startswith(CLIPS_DIR): return JSONResponse( @@ -337,12 +151,16 @@ def export_recording( existing_image, int(start_time), int(end_time), + ( + PlaybackFactorEnum[playback_factor] + if playback_factor in PlaybackFactorEnum.__members__.values() + else PlaybackFactorEnum.realtime + ), ( PlaybackSourceEnum[playback_source] if playback_source in 
PlaybackSourceEnum.__members__.values() else PlaybackSourceEnum.recordings ), - export_case_id, ) exporter.start() return JSONResponse( @@ -453,138 +271,6 @@ async def export_delete(event_id: str, request: Request): ) -@router.post( - "/export/custom/{camera_name}/start/{start_time}/end/{end_time}", - response_model=StartExportResponse, - dependencies=[Depends(require_camera_access)], - summary="Start custom recording export", - description="""Starts an export of a recording for the specified time range using custom FFmpeg arguments. - The export can be from recordings or preview footage. Returns the export ID if - successful, or an error message if the camera is invalid or no recordings/previews - are found for the time range. If ffmpeg_input_args and ffmpeg_output_args are not provided, - defaults to timelapse export settings.""", -) -def export_recording_custom( - request: Request, - camera_name: str, - start_time: float, - end_time: float, - body: ExportRecordingsCustomBody, -): - if not camera_name or not request.app.frigate_config.cameras.get(camera_name): - return JSONResponse( - content=( - {"success": False, "message": f"{camera_name} is not a valid camera."} - ), - status_code=404, - ) - - playback_source = body.source - friendly_name = body.name - existing_image = sanitize_filepath(body.image_path) if body.image_path else None - ffmpeg_input_args = body.ffmpeg_input_args - ffmpeg_output_args = body.ffmpeg_output_args - cpu_fallback = body.cpu_fallback - - export_case_id = body.export_case_id - if export_case_id is not None: - try: - ExportCase.get(ExportCase.id == export_case_id) - except DoesNotExist: - return JSONResponse( - content={"success": False, "message": "Export case not found"}, - status_code=404, - ) - - # Ensure that existing_image is a valid path - if existing_image and not existing_image.startswith(CLIPS_DIR): - return JSONResponse( - content=({"success": False, "message": "Invalid image path"}), - status_code=400, - ) - - if 
playback_source == "recordings": - recordings_count = ( - Recordings.select() - .where( - Recordings.start_time.between(start_time, end_time) - | Recordings.end_time.between(start_time, end_time) - | ( - (start_time > Recordings.start_time) - & (end_time < Recordings.end_time) - ) - ) - .where(Recordings.camera == camera_name) - .count() - ) - - if recordings_count <= 0: - return JSONResponse( - content=( - {"success": False, "message": "No recordings found for time range"} - ), - status_code=400, - ) - else: - previews_count = ( - Previews.select() - .where( - Previews.start_time.between(start_time, end_time) - | Previews.end_time.between(start_time, end_time) - | ((start_time > Previews.start_time) & (end_time < Previews.end_time)) - ) - .where(Previews.camera == camera_name) - .count() - ) - - if not is_current_hour(start_time) and previews_count <= 0: - return JSONResponse( - content=( - {"success": False, "message": "No previews found for time range"} - ), - status_code=400, - ) - - export_id = f"{camera_name}_{''.join(random.choices(string.ascii_lowercase + string.digits, k=6))}" - - # Set default values if not provided (timelapse defaults) - if ffmpeg_input_args is None: - ffmpeg_input_args = "" - - if ffmpeg_output_args is None: - ffmpeg_output_args = DEFAULT_TIME_LAPSE_FFMPEG_ARGS - - exporter = RecordingExporter( - request.app.frigate_config, - export_id, - camera_name, - friendly_name, - existing_image, - int(start_time), - int(end_time), - ( - PlaybackSourceEnum[playback_source] - if playback_source in PlaybackSourceEnum.__members__.values() - else PlaybackSourceEnum.recordings - ), - export_case_id, - ffmpeg_input_args, - ffmpeg_output_args, - cpu_fallback, - ) - exporter.start() - return JSONResponse( - content=( - { - "success": True, - "message": "Starting export of recording.", - "export_id": export_id, - } - ), - status_code=200, - ) - - @router.get( "/exports/{export_id}", response_model=ExportModel, diff --git a/frigate/api/fastapi_app.py 
b/frigate/api/fastapi_app.py index 496c8fada..48c97dfaf 100644 --- a/frigate/api/fastapi_app.py +++ b/frigate/api/fastapi_app.py @@ -16,14 +16,12 @@ from frigate.api import app as main_app from frigate.api import ( auth, camera, - chat, classification, event, export, media, notification, preview, - record, review, ) from frigate.api.auth import get_jwt_secret, limiter, require_admin_by_default @@ -122,7 +120,6 @@ def create_fastapi_app( # Order of include_router matters: https://fastapi.tiangolo.com/tutorial/path-params/#order-matters app.include_router(auth.router) app.include_router(camera.router) - app.include_router(chat.router) app.include_router(classification.router) app.include_router(review.router) app.include_router(main_app.router) @@ -131,7 +128,6 @@ def create_fastapi_app( app.include_router(export.router) app.include_router(event.router) app.include_router(media.router) - app.include_router(record.router) # App Properties app.frigate_config = frigate_config app.embeddings = embeddings diff --git a/frigate/api/media.py b/frigate/api/media.py index 3cfd97674..971bfef83 100644 --- a/frigate/api/media.py +++ b/frigate/api/media.py @@ -8,8 +8,9 @@ import os import subprocess as sp import time from datetime import datetime, timedelta, timezone +from functools import reduce from pathlib import Path as FilePath -from typing import Any +from typing import Any, List from urllib.parse import unquote import cv2 @@ -18,11 +19,12 @@ import pytz from fastapi import APIRouter, Depends, Path, Query, Request, Response from fastapi.responses import FileResponse, JSONResponse, StreamingResponse from pathvalidate import sanitize_filename -from peewee import DoesNotExist, fn +from peewee import DoesNotExist, fn, operator from tzlocal import get_localzone_name from frigate.api.auth import ( allow_any_authenticated, + get_allowed_cameras_for_filter, require_camera_access, ) from frigate.api.defs.query.media_query_parameters import ( @@ -30,6 +32,8 @@ from 
frigate.api.defs.query.media_query_parameters import ( MediaEventsSnapshotQueryParams, MediaLatestFrameQueryParams, MediaMjpegFeedQueryParams, + MediaRecordingsAvailabilityQueryParams, + MediaRecordingsSummaryQueryParams, ) from frigate.api.defs.tags import Tags from frigate.camera.state import CameraState @@ -40,12 +44,13 @@ from frigate.const import ( INSTALL_DIR, MAX_SEGMENT_DURATION, PREVIEW_FRAME_TYPE, + RECORD_DIR, ) from frigate.models import Event, Previews, Recordings, Regions, ReviewSegment -from frigate.output.preview import get_most_recent_preview_frame from frigate.track.object_processing import TrackedObjectProcessor from frigate.util.file import get_event_thumbnail_bytes from frigate.util.image import get_image_from_recording +from frigate.util.time import get_dst_transitions logger = logging.getLogger(__name__) @@ -126,9 +131,7 @@ async def camera_ptz_info(request: Request, camera_name: str): @router.get( - "/{camera_name}/latest.{extension}", - dependencies=[Depends(require_camera_access)], - description="Returns the latest frame from the specified camera in the requested format (jpg, png, webp). Falls back to preview frames if the camera is offline.", + "/{camera_name}/latest.{extension}", dependencies=[Depends(require_camera_access)] ) async def latest_frame( request: Request, @@ -162,37 +165,20 @@ async def latest_frame( or 10 ) - is_offline = False if frame is None or datetime.now().timestamp() > ( frame_processor.get_current_frame_time(camera_name) + retry_interval ): - last_frame_time = frame_processor.get_current_frame_time(camera_name) - preview_path = get_most_recent_preview_frame( - camera_name, before=last_frame_time - ) - - if preview_path: - logger.debug(f"Using most recent preview frame for {camera_name}") - frame = cv2.imread(preview_path, cv2.IMREAD_UNCHANGED) - - if frame is not None: - is_offline = True - - if frame is None or not is_offline: - logger.debug( - f"No live or preview frame available for {camera_name}. 
Using error image." + if request.app.camera_error_image is None: + error_image = glob.glob( + os.path.join(INSTALL_DIR, "frigate/images/camera-error.jpg") ) - if request.app.camera_error_image is None: - error_image = glob.glob( - os.path.join(INSTALL_DIR, "frigate/images/camera-error.jpg") + + if len(error_image) > 0: + request.app.camera_error_image = cv2.imread( + error_image[0], cv2.IMREAD_UNCHANGED ) - if len(error_image) > 0: - request.app.camera_error_image = cv2.imread( - error_image[0], cv2.IMREAD_UNCHANGED - ) - - frame = request.app.camera_error_image + frame = request.app.camera_error_image height = int(params.height or str(frame.shape[0])) width = int(height * frame.shape[1] / frame.shape[0]) @@ -214,18 +200,14 @@ async def latest_frame( frame = cv2.resize(frame, dsize=(width, height), interpolation=cv2.INTER_AREA) _, img = cv2.imencode(f".{extension.value}", frame, quality_params) - - headers = { - "Cache-Control": "no-store" if not params.store else "private, max-age=60", - } - - if is_offline: - headers["X-Frigate-Offline"] = "true" - return Response( content=img.tobytes(), media_type=extension.get_mime_type(), - headers=headers, + headers={ + "Cache-Control": "no-store" + if not params.store + else "private, max-age=60", + }, ) elif ( camera_name == "birdseye" @@ -415,6 +397,333 @@ async def submit_recording_snapshot_to_plus( ) +@router.get("/recordings/storage", dependencies=[Depends(allow_any_authenticated())]) +def get_recordings_storage_usage(request: Request): + recording_stats = request.app.stats_emitter.get_latest_stats()["service"][ + "storage" + ][RECORD_DIR] + + if not recording_stats: + return JSONResponse({}) + + total_mb = recording_stats["total"] + + camera_usages: dict[str, dict] = ( + request.app.storage_maintainer.calculate_camera_usages() + ) + + for camera_name in camera_usages.keys(): + if camera_usages.get(camera_name, {}).get("usage"): + camera_usages[camera_name]["usage_percent"] = ( + camera_usages.get(camera_name, 
{}).get("usage", 0) / total_mb + ) * 100 + + return JSONResponse(content=camera_usages) + + +@router.get("/recordings/summary", dependencies=[Depends(allow_any_authenticated())]) +def all_recordings_summary( + request: Request, + params: MediaRecordingsSummaryQueryParams = Depends(), + allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter), +): + """Returns true/false by day indicating if recordings exist""" + + cameras = params.cameras + if cameras != "all": + requested = set(unquote(cameras).split(",")) + filtered = requested.intersection(allowed_cameras) + if not filtered: + return JSONResponse(content={}) + camera_list = list(filtered) + else: + camera_list = allowed_cameras + + time_range_query = ( + Recordings.select( + fn.MIN(Recordings.start_time).alias("min_time"), + fn.MAX(Recordings.start_time).alias("max_time"), + ) + .where(Recordings.camera << camera_list) + .dicts() + .get() + ) + + min_time = time_range_query.get("min_time") + max_time = time_range_query.get("max_time") + + if min_time is None or max_time is None: + return JSONResponse(content={}) + + dst_periods = get_dst_transitions(params.timezone, min_time, max_time) + + days: dict[str, bool] = {} + + for period_start, period_end, period_offset in dst_periods: + hours_offset = int(period_offset / 60 / 60) + minutes_offset = int(period_offset / 60 - hours_offset * 60) + period_hour_modifier = f"{hours_offset} hour" + period_minute_modifier = f"{minutes_offset} minute" + + period_query = ( + Recordings.select( + fn.strftime( + "%Y-%m-%d", + fn.datetime( + Recordings.start_time, + "unixepoch", + period_hour_modifier, + period_minute_modifier, + ), + ).alias("day") + ) + .where( + (Recordings.camera << camera_list) + & (Recordings.end_time >= period_start) + & (Recordings.start_time <= period_end) + ) + .group_by( + fn.strftime( + "%Y-%m-%d", + fn.datetime( + Recordings.start_time, + "unixepoch", + period_hour_modifier, + period_minute_modifier, + ), + ) + ) + 
.order_by(Recordings.start_time.desc()) + .namedtuples() + ) + + for g in period_query: + days[g.day] = True + + return JSONResponse(content=dict(sorted(days.items()))) + + +@router.get( + "/{camera_name}/recordings/summary", dependencies=[Depends(require_camera_access)] +) +async def recordings_summary(camera_name: str, timezone: str = "utc"): + """Returns hourly summary for recordings of given camera""" + + time_range_query = ( + Recordings.select( + fn.MIN(Recordings.start_time).alias("min_time"), + fn.MAX(Recordings.start_time).alias("max_time"), + ) + .where(Recordings.camera == camera_name) + .dicts() + .get() + ) + + min_time = time_range_query.get("min_time") + max_time = time_range_query.get("max_time") + + days: dict[str, dict] = {} + + if min_time is None or max_time is None: + return JSONResponse(content=list(days.values())) + + dst_periods = get_dst_transitions(timezone, min_time, max_time) + + for period_start, period_end, period_offset in dst_periods: + hours_offset = int(period_offset / 60 / 60) + minutes_offset = int(period_offset / 60 - hours_offset * 60) + period_hour_modifier = f"{hours_offset} hour" + period_minute_modifier = f"{minutes_offset} minute" + + recording_groups = ( + Recordings.select( + fn.strftime( + "%Y-%m-%d %H", + fn.datetime( + Recordings.start_time, + "unixepoch", + period_hour_modifier, + period_minute_modifier, + ), + ).alias("hour"), + fn.SUM(Recordings.duration).alias("duration"), + fn.SUM(Recordings.motion).alias("motion"), + fn.SUM(Recordings.objects).alias("objects"), + ) + .where( + (Recordings.camera == camera_name) + & (Recordings.end_time >= period_start) + & (Recordings.start_time <= period_end) + ) + .group_by((Recordings.start_time + period_offset).cast("int") / 3600) + .order_by(Recordings.start_time.desc()) + .namedtuples() + ) + + event_groups = ( + Event.select( + fn.strftime( + "%Y-%m-%d %H", + fn.datetime( + Event.start_time, + "unixepoch", + period_hour_modifier, + period_minute_modifier, + ), + 
).alias("hour"), + fn.COUNT(Event.id).alias("count"), + ) + .where(Event.camera == camera_name, Event.has_clip) + .where( + (Event.start_time >= period_start) & (Event.start_time <= period_end) + ) + .group_by((Event.start_time + period_offset).cast("int") / 3600) + .namedtuples() + ) + + event_map = {g.hour: g.count for g in event_groups} + + for recording_group in recording_groups: + parts = recording_group.hour.split() + hour = parts[1] + day = parts[0] + events_count = event_map.get(recording_group.hour, 0) + hour_data = { + "hour": hour, + "events": events_count, + "motion": recording_group.motion, + "objects": recording_group.objects, + "duration": round(recording_group.duration), + } + if day in days: + # merge counts if already present (edge-case at DST boundary) + days[day]["events"] += events_count or 0 + days[day]["hours"].append(hour_data) + else: + days[day] = { + "events": events_count or 0, + "hours": [hour_data], + "day": day, + } + + return JSONResponse(content=list(days.values())) + + +@router.get("/{camera_name}/recordings", dependencies=[Depends(require_camera_access)]) +async def recordings( + camera_name: str, + after: float = (datetime.now() - timedelta(hours=1)).timestamp(), + before: float = datetime.now().timestamp(), +): + """Return specific camera recordings between the given 'after'/'end' times. 
If not provided the last hour will be used""" + recordings = ( + Recordings.select( + Recordings.id, + Recordings.start_time, + Recordings.end_time, + Recordings.segment_size, + Recordings.motion, + Recordings.objects, + Recordings.duration, + ) + .where( + Recordings.camera == camera_name, + Recordings.end_time >= after, + Recordings.start_time <= before, + ) + .order_by(Recordings.start_time) + .dicts() + .iterator() + ) + + return JSONResponse(content=list(recordings)) + + +@router.get( + "/recordings/unavailable", + response_model=list[dict], + dependencies=[Depends(allow_any_authenticated())], +) +async def no_recordings( + request: Request, + params: MediaRecordingsAvailabilityQueryParams = Depends(), + allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter), +): + """Get time ranges with no recordings.""" + cameras = params.cameras + if cameras != "all": + requested = set(unquote(cameras).split(",")) + filtered = requested.intersection(allowed_cameras) + if not filtered: + return JSONResponse(content=[]) + cameras = ",".join(filtered) + else: + cameras = allowed_cameras + + before = params.before or datetime.datetime.now().timestamp() + after = ( + params.after + or (datetime.datetime.now() - datetime.timedelta(hours=1)).timestamp() + ) + scale = params.scale + + clauses = [(Recordings.end_time >= after) & (Recordings.start_time <= before)] + if cameras != "all": + camera_list = cameras.split(",") + clauses.append((Recordings.camera << camera_list)) + else: + camera_list = allowed_cameras + + # Get recording start times + data: list[Recordings] = ( + Recordings.select(Recordings.start_time, Recordings.end_time) + .where(reduce(operator.and_, clauses)) + .order_by(Recordings.start_time.asc()) + .dicts() + .iterator() + ) + + # Convert recordings to list of (start, end) tuples + recordings = [(r["start_time"], r["end_time"]) for r in data] + + # Iterate through time segments and check if each has any recording + no_recording_segments = [] + 
current = after + current_gap_start = None + + while current < before: + segment_end = min(current + scale, before) + + # Check if this segment overlaps with any recording + has_recording = any( + rec_start < segment_end and rec_end > current + for rec_start, rec_end in recordings + ) + + if not has_recording: + # This segment has no recordings + if current_gap_start is None: + current_gap_start = current # Start a new gap + else: + # This segment has recordings + if current_gap_start is not None: + # End the current gap and append it + no_recording_segments.append( + {"start_time": int(current_gap_start), "end_time": int(current)} + ) + current_gap_start = None + + current = segment_end + + # Append the last gap if it exists + if current_gap_start is not None: + no_recording_segments.append( + {"start_time": int(current_gap_start), "end_time": int(before)} + ) + + return JSONResponse(content=no_recording_segments) + + @router.get( "/{camera_name}/start/{start_ts}/end/{end_ts}/clip.mp4", dependencies=[Depends(require_camera_access)], @@ -737,7 +1046,6 @@ async def event_snapshot( ): event_complete = False jpg_bytes = None - frame_time = 0 try: event = Event.get(Event.id == event_id, Event.end_time != None) event_complete = True @@ -762,7 +1070,7 @@ async def event_snapshot( if event_id in camera_state.tracked_objects: tracked_obj = camera_state.tracked_objects.get(event_id) if tracked_obj is not None: - jpg_bytes, frame_time = tracked_obj.get_img_bytes( + jpg_bytes = tracked_obj.get_img_bytes( ext="jpg", timestamp=params.timestamp, bounding_box=params.bbox, @@ -791,7 +1099,6 @@ async def event_snapshot( headers = { "Content-Type": "image/jpeg", "Cache-Control": "private, max-age=31536000" if event_complete else "no-store", - "X-Frame-Time": str(frame_time), } if params.download: diff --git a/frigate/api/record.py b/frigate/api/record.py deleted file mode 100644 index 789aa4a80..000000000 --- a/frigate/api/record.py +++ /dev/null @@ -1,479 +0,0 @@ -"""Recording 
APIs.""" - -import logging -from datetime import datetime, timedelta -from functools import reduce -from pathlib import Path -from typing import List -from urllib.parse import unquote - -from fastapi import APIRouter, Depends, Request -from fastapi import Path as PathParam -from fastapi.responses import JSONResponse -from peewee import fn, operator - -from frigate.api.auth import ( - allow_any_authenticated, - get_allowed_cameras_for_filter, - require_camera_access, - require_role, -) -from frigate.api.defs.query.recordings_query_parameters import ( - MediaRecordingsAvailabilityQueryParams, - MediaRecordingsSummaryQueryParams, - RecordingsDeleteQueryParams, -) -from frigate.api.defs.response.generic_response import GenericResponse -from frigate.api.defs.tags import Tags -from frigate.const import RECORD_DIR -from frigate.models import Event, Recordings -from frigate.util.time import get_dst_transitions - -logger = logging.getLogger(__name__) - -router = APIRouter(tags=[Tags.recordings]) - - -@router.get("/recordings/storage", dependencies=[Depends(allow_any_authenticated())]) -def get_recordings_storage_usage(request: Request): - recording_stats = request.app.stats_emitter.get_latest_stats()["service"][ - "storage" - ][RECORD_DIR] - - if not recording_stats: - return JSONResponse({}) - - total_mb = recording_stats["total"] - - camera_usages: dict[str, dict] = ( - request.app.storage_maintainer.calculate_camera_usages() - ) - - for camera_name in camera_usages.keys(): - if camera_usages.get(camera_name, {}).get("usage"): - camera_usages[camera_name]["usage_percent"] = ( - camera_usages.get(camera_name, {}).get("usage", 0) / total_mb - ) * 100 - - return JSONResponse(content=camera_usages) - - -@router.get("/recordings/summary", dependencies=[Depends(allow_any_authenticated())]) -def all_recordings_summary( - request: Request, - params: MediaRecordingsSummaryQueryParams = Depends(), - allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter), -): - 
"""Returns true/false by day indicating if recordings exist""" - - cameras = params.cameras - if cameras != "all": - requested = set(unquote(cameras).split(",")) - filtered = requested.intersection(allowed_cameras) - if not filtered: - return JSONResponse(content={}) - camera_list = list(filtered) - else: - camera_list = allowed_cameras - - time_range_query = ( - Recordings.select( - fn.MIN(Recordings.start_time).alias("min_time"), - fn.MAX(Recordings.start_time).alias("max_time"), - ) - .where(Recordings.camera << camera_list) - .dicts() - .get() - ) - - min_time = time_range_query.get("min_time") - max_time = time_range_query.get("max_time") - - if min_time is None or max_time is None: - return JSONResponse(content={}) - - dst_periods = get_dst_transitions(params.timezone, min_time, max_time) - - days: dict[str, bool] = {} - - for period_start, period_end, period_offset in dst_periods: - hours_offset = int(period_offset / 60 / 60) - minutes_offset = int(period_offset / 60 - hours_offset * 60) - period_hour_modifier = f"{hours_offset} hour" - period_minute_modifier = f"{minutes_offset} minute" - - period_query = ( - Recordings.select( - fn.strftime( - "%Y-%m-%d", - fn.datetime( - Recordings.start_time, - "unixepoch", - period_hour_modifier, - period_minute_modifier, - ), - ).alias("day") - ) - .where( - (Recordings.camera << camera_list) - & (Recordings.end_time >= period_start) - & (Recordings.start_time <= period_end) - ) - .group_by( - fn.strftime( - "%Y-%m-%d", - fn.datetime( - Recordings.start_time, - "unixepoch", - period_hour_modifier, - period_minute_modifier, - ), - ) - ) - .order_by(Recordings.start_time.desc()) - .namedtuples() - ) - - for g in period_query: - days[g.day] = True - - return JSONResponse(content=dict(sorted(days.items()))) - - -@router.get( - "/{camera_name}/recordings/summary", dependencies=[Depends(require_camera_access)] -) -async def recordings_summary(camera_name: str, timezone: str = "utc"): - """Returns hourly summary for 
recordings of given camera""" - - time_range_query = ( - Recordings.select( - fn.MIN(Recordings.start_time).alias("min_time"), - fn.MAX(Recordings.start_time).alias("max_time"), - ) - .where(Recordings.camera == camera_name) - .dicts() - .get() - ) - - min_time = time_range_query.get("min_time") - max_time = time_range_query.get("max_time") - - days: dict[str, dict] = {} - - if min_time is None or max_time is None: - return JSONResponse(content=list(days.values())) - - dst_periods = get_dst_transitions(timezone, min_time, max_time) - - for period_start, period_end, period_offset in dst_periods: - hours_offset = int(period_offset / 60 / 60) - minutes_offset = int(period_offset / 60 - hours_offset * 60) - period_hour_modifier = f"{hours_offset} hour" - period_minute_modifier = f"{minutes_offset} minute" - - recording_groups = ( - Recordings.select( - fn.strftime( - "%Y-%m-%d %H", - fn.datetime( - Recordings.start_time, - "unixepoch", - period_hour_modifier, - period_minute_modifier, - ), - ).alias("hour"), - fn.SUM(Recordings.duration).alias("duration"), - fn.SUM(Recordings.motion).alias("motion"), - fn.SUM(Recordings.objects).alias("objects"), - ) - .where( - (Recordings.camera == camera_name) - & (Recordings.end_time >= period_start) - & (Recordings.start_time <= period_end) - ) - .group_by((Recordings.start_time + period_offset).cast("int") / 3600) - .order_by(Recordings.start_time.desc()) - .namedtuples() - ) - - event_groups = ( - Event.select( - fn.strftime( - "%Y-%m-%d %H", - fn.datetime( - Event.start_time, - "unixepoch", - period_hour_modifier, - period_minute_modifier, - ), - ).alias("hour"), - fn.COUNT(Event.id).alias("count"), - ) - .where(Event.camera == camera_name, Event.has_clip) - .where( - (Event.start_time >= period_start) & (Event.start_time <= period_end) - ) - .group_by((Event.start_time + period_offset).cast("int") / 3600) - .namedtuples() - ) - - event_map = {g.hour: g.count for g in event_groups} - - for recording_group in recording_groups: - 
parts = recording_group.hour.split() - hour = parts[1] - day = parts[0] - events_count = event_map.get(recording_group.hour, 0) - hour_data = { - "hour": hour, - "events": events_count, - "motion": recording_group.motion, - "objects": recording_group.objects, - "duration": round(recording_group.duration), - } - if day in days: - # merge counts if already present (edge-case at DST boundary) - days[day]["events"] += events_count or 0 - days[day]["hours"].append(hour_data) - else: - days[day] = { - "events": events_count or 0, - "hours": [hour_data], - "day": day, - } - - return JSONResponse(content=list(days.values())) - - -@router.get("/{camera_name}/recordings", dependencies=[Depends(require_camera_access)]) -async def recordings( - camera_name: str, - after: float = (datetime.now() - timedelta(hours=1)).timestamp(), - before: float = datetime.now().timestamp(), -): - """Return specific camera recordings between the given 'after'/'end' times. If not provided the last hour will be used""" - recordings = ( - Recordings.select( - Recordings.id, - Recordings.start_time, - Recordings.end_time, - Recordings.segment_size, - Recordings.motion, - Recordings.objects, - Recordings.duration, - ) - .where( - Recordings.camera == camera_name, - Recordings.end_time >= after, - Recordings.start_time <= before, - ) - .order_by(Recordings.start_time) - .dicts() - .iterator() - ) - - return JSONResponse(content=list(recordings)) - - -@router.get( - "/recordings/unavailable", - response_model=list[dict], - dependencies=[Depends(allow_any_authenticated())], -) -async def no_recordings( - request: Request, - params: MediaRecordingsAvailabilityQueryParams = Depends(), - allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter), -): - """Get time ranges with no recordings.""" - cameras = params.cameras - if cameras != "all": - requested = set(unquote(cameras).split(",")) - filtered = requested.intersection(allowed_cameras) - if not filtered: - return JSONResponse(content=[]) - 
cameras = ",".join(filtered) - else: - cameras = allowed_cameras - - before = params.before or datetime.datetime.now().timestamp() - after = ( - params.after - or (datetime.datetime.now() - datetime.timedelta(hours=1)).timestamp() - ) - scale = params.scale - - clauses = [(Recordings.end_time >= after) & (Recordings.start_time <= before)] - if cameras != "all": - camera_list = cameras.split(",") - clauses.append((Recordings.camera << camera_list)) - else: - camera_list = allowed_cameras - - # Get recording start times - data: list[Recordings] = ( - Recordings.select(Recordings.start_time, Recordings.end_time) - .where(reduce(operator.and_, clauses)) - .order_by(Recordings.start_time.asc()) - .dicts() - .iterator() - ) - - # Convert recordings to list of (start, end) tuples - recordings = [(r["start_time"], r["end_time"]) for r in data] - - # Iterate through time segments and check if each has any recording - no_recording_segments = [] - current = after - current_gap_start = None - - while current < before: - segment_end = min(current + scale, before) - - # Check if this segment overlaps with any recording - has_recording = any( - rec_start < segment_end and rec_end > current - for rec_start, rec_end in recordings - ) - - if not has_recording: - # This segment has no recordings - if current_gap_start is None: - current_gap_start = current # Start a new gap - else: - # This segment has recordings - if current_gap_start is not None: - # End the current gap and append it - no_recording_segments.append( - {"start_time": int(current_gap_start), "end_time": int(current)} - ) - current_gap_start = None - - current = segment_end - - # Append the last gap if it exists - if current_gap_start is not None: - no_recording_segments.append( - {"start_time": int(current_gap_start), "end_time": int(before)} - ) - - return JSONResponse(content=no_recording_segments) - - -@router.delete( - "/recordings/start/{start}/end/{end}", - response_model=GenericResponse, - 
dependencies=[Depends(require_role(["admin"]))], - summary="Delete recordings", - description="""Deletes recordings within the specified time range. - Recordings can be filtered by cameras and kept based on motion, objects, or audio attributes. - """, -) -async def delete_recordings( - start: float = PathParam(..., description="Start timestamp (unix)"), - end: float = PathParam(..., description="End timestamp (unix)"), - params: RecordingsDeleteQueryParams = Depends(), - allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter), -): - """Delete recordings in the specified time range.""" - if start >= end: - return JSONResponse( - content={ - "success": False, - "message": "Start time must be less than end time.", - }, - status_code=400, - ) - - cameras = params.cameras - - if cameras != "all": - requested = set(cameras.split(",")) - filtered = requested.intersection(allowed_cameras) - - if not filtered: - return JSONResponse( - content={ - "success": False, - "message": "No valid cameras found in the request.", - }, - status_code=400, - ) - - camera_list = list(filtered) - else: - camera_list = allowed_cameras - - # Parse keep parameter - keep_set = set() - - if params.keep: - keep_set = set(params.keep.split(",")) - - # Build query to find overlapping recordings - clauses = [ - ( - Recordings.start_time.between(start, end) - | Recordings.end_time.between(start, end) - | ((start > Recordings.start_time) & (end < Recordings.end_time)) - ), - (Recordings.camera << camera_list), - ] - - keep_clauses = [] - - if "motion" in keep_set: - keep_clauses.append(Recordings.motion.is_null(False) & (Recordings.motion > 0)) - - if "object" in keep_set: - keep_clauses.append( - Recordings.objects.is_null(False) & (Recordings.objects > 0) - ) - - if "audio" in keep_set: - keep_clauses.append(Recordings.dBFS.is_null(False)) - - if keep_clauses: - keep_condition = reduce(operator.or_, keep_clauses) - clauses.append(~keep_condition) - - recordings_to_delete = ( - 
Recordings.select(Recordings.id, Recordings.path) - .where(reduce(operator.and_, clauses)) - .dicts() - .iterator() - ) - - recording_ids = [] - deleted_count = 0 - error_count = 0 - - for recording in recordings_to_delete: - recording_ids.append(recording["id"]) - - try: - Path(recording["path"]).unlink(missing_ok=True) - deleted_count += 1 - except Exception as e: - logger.error(f"Failed to delete recording file {recording['path']}: {e}") - error_count += 1 - - if recording_ids: - max_deletes = 100000 - recording_ids_list = list(recording_ids) - - for i in range(0, len(recording_ids_list), max_deletes): - Recordings.delete().where( - Recordings.id << recording_ids_list[i : i + max_deletes] - ).execute() - - message = f"Successfully deleted {deleted_count} recording(s)." - - if error_count > 0: - message += f" {error_count} file deletion error(s) occurred." - - return JSONResponse( - content={"success": True, "message": message}, - status_code=200, - ) diff --git a/frigate/camera/__init__.py b/frigate/camera/__init__.py index 0461c98cb..77b1fd424 100644 --- a/frigate/camera/__init__.py +++ b/frigate/camera/__init__.py @@ -19,8 +19,6 @@ class CameraMetrics: process_pid: Synchronized capture_process_pid: Synchronized ffmpeg_pid: Synchronized - reconnects_last_hour: Synchronized - stalls_last_hour: Synchronized def __init__(self, manager: SyncManager): self.camera_fps = manager.Value("d", 0) @@ -37,8 +35,6 @@ class CameraMetrics: self.process_pid = manager.Value("i", 0) self.capture_process_pid = manager.Value("i", 0) self.ffmpeg_pid = manager.Value("i", 0) - self.reconnects_last_hour = manager.Value("i", 0) - self.stalls_last_hour = manager.Value("i", 0) class PTZMetrics: diff --git a/frigate/comms/dispatcher.py b/frigate/comms/dispatcher.py index 68749b102..6e45ac175 100644 --- a/frigate/comms/dispatcher.py +++ b/frigate/comms/dispatcher.py @@ -28,7 +28,6 @@ from frigate.const import ( UPDATE_CAMERA_ACTIVITY, UPDATE_EMBEDDINGS_REINDEX_PROGRESS, 
UPDATE_EVENT_DESCRIPTION, - UPDATE_JOB_STATE, UPDATE_MODEL_STATE, UPDATE_REVIEW_DESCRIPTION, UPSERT_REVIEW_SEGMENT, @@ -61,7 +60,6 @@ class Dispatcher: self.camera_activity = CameraActivityManager(config, self.publish) self.audio_activity = AudioActivityManager(config, self.publish) self.model_state: dict[str, ModelStatusTypesEnum] = {} - self.job_state: dict[str, dict[str, Any]] = {} # {job_type: job_data} self.embeddings_reindex: dict[str, Any] = {} self.birdseye_layout: dict[str, Any] = {} self.audio_transcription_state: str = "idle" @@ -182,19 +180,6 @@ class Dispatcher: def handle_model_state() -> None: self.publish("model_state", json.dumps(self.model_state.copy())) - def handle_update_job_state() -> None: - if payload and isinstance(payload, dict): - job_type = payload.get("job_type") - if job_type: - self.job_state[job_type] = payload - self.publish( - "job_state", - json.dumps(self.job_state), - ) - - def handle_job_state() -> None: - self.publish("job_state", json.dumps(self.job_state.copy())) - def handle_update_audio_transcription_state() -> None: if payload: self.audio_transcription_state = payload @@ -292,7 +277,6 @@ class Dispatcher: UPDATE_EVENT_DESCRIPTION: handle_update_event_description, UPDATE_REVIEW_DESCRIPTION: handle_update_review_description, UPDATE_MODEL_STATE: handle_update_model_state, - UPDATE_JOB_STATE: handle_update_job_state, UPDATE_EMBEDDINGS_REINDEX_PROGRESS: handle_update_embeddings_reindex_progress, UPDATE_BIRDSEYE_LAYOUT: handle_update_birdseye_layout, UPDATE_AUDIO_TRANSCRIPTION_STATE: handle_update_audio_transcription_state, @@ -300,7 +284,6 @@ class Dispatcher: "restart": handle_restart, "embeddingsReindexProgress": handle_embeddings_reindex_progress, "modelState": handle_model_state, - "jobState": handle_job_state, "audioTranscriptionState": handle_audio_transcription_state, "birdseyeLayout": handle_birdseye_layout, "onConnect": handle_on_connect, diff --git a/frigate/config/__init__.py b/frigate/config/__init__.py index 
88f7b79f9..c6ff535b0 100644 --- a/frigate/config/__init__.py +++ b/frigate/config/__init__.py @@ -8,7 +8,6 @@ from .config import * # noqa: F403 from .database import * # noqa: F403 from .logger import * # noqa: F403 from .mqtt import * # noqa: F403 -from .network import * # noqa: F403 from .proxy import * # noqa: F403 from .telemetry import * # noqa: F403 from .tls import * # noqa: F403 diff --git a/frigate/config/camera/genai.py b/frigate/config/camera/genai.py index 3dd596c3b..a4d9199af 100644 --- a/frigate/config/camera/genai.py +++ b/frigate/config/camera/genai.py @@ -14,7 +14,6 @@ class GenAIProviderEnum(str, Enum): azure_openai = "azure_openai" gemini = "gemini" ollama = "ollama" - llamacpp = "llamacpp" class GenAIConfig(FrigateBaseModel): diff --git a/frigate/config/camera/record.py b/frigate/config/camera/record.py index fe24cf522..09a7a84d5 100644 --- a/frigate/config/camera/record.py +++ b/frigate/config/camera/record.py @@ -1,5 +1,5 @@ from enum import Enum -from typing import Optional, Union +from typing import Optional from pydantic import Field @@ -19,6 +19,8 @@ __all__ = [ "RetainModeEnum", ] +DEFAULT_TIME_LAPSE_FFMPEG_ARGS = "-vf setpts=0.04*PTS -r 30" + class RecordRetainConfig(FrigateBaseModel): days: float = Field(default=0, ge=0, title="Default retention period.") @@ -65,13 +67,16 @@ class RecordPreviewConfig(FrigateBaseModel): class RecordExportConfig(FrigateBaseModel): - hwaccel_args: Union[str, list[str]] = Field( - default="auto", title="Export-specific FFmpeg hardware acceleration arguments." + timelapse_args: str = Field( + default=DEFAULT_TIME_LAPSE_FFMPEG_ARGS, title="Timelapse Args" ) class RecordConfig(FrigateBaseModel): enabled: bool = Field(default=False, title="Enable record on all cameras.") + sync_recordings: bool = Field( + default=False, title="Sync recordings with disk on startup and once a day." 
+ ) expire_interval: int = Field( default=60, title="Number of minutes to wait between cleanup runs.", diff --git a/frigate/config/config.py b/frigate/config/config.py index 370c89458..a26d4c50e 100644 --- a/frigate/config/config.py +++ b/frigate/config/config.py @@ -525,14 +525,6 @@ class FrigateConfig(FrigateBaseModel): if camera_config.ffmpeg.hwaccel_args == "auto": camera_config.ffmpeg.hwaccel_args = self.ffmpeg.hwaccel_args - # Resolve export hwaccel_args: camera export -> camera ffmpeg -> global ffmpeg - # This allows per-camera override for exports (e.g., when camera resolution - # exceeds hardware encoder limits) - if camera_config.record.export.hwaccel_args == "auto": - camera_config.record.export.hwaccel_args = ( - camera_config.ffmpeg.hwaccel_args - ) - for input in camera_config.ffmpeg.inputs: need_detect_dimensions = "detect" in input.roles and ( camera_config.detect.height is None diff --git a/frigate/config/network.py b/frigate/config/network.py index ab4e5b83e..c8b3cfd1c 100644 --- a/frigate/config/network.py +++ b/frigate/config/network.py @@ -1,27 +1,13 @@ -from typing import Union - from pydantic import Field from .base import FrigateBaseModel -__all__ = ["IPv6Config", "ListenConfig", "NetworkingConfig"] +__all__ = ["IPv6Config", "NetworkingConfig"] class IPv6Config(FrigateBaseModel): enabled: bool = Field(default=False, title="Enable IPv6 for port 5000 and/or 8971") -class ListenConfig(FrigateBaseModel): - internal: Union[int, str] = Field( - default=5000, title="Internal listening port for Frigate" - ) - external: Union[int, str] = Field( - default=8971, title="External listening port for Frigate" - ) - - class NetworkingConfig(FrigateBaseModel): - ipv6: IPv6Config = Field(default_factory=IPv6Config, title="IPv6 configuration") - listen: ListenConfig = Field( - default_factory=ListenConfig, title="Listening ports configuration" - ) + ipv6: IPv6Config = Field(default_factory=IPv6Config, title="Network configuration") diff --git 
a/frigate/const.py b/frigate/const.py index 87fdb8e70..41c24f087 100644 --- a/frigate/const.py +++ b/frigate/const.py @@ -14,6 +14,7 @@ RECORD_DIR = f"{BASE_DIR}/recordings" TRIGGER_DIR = f"{CLIPS_DIR}/triggers" BIRDSEYE_PIPE = "/tmp/cache/birdseye" CACHE_DIR = "/tmp/cache" +FRIGATE_LOCALHOST = "http://127.0.0.1:5000" PLUS_ENV_VAR = "PLUS_API_KEY" PLUS_API_HOST = "https://api.frigate.video" @@ -121,7 +122,6 @@ UPDATE_REVIEW_DESCRIPTION = "update_review_description" UPDATE_MODEL_STATE = "update_model_state" UPDATE_EMBEDDINGS_REINDEX_PROGRESS = "handle_embeddings_reindex_progress" UPDATE_BIRDSEYE_LAYOUT = "update_birdseye_layout" -UPDATE_JOB_STATE = "update_job_state" NOTIFICATION_TEST = "notification_test" # IO Nice Values diff --git a/frigate/detectors/detection_runners.py b/frigate/detectors/detection_runners.py index da7df9d36..fcbb41e66 100644 --- a/frigate/detectors/detection_runners.py +++ b/frigate/detectors/detection_runners.py @@ -131,8 +131,10 @@ class ONNXModelRunner(BaseModelRunner): return model_type in [ EnrichmentModelTypeEnum.paddleocr.value, + EnrichmentModelTypeEnum.yolov9_license_plate.value, + EnrichmentModelTypeEnum.jina_v1.value, EnrichmentModelTypeEnum.jina_v2.value, - EnrichmentModelTypeEnum.arcface.value, + EnrichmentModelTypeEnum.facenet.value, ModelTypeEnum.rfdetr.value, ModelTypeEnum.dfine.value, ] diff --git a/frigate/genai/__init__.py b/frigate/genai/__init__.py index 0ae664b9f..be1f6d1e7 100644 --- a/frigate/genai/__init__.py +++ b/frigate/genai/__init__.py @@ -69,7 +69,7 @@ class GenAIClient: return "\n- (No objects detected)" context_prompt = f""" -Your task is to analyze a sequence of images taken in chronological order from a security camera. +Your task is to analyze the sequence of images ({len(thumbnails)} total) taken in chronological order from the perspective of the {review_data["camera"]} security camera. 
## Normal Activity Patterns for This Property @@ -108,8 +108,7 @@ Your response MUST be a flat JSON object with: ## Sequence Details -- Camera: {review_data["camera"]} -- Total frames: {len(thumbnails)} (Frame 1 = earliest, Frame {len(thumbnails)} = latest) +- Frame 1 = earliest, Frame {len(thumbnails)} = latest - Activity started at {review_data["start"]} and lasted {review_data["duration"]} seconds - Zones involved: {", ".join(review_data["zones"]) if review_data["zones"] else "None"} @@ -293,64 +292,6 @@ Guidelines: """Get the context window size for this provider in tokens.""" return 4096 - def chat_with_tools( - self, - messages: list[dict[str, Any]], - tools: Optional[list[dict[str, Any]]] = None, - tool_choice: Optional[str] = "auto", - ) -> dict[str, Any]: - """ - Send chat messages to LLM with optional tool definitions. - - This method handles conversation-style interactions with the LLM, - including function calling/tool usage capabilities. - - Args: - messages: List of message dictionaries. Each message should have: - - 'role': str - One of 'user', 'assistant', 'system', or 'tool' - - 'content': str - The message content - - 'tool_call_id': Optional[str] - For tool responses, the ID of the tool call - - 'name': Optional[str] - For tool messages, the tool name - tools: Optional list of tool definitions in OpenAI-compatible format. - Each tool should have 'type': 'function' and 'function' with: - - 'name': str - Tool name - - 'description': str - Tool description - - 'parameters': dict - JSON schema for parameters - tool_choice: How the model should handle tools: - - 'auto': Model decides whether to call tools - - 'none': Model must not call tools - - 'required': Model must call at least one tool - - Or a dict specifying a specific tool to call - **kwargs: Additional provider-specific parameters. 
- - Returns: - Dictionary with: - - 'content': Optional[str] - The text response from the LLM, None if tool calls - - 'tool_calls': Optional[List[Dict]] - List of tool calls if LLM wants to call tools. - Each tool call dict has: - - 'id': str - Unique identifier for this tool call - - 'name': str - Tool name to call - - 'arguments': dict - Arguments for the tool call (parsed JSON) - - 'finish_reason': str - Reason generation stopped: - - 'stop': Normal completion - - 'tool_calls': LLM wants to call tools - - 'length': Hit token limit - - 'error': An error occurred - - Raises: - NotImplementedError: If the provider doesn't implement this method. - """ - # Base implementation - each provider should override this - logger.warning( - f"{self.__class__.__name__} does not support chat_with_tools. " - "This method should be overridden by the provider implementation." - ) - return { - "content": None, - "tool_calls": None, - "finish_reason": "error", - } - def get_genai_client(config: FrigateConfig) -> Optional[GenAIClient]: """Get the GenAI client.""" diff --git a/frigate/genai/azure-openai.py b/frigate/genai/azure-openai.py index 21ed5d856..eb08f7786 100644 --- a/frigate/genai/azure-openai.py +++ b/frigate/genai/azure-openai.py @@ -1,9 +1,8 @@ """Azure OpenAI Provider for Frigate AI.""" import base64 -import json import logging -from typing import Any, Optional +from typing import Optional from urllib.parse import parse_qs, urlparse from openai import AzureOpenAI @@ -77,93 +76,3 @@ class OpenAIClient(GenAIClient): def get_context_size(self) -> int: """Get the context window size for Azure OpenAI.""" return 128000 - - def chat_with_tools( - self, - messages: list[dict[str, Any]], - tools: Optional[list[dict[str, Any]]] = None, - tool_choice: Optional[str] = "auto", - ) -> dict[str, Any]: - try: - openai_tool_choice = None - if tool_choice: - if tool_choice == "none": - openai_tool_choice = "none" - elif tool_choice == "auto": - openai_tool_choice = "auto" - elif 
tool_choice == "required": - openai_tool_choice = "required" - - request_params = { - "model": self.genai_config.model, - "messages": messages, - "timeout": self.timeout, - } - - if tools: - request_params["tools"] = tools - if openai_tool_choice is not None: - request_params["tool_choice"] = openai_tool_choice - - result = self.provider.chat.completions.create(**request_params) - - if ( - result is None - or not hasattr(result, "choices") - or len(result.choices) == 0 - ): - return { - "content": None, - "tool_calls": None, - "finish_reason": "error", - } - - choice = result.choices[0] - message = choice.message - - content = message.content.strip() if message.content else None - - tool_calls = None - if message.tool_calls: - tool_calls = [] - for tool_call in message.tool_calls: - try: - arguments = json.loads(tool_call.function.arguments) - except (json.JSONDecodeError, AttributeError) as e: - logger.warning( - f"Failed to parse tool call arguments: {e}, " - f"tool: {tool_call.function.name if hasattr(tool_call.function, 'name') else 'unknown'}" - ) - arguments = {} - - tool_calls.append( - { - "id": tool_call.id if hasattr(tool_call, "id") else "", - "name": tool_call.function.name - if hasattr(tool_call.function, "name") - else "", - "arguments": arguments, - } - ) - - finish_reason = "error" - if hasattr(choice, "finish_reason") and choice.finish_reason: - finish_reason = choice.finish_reason - elif tool_calls: - finish_reason = "tool_calls" - elif content: - finish_reason = "stop" - - return { - "content": content, - "tool_calls": tool_calls, - "finish_reason": finish_reason, - } - - except Exception as e: - logger.warning("Azure OpenAI returned an error: %s", str(e)) - return { - "content": None, - "tool_calls": None, - "finish_reason": "error", - } diff --git a/frigate/genai/gemini.py b/frigate/genai/gemini.py index fd273faec..b700c33a4 100644 --- a/frigate/genai/gemini.py +++ b/frigate/genai/gemini.py @@ -1,7 +1,7 @@ """Gemini Provider for Frigate AI.""" 
import logging -from typing import Any, Optional +from typing import Optional from google import genai from google.genai import errors, types @@ -76,200 +76,3 @@ class GeminiClient(GenAIClient): """Get the context window size for Gemini.""" # Gemini Pro Vision has a 1M token context window return 1000000 - - def chat_with_tools( - self, - messages: list[dict[str, Any]], - tools: Optional[list[dict[str, Any]]] = None, - tool_choice: Optional[str] = "auto", - ) -> dict[str, Any]: - """ - Send chat messages to Gemini with optional tool definitions. - - Implements function calling/tool usage for Gemini models. - """ - try: - # Convert messages to Gemini format - gemini_messages = [] - for msg in messages: - role = msg.get("role", "user") - content = msg.get("content", "") - - # Map roles to Gemini format - if role == "system": - # Gemini doesn't have system role, prepend to first user message - if gemini_messages and gemini_messages[0].role == "user": - gemini_messages[0].parts[ - 0 - ].text = f"{content}\n\n{gemini_messages[0].parts[0].text}" - else: - gemini_messages.append( - types.Content( - role="user", parts=[types.Part.from_text(text=content)] - ) - ) - elif role == "assistant": - gemini_messages.append( - types.Content( - role="model", parts=[types.Part.from_text(text=content)] - ) - ) - elif role == "tool": - # Handle tool response - function_response = { - "name": msg.get("name", ""), - "response": content, - } - gemini_messages.append( - types.Content( - role="function", - parts=[ - types.Part.from_function_response(function_response) - ], - ) - ) - else: # user - gemini_messages.append( - types.Content( - role="user", parts=[types.Part.from_text(text=content)] - ) - ) - - # Convert tools to Gemini format - gemini_tools = None - if tools: - gemini_tools = [] - for tool in tools: - if tool.get("type") == "function": - func = tool.get("function", {}) - gemini_tools.append( - types.Tool( - function_declarations=[ - types.FunctionDeclaration( - 
name=func.get("name", ""), - description=func.get("description", ""), - parameters=func.get("parameters", {}), - ) - ] - ) - ) - - # Configure tool choice - tool_config = None - if tool_choice: - if tool_choice == "none": - tool_config = types.ToolConfig( - function_calling_config=types.FunctionCallingConfig(mode="NONE") - ) - elif tool_choice == "auto": - tool_config = types.ToolConfig( - function_calling_config=types.FunctionCallingConfig(mode="AUTO") - ) - elif tool_choice == "required": - tool_config = types.ToolConfig( - function_calling_config=types.FunctionCallingConfig(mode="ANY") - ) - - # Build request config - config_params = {"candidate_count": 1} - - if gemini_tools: - config_params["tools"] = gemini_tools - - if tool_config: - config_params["tool_config"] = tool_config - - # Merge runtime_options - if isinstance(self.genai_config.runtime_options, dict): - config_params.update(self.genai_config.runtime_options) - - response = self.provider.models.generate_content( - model=self.genai_config.model, - contents=gemini_messages, - config=types.GenerateContentConfig(**config_params), - ) - - # Check if response is valid - if not response or not response.candidates: - return { - "content": None, - "tool_calls": None, - "finish_reason": "error", - } - - candidate = response.candidates[0] - content = None - tool_calls = None - - # Extract content and tool calls from response - if candidate.content and candidate.content.parts: - for part in candidate.content.parts: - if part.text: - content = part.text.strip() - elif part.function_call: - # Handle function call - if tool_calls is None: - tool_calls = [] - - try: - arguments = ( - dict(part.function_call.args) - if part.function_call.args - else {} - ) - except Exception: - arguments = {} - - tool_calls.append( - { - "id": part.function_call.name or "", - "name": part.function_call.name or "", - "arguments": arguments, - } - ) - - # Determine finish reason - finish_reason = "error" - if hasattr(candidate, 
"finish_reason") and candidate.finish_reason: - from google.genai.types import FinishReason - - if candidate.finish_reason == FinishReason.STOP: - finish_reason = "stop" - elif candidate.finish_reason == FinishReason.MAX_TOKENS: - finish_reason = "length" - elif candidate.finish_reason in [ - FinishReason.SAFETY, - FinishReason.RECITATION, - ]: - finish_reason = "error" - elif tool_calls: - finish_reason = "tool_calls" - elif content: - finish_reason = "stop" - elif tool_calls: - finish_reason = "tool_calls" - elif content: - finish_reason = "stop" - - return { - "content": content, - "tool_calls": tool_calls, - "finish_reason": finish_reason, - } - - except errors.APIError as e: - logger.warning("Gemini API error during chat_with_tools: %s", str(e)) - return { - "content": None, - "tool_calls": None, - "finish_reason": "error", - } - except Exception as e: - logger.warning( - "Gemini returned an error during chat_with_tools: %s", str(e) - ) - return { - "content": None, - "tool_calls": None, - "finish_reason": "error", - } diff --git a/frigate/genai/llama_cpp.py b/frigate/genai/llama_cpp.py deleted file mode 100644 index fafef74ae..000000000 --- a/frigate/genai/llama_cpp.py +++ /dev/null @@ -1,238 +0,0 @@ -"""llama.cpp Provider for Frigate AI.""" - -import base64 -import json -import logging -from typing import Any, Optional - -import requests - -from frigate.config import GenAIProviderEnum -from frigate.genai import GenAIClient, register_genai_provider - -logger = logging.getLogger(__name__) - - -@register_genai_provider(GenAIProviderEnum.llamacpp) -class LlamaCppClient(GenAIClient): - """Generative AI client for Frigate using llama.cpp server.""" - - LOCAL_OPTIMIZED_OPTIONS = { - "temperature": 0.7, - "repeat_penalty": 1.05, - "top_p": 0.8, - } - - provider: str # base_url - provider_options: dict[str, Any] - - def _init_provider(self): - """Initialize the client.""" - self.provider_options = { - **self.LOCAL_OPTIMIZED_OPTIONS, - 
**self.genai_config.provider_options, - } - return ( - self.genai_config.base_url.rstrip("/") - if self.genai_config.base_url - else None - ) - - def _send(self, prompt: str, images: list[bytes]) -> Optional[str]: - """Submit a request to llama.cpp server.""" - if self.provider is None: - logger.warning( - "llama.cpp provider has not been initialized, a description will not be generated. Check your llama.cpp configuration." - ) - return None - - try: - content = [] - for image in images: - encoded_image = base64.b64encode(image).decode("utf-8") - content.append( - { - "type": "image_url", - "image_url": { - "url": f"data:image/jpeg;base64,{encoded_image}", - }, - } - ) - content.append( - { - "type": "text", - "text": prompt, - } - ) - - # Build request payload with llama.cpp native options - payload = { - "messages": [ - { - "role": "user", - "content": content, - }, - ], - **self.provider_options, - } - - response = requests.post( - f"{self.provider}/v1/chat/completions", - json=payload, - timeout=self.timeout, - ) - response.raise_for_status() - result = response.json() - - if ( - result is not None - and "choices" in result - and len(result["choices"]) > 0 - ): - choice = result["choices"][0] - if "message" in choice and "content" in choice["message"]: - return choice["message"]["content"].strip() - return None - except Exception as e: - logger.warning("llama.cpp returned an error: %s", str(e)) - return None - - def get_context_size(self) -> int: - """Get the context window size for llama.cpp.""" - return self.genai_config.provider_options.get("context_size", 4096) - - def chat_with_tools( - self, - messages: list[dict[str, Any]], - tools: Optional[list[dict[str, Any]]] = None, - tool_choice: Optional[str] = "auto", - ) -> dict[str, Any]: - """ - Send chat messages to llama.cpp server with optional tool definitions. - - Uses the OpenAI-compatible endpoint but passes through all native llama.cpp - parameters (like slot_id, temperature, etc.) 
via provider_options. - """ - if self.provider is None: - logger.warning( - "llama.cpp provider has not been initialized. Check your llama.cpp configuration." - ) - return { - "content": None, - "tool_calls": None, - "finish_reason": "error", - } - - try: - openai_tool_choice = None - if tool_choice: - if tool_choice == "none": - openai_tool_choice = "none" - elif tool_choice == "auto": - openai_tool_choice = "auto" - elif tool_choice == "required": - openai_tool_choice = "required" - - payload = { - "messages": messages, - } - - if tools: - payload["tools"] = tools - if openai_tool_choice is not None: - payload["tool_choice"] = openai_tool_choice - - provider_opts = { - k: v for k, v in self.provider_options.items() if k != "context_size" - } - payload.update(provider_opts) - - response = requests.post( - f"{self.provider}/v1/chat/completions", - json=payload, - timeout=self.timeout, - ) - response.raise_for_status() - result = response.json() - - if result is None or "choices" not in result or len(result["choices"]) == 0: - return { - "content": None, - "tool_calls": None, - "finish_reason": "error", - } - - choice = result["choices"][0] - message = choice.get("message", {}) - - content = message.get("content") - if content: - content = content.strip() - else: - content = None - - tool_calls = None - if "tool_calls" in message and message["tool_calls"]: - tool_calls = [] - for tool_call in message["tool_calls"]: - try: - function_data = tool_call.get("function", {}) - arguments_str = function_data.get("arguments", "{}") - arguments = json.loads(arguments_str) - except (json.JSONDecodeError, KeyError, TypeError) as e: - logger.warning( - f"Failed to parse tool call arguments: {e}, " - f"tool: {function_data.get('name', 'unknown')}" - ) - arguments = {} - - tool_calls.append( - { - "id": tool_call.get("id", ""), - "name": function_data.get("name", ""), - "arguments": arguments, - } - ) - - finish_reason = "error" - if "finish_reason" in choice and 
choice["finish_reason"]: - finish_reason = choice["finish_reason"] - elif tool_calls: - finish_reason = "tool_calls" - elif content: - finish_reason = "stop" - - return { - "content": content, - "tool_calls": tool_calls, - "finish_reason": finish_reason, - } - - except requests.exceptions.Timeout as e: - logger.warning("llama.cpp request timed out: %s", str(e)) - return { - "content": None, - "tool_calls": None, - "finish_reason": "error", - } - except requests.exceptions.RequestException as e: - error_detail = str(e) - if hasattr(e, "response") and e.response is not None: - try: - error_body = e.response.text - error_detail = f"{str(e)} - Response: {error_body[:500]}" - except Exception: - pass - logger.warning("llama.cpp returned an error: %s", error_detail) - return { - "content": None, - "tool_calls": None, - "finish_reason": "error", - } - except Exception as e: - logger.warning("Unexpected error in llama.cpp chat_with_tools: %s", str(e)) - return { - "content": None, - "tool_calls": None, - "finish_reason": "error", - } diff --git a/frigate/genai/ollama.py b/frigate/genai/ollama.py index 6e9a4f5d5..ab6d3c0b3 100644 --- a/frigate/genai/ollama.py +++ b/frigate/genai/ollama.py @@ -1,6 +1,5 @@ """Ollama Provider for Frigate AI.""" -import json import logging from typing import Any, Optional @@ -87,120 +86,3 @@ class OllamaClient(GenAIClient): return self.genai_config.provider_options.get("options", {}).get( "num_ctx", 4096 ) - - def chat_with_tools( - self, - messages: list[dict[str, Any]], - tools: Optional[list[dict[str, Any]]] = None, - tool_choice: Optional[str] = "auto", - ) -> dict[str, Any]: - if self.provider is None: - logger.warning( - "Ollama provider has not been initialized. Check your Ollama configuration." 
- ) - return { - "content": None, - "tool_calls": None, - "finish_reason": "error", - } - - try: - request_messages = [] - for msg in messages: - msg_dict = { - "role": msg.get("role"), - "content": msg.get("content", ""), - } - if msg.get("tool_call_id"): - msg_dict["tool_call_id"] = msg["tool_call_id"] - if msg.get("name"): - msg_dict["name"] = msg["name"] - if msg.get("tool_calls"): - msg_dict["tool_calls"] = msg["tool_calls"] - request_messages.append(msg_dict) - - request_params = { - "model": self.genai_config.model, - "messages": request_messages, - } - - if tools: - request_params["tools"] = tools - if tool_choice: - if tool_choice == "none": - request_params["tool_choice"] = "none" - elif tool_choice == "required": - request_params["tool_choice"] = "required" - elif tool_choice == "auto": - request_params["tool_choice"] = "auto" - - request_params.update(self.provider_options) - - response = self.provider.chat(**request_params) - - if not response or "message" not in response: - return { - "content": None, - "tool_calls": None, - "finish_reason": "error", - } - - message = response["message"] - content = ( - message.get("content", "").strip() if message.get("content") else None - ) - - tool_calls = None - if "tool_calls" in message and message["tool_calls"]: - tool_calls = [] - for tool_call in message["tool_calls"]: - try: - function_data = tool_call.get("function", {}) - arguments_str = function_data.get("arguments", "{}") - arguments = json.loads(arguments_str) - except (json.JSONDecodeError, KeyError, TypeError) as e: - logger.warning( - f"Failed to parse tool call arguments: {e}, " - f"tool: {function_data.get('name', 'unknown')}" - ) - arguments = {} - - tool_calls.append( - { - "id": tool_call.get("id", ""), - "name": function_data.get("name", ""), - "arguments": arguments, - } - ) - - finish_reason = "error" - if "done" in response and response["done"]: - if tool_calls: - finish_reason = "tool_calls" - elif content: - finish_reason = "stop" - elif 
tool_calls: - finish_reason = "tool_calls" - elif content: - finish_reason = "stop" - - return { - "content": content, - "tool_calls": tool_calls, - "finish_reason": finish_reason, - } - - except (TimeoutException, ResponseError, ConnectionError) as e: - logger.warning("Ollama returned an error: %s", str(e)) - return { - "content": None, - "tool_calls": None, - "finish_reason": "error", - } - except Exception as e: - logger.warning("Unexpected error in Ollama chat_with_tools: %s", str(e)) - return { - "content": None, - "tool_calls": None, - "finish_reason": "error", - } diff --git a/frigate/genai/openai.py b/frigate/genai/openai.py index c8d9ca7ab..1fb0dd852 100644 --- a/frigate/genai/openai.py +++ b/frigate/genai/openai.py @@ -1,9 +1,8 @@ """OpenAI Provider for Frigate AI.""" import base64 -import json import logging -from typing import Any, Optional +from typing import Optional from httpx import TimeoutException from openai import OpenAI @@ -117,113 +116,3 @@ class OpenAIClient(GenAIClient): f"Using default context size {self.context_size} for model {self.genai_config.model}" ) return self.context_size - - def chat_with_tools( - self, - messages: list[dict[str, Any]], - tools: Optional[list[dict[str, Any]]] = None, - tool_choice: Optional[str] = "auto", - ) -> dict[str, Any]: - """ - Send chat messages to OpenAI with optional tool definitions. - - Implements function calling/tool usage for OpenAI models. 
- """ - try: - openai_tool_choice = None - if tool_choice: - if tool_choice == "none": - openai_tool_choice = "none" - elif tool_choice == "auto": - openai_tool_choice = "auto" - elif tool_choice == "required": - openai_tool_choice = "required" - - request_params = { - "model": self.genai_config.model, - "messages": messages, - "timeout": self.timeout, - } - - if tools: - request_params["tools"] = tools - if openai_tool_choice is not None: - request_params["tool_choice"] = openai_tool_choice - - if isinstance(self.genai_config.provider_options, dict): - excluded_options = {"context_size"} - provider_opts = { - k: v - for k, v in self.genai_config.provider_options.items() - if k not in excluded_options - } - request_params.update(provider_opts) - - result = self.provider.chat.completions.create(**request_params) - - if ( - result is None - or not hasattr(result, "choices") - or len(result.choices) == 0 - ): - return { - "content": None, - "tool_calls": None, - "finish_reason": "error", - } - - choice = result.choices[0] - message = choice.message - content = message.content.strip() if message.content else None - - tool_calls = None - if message.tool_calls: - tool_calls = [] - for tool_call in message.tool_calls: - try: - arguments = json.loads(tool_call.function.arguments) - except (json.JSONDecodeError, AttributeError) as e: - logger.warning( - f"Failed to parse tool call arguments: {e}, " - f"tool: {tool_call.function.name if hasattr(tool_call.function, 'name') else 'unknown'}" - ) - arguments = {} - - tool_calls.append( - { - "id": tool_call.id if hasattr(tool_call, "id") else "", - "name": tool_call.function.name - if hasattr(tool_call.function, "name") - else "", - "arguments": arguments, - } - ) - - finish_reason = "error" - if hasattr(choice, "finish_reason") and choice.finish_reason: - finish_reason = choice.finish_reason - elif tool_calls: - finish_reason = "tool_calls" - elif content: - finish_reason = "stop" - - return { - "content": content, - 
"tool_calls": tool_calls, - "finish_reason": finish_reason, - } - - except TimeoutException as e: - logger.warning("OpenAI request timed out: %s", str(e)) - return { - "content": None, - "tool_calls": None, - "finish_reason": "error", - } - except Exception as e: - logger.warning("OpenAI returned an error: %s", str(e)) - return { - "content": None, - "tool_calls": None, - "finish_reason": "error", - } diff --git a/frigate/jobs/__init__.py b/frigate/jobs/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/frigate/jobs/job.py b/frigate/jobs/job.py deleted file mode 100644 index a445eebf5..000000000 --- a/frigate/jobs/job.py +++ /dev/null @@ -1,21 +0,0 @@ -"""Generic base class for long-running background jobs.""" - -from dataclasses import asdict, dataclass, field -from typing import Any, Optional - - -@dataclass -class Job: - """Base class for long-running background jobs.""" - - id: str = field(default_factory=lambda: __import__("uuid").uuid4().__str__()[:12]) - job_type: str = "" # Must be set by subclasses - status: str = "queued" # queued, running, success, failed, cancelled - results: Optional[dict[str, Any]] = None - start_time: Optional[float] = None - end_time: Optional[float] = None - error_message: Optional[str] = None - - def to_dict(self) -> dict[str, Any]: - """Convert to dictionary for WebSocket transmission.""" - return asdict(self) diff --git a/frigate/jobs/manager.py b/frigate/jobs/manager.py deleted file mode 100644 index 8aa77b3c7..000000000 --- a/frigate/jobs/manager.py +++ /dev/null @@ -1,70 +0,0 @@ -"""Generic job management for long-running background tasks.""" - -import threading -from typing import Optional - -from frigate.jobs.job import Job -from frigate.types import JobStatusTypesEnum - -# Global state and locks for enforcing single concurrent job per job type -_job_locks: dict[str, threading.Lock] = {} -_current_jobs: dict[str, Optional[Job]] = {} -# Keep completed jobs for retrieval, keyed by (job_type, job_id) 
-_completed_jobs: dict[tuple[str, str], Job] = {} - - -def _get_lock(job_type: str) -> threading.Lock: - """Get or create a lock for the specified job type.""" - if job_type not in _job_locks: - _job_locks[job_type] = threading.Lock() - return _job_locks[job_type] - - -def set_current_job(job: Job) -> None: - """Set the current job for a given job type.""" - lock = _get_lock(job.job_type) - with lock: - # Store the previous job if it was completed - old_job = _current_jobs.get(job.job_type) - if old_job and old_job.status in ( - JobStatusTypesEnum.success, - JobStatusTypesEnum.failed, - JobStatusTypesEnum.cancelled, - ): - _completed_jobs[(job.job_type, old_job.id)] = old_job - _current_jobs[job.job_type] = job - - -def clear_current_job(job_type: str, job_id: Optional[str] = None) -> None: - """Clear the current job for a given job type, optionally checking the ID.""" - lock = _get_lock(job_type) - with lock: - if job_type in _current_jobs: - current = _current_jobs[job_type] - if current is None or (job_id is None or current.id == job_id): - _current_jobs[job_type] = None - - -def get_current_job(job_type: str) -> Optional[Job]: - """Get the current running/queued job for a given job type, if any.""" - lock = _get_lock(job_type) - with lock: - return _current_jobs.get(job_type) - - -def get_job_by_id(job_type: str, job_id: str) -> Optional[Job]: - """Get job by ID. 
Checks current job first, then completed jobs.""" - lock = _get_lock(job_type) - with lock: - # Check if it's the current job - current = _current_jobs.get(job_type) - if current and current.id == job_id: - return current - # Check if it's a completed job - return _completed_jobs.get((job_type, job_id)) - - -def job_is_running(job_type: str) -> bool: - """Check if a job of the given type is currently running or queued.""" - job = get_current_job(job_type) - return job is not None and job.status in ("queued", "running") diff --git a/frigate/jobs/media_sync.py b/frigate/jobs/media_sync.py deleted file mode 100644 index 7c15435fd..000000000 --- a/frigate/jobs/media_sync.py +++ /dev/null @@ -1,135 +0,0 @@ -"""Media sync job management with background execution.""" - -import logging -import threading -from dataclasses import dataclass, field -from datetime import datetime -from typing import Optional - -from frigate.comms.inter_process import InterProcessRequestor -from frigate.const import UPDATE_JOB_STATE -from frigate.jobs.job import Job -from frigate.jobs.manager import ( - get_current_job, - get_job_by_id, - job_is_running, - set_current_job, -) -from frigate.types import JobStatusTypesEnum -from frigate.util.media import sync_all_media - -logger = logging.getLogger(__name__) - - -@dataclass -class MediaSyncJob(Job): - """In-memory job state for media sync operations.""" - - job_type: str = "media_sync" - dry_run: bool = False - media_types: list[str] = field(default_factory=lambda: ["all"]) - force: bool = False - - -class MediaSyncRunner(threading.Thread): - """Thread-based runner for media sync jobs.""" - - def __init__(self, job: MediaSyncJob) -> None: - super().__init__(daemon=True, name="media_sync") - self.job = job - self.requestor = InterProcessRequestor() - - def run(self) -> None: - """Execute the media sync job and broadcast status updates.""" - try: - # Update job status to running - self.job.status = JobStatusTypesEnum.running - self.job.start_time = 
datetime.now().timestamp() - self._broadcast_status() - - # Execute sync with provided parameters - logger.debug( - f"Starting media sync job {self.job.id}: " - f"media_types={self.job.media_types}, " - f"dry_run={self.job.dry_run}, " - f"force={self.job.force}" - ) - - results = sync_all_media( - dry_run=self.job.dry_run, - media_types=self.job.media_types, - force=self.job.force, - ) - - # Store results and mark as complete - self.job.results = results.to_dict() - self.job.status = JobStatusTypesEnum.success - self.job.end_time = datetime.now().timestamp() - - logger.debug(f"Media sync job {self.job.id} completed successfully") - self._broadcast_status() - - except Exception as e: - logger.error(f"Media sync job {self.job.id} failed: {e}", exc_info=True) - self.job.status = JobStatusTypesEnum.failed - self.job.error_message = str(e) - self.job.end_time = datetime.now().timestamp() - self._broadcast_status() - - finally: - if self.requestor: - self.requestor.stop() - - def _broadcast_status(self) -> None: - """Broadcast job status update via IPC to all WebSocket subscribers.""" - try: - self.requestor.send_data( - UPDATE_JOB_STATE, - self.job.to_dict(), - ) - except Exception as e: - logger.warning(f"Failed to broadcast media sync status: {e}") - - -def start_media_sync_job( - dry_run: bool = False, - media_types: Optional[list[str]] = None, - force: bool = False, -) -> Optional[str]: - """Start a new media sync job if none is currently running. - - Returns job ID on success, None if job already running. - """ - # Check if a job is already running - if job_is_running("media_sync"): - current = get_current_job("media_sync") - logger.warning( - f"Media sync job {current.id} is already running. Rejecting new request." 
- ) - return None - - # Create and start new job - job = MediaSyncJob( - dry_run=dry_run, - media_types=media_types or ["all"], - force=force, - ) - - logger.debug(f"Creating new media sync job: {job.id}") - set_current_job(job) - - # Start the background runner - runner = MediaSyncRunner(job) - runner.start() - - return job.id - - -def get_current_media_sync_job() -> Optional[MediaSyncJob]: - """Get the current running/queued media sync job, if any.""" - return get_current_job("media_sync") - - -def get_media_sync_job_by_id(job_id: str) -> Optional[MediaSyncJob]: - """Get media sync job by ID. Currently only tracks the current job.""" - return get_job_by_id("media_sync", job_id) diff --git a/frigate/models.py b/frigate/models.py index fd5061613..93f6cb54f 100644 --- a/frigate/models.py +++ b/frigate/models.py @@ -80,14 +80,6 @@ class Recordings(Model): regions = IntegerField(null=True) -class ExportCase(Model): - id = CharField(null=False, primary_key=True, max_length=30) - name = CharField(index=True, max_length=100) - description = TextField(null=True) - created_at = DateTimeField() - updated_at = DateTimeField() - - class Export(Model): id = CharField(null=False, primary_key=True, max_length=30) camera = CharField(index=True, max_length=20) @@ -96,12 +88,6 @@ class Export(Model): video_path = CharField(unique=True) thumb_path = CharField(unique=True) in_progress = BooleanField() - export_case = ForeignKeyField( - ExportCase, - null=True, - backref="exports", - column_name="export_case_id", - ) class ReviewSegment(Model): diff --git a/frigate/output/preview.py b/frigate/output/preview.py index b66c1298a..6dfd90904 100644 --- a/frigate/output/preview.py +++ b/frigate/output/preview.py @@ -47,15 +47,6 @@ PREVIEW_QUALITY_BIT_RATES = { RecordQualityEnum.high: 9864, RecordQualityEnum.very_high: 10096, } -# the -qmax param for ffmpeg prevents the encoder from overly compressing frames while still trying to hit the bitrate target -# lower values are higher quality. 
This is especially important for iniitial frames in the segment -PREVIEW_QMAX_PARAM = { - RecordQualityEnum.very_low: "", - RecordQualityEnum.low: "", - RecordQualityEnum.medium: "", - RecordQualityEnum.high: " -qmax 25", - RecordQualityEnum.very_high: " -qmax 25", -} def get_cache_image_name(camera: str, frame_time: float) -> str: @@ -66,51 +57,6 @@ def get_cache_image_name(camera: str, frame_time: float) -> str: ) -def get_most_recent_preview_frame(camera: str, before: float = None) -> str | None: - """Get the most recent preview frame for a camera.""" - if not os.path.exists(PREVIEW_CACHE_DIR): - return None - - try: - # files are named preview_{camera}-{timestamp}.webp - # we want the largest timestamp that is less than or equal to before - preview_files = [ - f - for f in os.listdir(PREVIEW_CACHE_DIR) - if f.startswith(f"preview_{camera}-") - and f.endswith(f".{PREVIEW_FRAME_TYPE}") - ] - - if not preview_files: - return None - - # sort by timestamp in descending order - # filenames are like preview_front-1712345678.901234.webp - preview_files.sort(reverse=True) - - if before is None: - return os.path.join(PREVIEW_CACHE_DIR, preview_files[0]) - - for file_name in preview_files: - try: - # Extract timestamp: preview_front-1712345678.901234.webp - # Split by dash and extension - timestamp_part = file_name.split("-")[-1].split( - f".{PREVIEW_FRAME_TYPE}" - )[0] - timestamp = float(timestamp_part) - - if timestamp <= before: - return os.path.join(PREVIEW_CACHE_DIR, file_name) - except (ValueError, IndexError): - continue - - return None - except Exception as e: - logger.error(f"Error searching for most recent preview frame: {e}") - return None - - class FFMpegConverter(threading.Thread): """Convert a list of still frames into a vfr mp4.""" @@ -134,7 +80,7 @@ class FFMpegConverter(threading.Thread): config.ffmpeg.ffmpeg_path, "default", input="-f concat -y -protocol_whitelist pipe,file -safe 0 -threads 1 -i /dev/stdin", - output=f"-threads 1 -g 
{PREVIEW_KEYFRAME_INTERVAL} -bf 0 -b:v {PREVIEW_QUALITY_BIT_RATES[self.config.record.preview.quality]}{PREVIEW_QMAX_PARAM[self.config.record.preview.quality]} {FPS_VFR_PARAM} -movflags +faststart -pix_fmt yuv420p {self.path}", + output=f"-threads 1 -g {PREVIEW_KEYFRAME_INTERVAL} -bf 0 -b:v {PREVIEW_QUALITY_BIT_RATES[self.config.record.preview.quality]} {FPS_VFR_PARAM} -movflags +faststart -pix_fmt yuv420p {self.path}", type=EncodeTypeEnum.preview, ) diff --git a/frigate/record/cleanup.py b/frigate/record/cleanup.py index 15a0ba7e8..94dd43eba 100644 --- a/frigate/record/cleanup.py +++ b/frigate/record/cleanup.py @@ -13,8 +13,9 @@ from playhouse.sqlite_ext import SqliteExtDatabase from frigate.config import CameraConfig, FrigateConfig, RetainModeEnum from frigate.const import CACHE_DIR, CLIPS_DIR, MAX_WAL_SIZE, RECORD_DIR from frigate.models import Previews, Recordings, ReviewSegment, UserReviewStatus +from frigate.record.util import remove_empty_directories, sync_recordings from frigate.util.builtin import clear_and_unlink -from frigate.util.media import remove_empty_directories +from frigate.util.time import get_tomorrow_at_time logger = logging.getLogger(__name__) @@ -60,7 +61,7 @@ class RecordingCleanup(threading.Thread): db.execute_sql("PRAGMA wal_checkpoint(TRUNCATE);") db.close() - def expire_review_segments(self, config: CameraConfig, now: datetime) -> set[Path]: + def expire_review_segments(self, config: CameraConfig, now: datetime) -> None: """Delete review segments that are expired""" alert_expire_date = ( now - datetime.timedelta(days=config.record.alerts.retain.days) @@ -84,12 +85,9 @@ class RecordingCleanup(threading.Thread): .namedtuples() ) - maybe_empty_dirs = set() thumbs_to_delete = list(map(lambda x: x[1], expired_reviews)) for thumb_path in thumbs_to_delete: - thumb_path = Path(thumb_path) - thumb_path.unlink(missing_ok=True) - maybe_empty_dirs.add(thumb_path.parent) + Path(thumb_path).unlink(missing_ok=True) max_deletes = 100000 
deleted_reviews_list = list(map(lambda x: x[0], expired_reviews)) @@ -102,15 +100,13 @@ class RecordingCleanup(threading.Thread): << deleted_reviews_list[i : i + max_deletes] ).execute() - return maybe_empty_dirs - def expire_existing_camera_recordings( self, continuous_expire_date: float, motion_expire_date: float, config: CameraConfig, reviews: ReviewSegment, - ) -> set[Path]: + ) -> None: """Delete recordings for existing camera based on retention config.""" # Get the timestamp for cutoff of retained days @@ -141,8 +137,6 @@ class RecordingCleanup(threading.Thread): .iterator() ) - maybe_empty_dirs = set() - # loop over recordings and see if they overlap with any non-expired reviews # TODO: expire segments based on segment stats according to config review_start = 0 @@ -197,10 +191,8 @@ class RecordingCleanup(threading.Thread): ) or (mode == RetainModeEnum.active_objects and recording.objects == 0) ): - recording_path = Path(recording.path) - recording_path.unlink(missing_ok=True) + Path(recording.path).unlink(missing_ok=True) deleted_recordings.add(recording.id) - maybe_empty_dirs.add(recording_path.parent) else: kept_recordings.append((recording.start_time, recording.end_time)) @@ -261,10 +253,8 @@ class RecordingCleanup(threading.Thread): # Delete previews without any relevant recordings if not keep: - preview_path = Path(preview.path) - preview_path.unlink(missing_ok=True) + Path(preview.path).unlink(missing_ok=True) deleted_previews.add(preview.id) - maybe_empty_dirs.add(preview_path.parent) # expire previews logger.debug(f"Expiring {len(deleted_previews)} previews") @@ -276,9 +266,7 @@ class RecordingCleanup(threading.Thread): Previews.id << deleted_previews_list[i : i + max_deletes] ).execute() - return maybe_empty_dirs - - def expire_recordings(self) -> set[Path]: + def expire_recordings(self) -> None: """Delete recordings based on retention config.""" logger.debug("Start expire recordings.") logger.debug("Start deleted cameras.") @@ -303,14 +291,10 @@ 
class RecordingCleanup(threading.Thread): .iterator() ) - maybe_empty_dirs = set() - deleted_recordings = set() for recording in no_camera_recordings: - recording_path = Path(recording.path) - recording_path.unlink(missing_ok=True) + Path(recording.path).unlink(missing_ok=True) deleted_recordings.add(recording.id) - maybe_empty_dirs.add(recording_path.parent) logger.debug(f"Expiring {len(deleted_recordings)} recordings") # delete up to 100,000 at a time @@ -327,7 +311,7 @@ class RecordingCleanup(threading.Thread): logger.debug(f"Start camera: {camera}.") now = datetime.datetime.now() - maybe_empty_dirs |= self.expire_review_segments(config, now) + self.expire_review_segments(config, now) continuous_expire_date = ( now - datetime.timedelta(days=config.record.continuous.days) ).timestamp() @@ -357,7 +341,7 @@ class RecordingCleanup(threading.Thread): .namedtuples() ) - maybe_empty_dirs |= self.expire_existing_camera_recordings( + self.expire_existing_camera_recordings( continuous_expire_date, motion_expire_date, config, reviews ) logger.debug(f"End camera: {camera}.") @@ -365,9 +349,12 @@ class RecordingCleanup(threading.Thread): logger.debug("End all cameras.") logger.debug("End expire recordings.") - return maybe_empty_dirs - def run(self) -> None: + # on startup sync recordings with disk if enabled + if self.config.record.sync_recordings: + sync_recordings(limited=False) + next_sync = get_tomorrow_at_time(3) + # Expire tmp clips every minute, recordings and clean directories every hour. 
for counter in itertools.cycle(range(self.config.record.expire_interval)): if self.stop_event.wait(60): @@ -376,8 +363,16 @@ class RecordingCleanup(threading.Thread): self.clean_tmp_previews() + if ( + self.config.record.sync_recordings + and datetime.datetime.now().astimezone(datetime.timezone.utc) + > next_sync + ): + sync_recordings(limited=True) + next_sync = get_tomorrow_at_time(3) + if counter == 0: self.clean_tmp_clips() - maybe_empty_dirs = self.expire_recordings() - remove_empty_directories(Path(RECORD_DIR), maybe_empty_dirs) + self.expire_recordings() + remove_empty_directories(RECORD_DIR) self.truncate_wal() diff --git a/frigate/record/export.py b/frigate/record/export.py index c1c478ef4..d4b49bb4b 100644 --- a/frigate/record/export.py +++ b/frigate/record/export.py @@ -33,7 +33,6 @@ from frigate.util.time import is_current_hour logger = logging.getLogger(__name__) -DEFAULT_TIME_LAPSE_FFMPEG_ARGS = "-vf setpts=0.04*PTS -r 30" TIMELAPSE_DATA_INPUT_ARGS = "-an -skip_frame nokey" @@ -41,6 +40,11 @@ def lower_priority(): os.nice(PROCESS_PRIORITY_LOW) +class PlaybackFactorEnum(str, Enum): + realtime = "realtime" + timelapse_25x = "timelapse_25x" + + class PlaybackSourceEnum(str, Enum): recordings = "recordings" preview = "preview" @@ -58,11 +62,8 @@ class RecordingExporter(threading.Thread): image: Optional[str], start_time: int, end_time: int, + playback_factor: PlaybackFactorEnum, playback_source: PlaybackSourceEnum, - export_case_id: Optional[str] = None, - ffmpeg_input_args: Optional[str] = None, - ffmpeg_output_args: Optional[str] = None, - cpu_fallback: bool = False, ) -> None: super().__init__() self.config = config @@ -72,11 +73,8 @@ class RecordingExporter(threading.Thread): self.user_provided_image = image self.start_time = start_time self.end_time = end_time + self.playback_factor = playback_factor self.playback_source = playback_source - self.export_case_id = export_case_id - self.ffmpeg_input_args = ffmpeg_input_args - self.ffmpeg_output_args = 
ffmpeg_output_args - self.cpu_fallback = cpu_fallback # ensure export thumb dir Path(os.path.join(CLIPS_DIR, "export")).mkdir(exist_ok=True) @@ -181,16 +179,9 @@ class RecordingExporter(threading.Thread): return thumb_path - def get_record_export_command( - self, video_path: str, use_hwaccel: bool = True - ) -> list[str]: - # handle case where internal port is a string with ip:port - internal_port = self.config.networking.listen.internal - if type(internal_port) is str: - internal_port = int(internal_port.split(":")[-1]) - + def get_record_export_command(self, video_path: str) -> list[str]: if (self.end_time - self.start_time) <= MAX_PLAYLIST_SECONDS: - playlist_lines = f"http://127.0.0.1:{internal_port}/vod/{self.camera}/start/{self.start_time}/end/{self.end_time}/index.m3u8" + playlist_lines = f"http://127.0.0.1:5000/vod/{self.camera}/start/{self.start_time}/end/{self.end_time}/index.m3u8" ffmpeg_input = ( f"-y -protocol_whitelist pipe,file,http,tcp -i {playlist_lines}" ) @@ -222,30 +213,25 @@ class RecordingExporter(threading.Thread): for page in range(1, num_pages + 1): playlist = export_recordings.paginate(page, page_size) playlist_lines.append( - f"file 'http://127.0.0.1:{internal_port}/vod/{self.camera}/start/{float(playlist[0].start_time)}/end/{float(playlist[-1].end_time)}/index.m3u8'" + f"file 'http://127.0.0.1:5000/vod/{self.camera}/start/{float(playlist[0].start_time)}/end/{float(playlist[-1].end_time)}/index.m3u8'" ) ffmpeg_input = "-y -protocol_whitelist pipe,file,http,tcp -f concat -safe 0 -i /dev/stdin" - if self.ffmpeg_input_args is not None and self.ffmpeg_output_args is not None: - hwaccel_args = ( - self.config.cameras[self.camera].record.export.hwaccel_args - if use_hwaccel - else None - ) + if self.playback_factor == PlaybackFactorEnum.realtime: + ffmpeg_cmd = ( + f"{self.config.ffmpeg.ffmpeg_path} -hide_banner {ffmpeg_input} -c copy -movflags +faststart" + ).split(" ") + elif self.playback_factor == PlaybackFactorEnum.timelapse_25x: 
ffmpeg_cmd = ( parse_preset_hardware_acceleration_encode( self.config.ffmpeg.ffmpeg_path, - hwaccel_args, - f"{self.ffmpeg_input_args} -an {ffmpeg_input}".strip(), - f"{self.ffmpeg_output_args} -movflags +faststart".strip(), + self.config.ffmpeg.hwaccel_args, + f"-an {ffmpeg_input}", + f"{self.config.cameras[self.camera].record.export.timelapse_args} -movflags +faststart", EncodeTypeEnum.timelapse, ) ).split(" ") - else: - ffmpeg_cmd = ( - f"{self.config.ffmpeg.ffmpeg_path} -hide_banner {ffmpeg_input} -c copy -movflags +faststart" - ).split(" ") # add metadata title = f"Frigate Recording for {self.camera}, {self.get_datetime_from_timestamp(self.start_time)} - {self.get_datetime_from_timestamp(self.end_time)}" @@ -255,9 +241,7 @@ class RecordingExporter(threading.Thread): return ffmpeg_cmd, playlist_lines - def get_preview_export_command( - self, video_path: str, use_hwaccel: bool = True - ) -> list[str]: + def get_preview_export_command(self, video_path: str) -> list[str]: playlist_lines = [] codec = "-c copy" @@ -325,25 +309,20 @@ class RecordingExporter(threading.Thread): "-y -protocol_whitelist pipe,file,tcp -f concat -safe 0 -i /dev/stdin" ) - if self.ffmpeg_input_args is not None and self.ffmpeg_output_args is not None: - hwaccel_args = ( - self.config.cameras[self.camera].record.export.hwaccel_args - if use_hwaccel - else None - ) + if self.playback_factor == PlaybackFactorEnum.realtime: + ffmpeg_cmd = ( + f"{self.config.ffmpeg.ffmpeg_path} -hide_banner {ffmpeg_input} {codec} -movflags +faststart {video_path}" + ).split(" ") + elif self.playback_factor == PlaybackFactorEnum.timelapse_25x: ffmpeg_cmd = ( parse_preset_hardware_acceleration_encode( self.config.ffmpeg.ffmpeg_path, - hwaccel_args, - f"{self.ffmpeg_input_args} {TIMELAPSE_DATA_INPUT_ARGS} {ffmpeg_input}".strip(), - f"{self.ffmpeg_output_args} -movflags +faststart {video_path}".strip(), + self.config.ffmpeg.hwaccel_args, + f"{TIMELAPSE_DATA_INPUT_ARGS} {ffmpeg_input}", + 
f"{self.config.cameras[self.camera].record.export.timelapse_args} -movflags +faststart {video_path}", EncodeTypeEnum.timelapse, ) ).split(" ") - else: - ffmpeg_cmd = ( - f"{self.config.ffmpeg.ffmpeg_path} -hide_banner {ffmpeg_input} {codec} -movflags +faststart {video_path}" - ).split(" ") # add metadata title = f"Frigate Preview for {self.camera}, {self.get_datetime_from_timestamp(self.start_time)} - {self.get_datetime_from_timestamp(self.end_time)}" @@ -369,20 +348,17 @@ class RecordingExporter(threading.Thread): video_path = f"{EXPORT_DIR}/{self.camera}_{filename_start_datetime}-{filename_end_datetime}_{cleaned_export_id}.mp4" thumb_path = self.save_thumbnail(self.export_id) - export_values = { - Export.id: self.export_id, - Export.camera: self.camera, - Export.name: export_name, - Export.date: self.start_time, - Export.video_path: video_path, - Export.thumb_path: thumb_path, - Export.in_progress: True, - } - - if self.export_case_id is not None: - export_values[Export.export_case] = self.export_case_id - - Export.insert(export_values).execute() + Export.insert( + { + Export.id: self.export_id, + Export.camera: self.camera, + Export.name: export_name, + Export.date: self.start_time, + Export.video_path: video_path, + Export.thumb_path: thumb_path, + Export.in_progress: True, + } + ).execute() try: if self.playback_source == PlaybackSourceEnum.recordings: @@ -400,34 +376,6 @@ class RecordingExporter(threading.Thread): capture_output=True, ) - # If export failed and cpu_fallback is enabled, retry without hwaccel - if ( - p.returncode != 0 - and self.cpu_fallback - and self.ffmpeg_input_args is not None - and self.ffmpeg_output_args is not None - ): - logger.warning( - f"Export with hardware acceleration failed, retrying without hwaccel for {self.export_id}" - ) - - if self.playback_source == PlaybackSourceEnum.recordings: - ffmpeg_cmd, playlist_lines = self.get_record_export_command( - video_path, use_hwaccel=False - ) - else: - ffmpeg_cmd, playlist_lines = 
self.get_preview_export_command( - video_path, use_hwaccel=False - ) - - p = sp.run( - ffmpeg_cmd, - input="\n".join(playlist_lines), - encoding="ascii", - preexec_fn=lower_priority, - capture_output=True, - ) - if p.returncode != 0: logger.error( f"Failed to export {self.playback_source.value} for command {' '.join(ffmpeg_cmd)}" diff --git a/frigate/record/util.py b/frigate/record/util.py new file mode 100644 index 000000000..6a91c1aaf --- /dev/null +++ b/frigate/record/util.py @@ -0,0 +1,147 @@ +"""Recordings Utilities.""" + +import datetime +import logging +import os + +from peewee import DatabaseError, chunked + +from frigate.const import RECORD_DIR +from frigate.models import Recordings, RecordingsToDelete + +logger = logging.getLogger(__name__) + + +def remove_empty_directories(directory: str) -> None: + # list all directories recursively and sort them by path, + # longest first + paths = sorted( + [x[0] for x in os.walk(directory)], + key=lambda p: len(str(p)), + reverse=True, + ) + for path in paths: + # don't delete the parent + if path == directory: + continue + if len(os.listdir(path)) == 0: + os.rmdir(path) + + +def sync_recordings(limited: bool) -> None: + """Check the db for stale recordings entries that don't exist in the filesystem.""" + + def delete_db_entries_without_file(check_timestamp: float) -> bool: + """Delete db entries where file was deleted outside of frigate.""" + + if limited: + recordings = Recordings.select(Recordings.id, Recordings.path).where( + Recordings.start_time >= check_timestamp + ) + else: + # get all recordings in the db + recordings = Recordings.select(Recordings.id, Recordings.path) + + # Use pagination to process records in chunks + page_size = 1000 + num_pages = (recordings.count() + page_size - 1) // page_size + recordings_to_delete = set() + + for page in range(num_pages): + for recording in recordings.paginate(page, page_size): + if not os.path.exists(recording.path): + recordings_to_delete.add(recording.id) + + if 
len(recordings_to_delete) == 0: + return True + + logger.info( + f"Deleting {len(recordings_to_delete)} recording DB entries with missing files" + ) + + # convert back to list of dictionaries for insertion + recordings_to_delete = [ + {"id": recording_id} for recording_id in recordings_to_delete + ] + + if float(len(recordings_to_delete)) / max(1, recordings.count()) > 0.5: + logger.warning( + f"Deleting {(len(recordings_to_delete) / max(1, recordings.count()) * 100):.2f}% of recordings DB entries, could be due to configuration error. Aborting..." + ) + return False + + # create a temporary table for deletion + RecordingsToDelete.create_table(temporary=True) + + # insert ids to the temporary table + max_inserts = 1000 + for batch in chunked(recordings_to_delete, max_inserts): + RecordingsToDelete.insert_many(batch).execute() + + try: + # delete records in the main table that exist in the temporary table + query = Recordings.delete().where( + Recordings.id.in_(RecordingsToDelete.select(RecordingsToDelete.id)) + ) + query.execute() + except DatabaseError as e: + logger.error(f"Database error during recordings db cleanup: {e}") + + return True + + def delete_files_without_db_entry(files_on_disk: list[str]): + """Delete files where file is not inside frigate db.""" + files_to_delete = [] + + for file in files_on_disk: + if not Recordings.select().where(Recordings.path == file).exists(): + files_to_delete.append(file) + + if len(files_to_delete) == 0: + return True + + logger.info( + f"Deleting {len(files_to_delete)} recordings files with missing DB entries" + ) + + if float(len(files_to_delete)) / max(1, len(files_on_disk)) > 0.5: + logger.debug( + f"Deleting {(len(files_to_delete) / max(1, len(files_on_disk)) * 100):.2f}% of recordings DB entries, could be due to configuration error. Aborting..." 
+ ) + return False + + for file in files_to_delete: + os.unlink(file) + + return True + + logger.debug("Start sync recordings.") + + # start checking on the hour 36 hours ago + check_point = datetime.datetime.now().replace( + minute=0, second=0, microsecond=0 + ).astimezone(datetime.timezone.utc) - datetime.timedelta(hours=36) + db_success = delete_db_entries_without_file(check_point.timestamp()) + + # only try to cleanup files if db cleanup was successful + if db_success: + if limited: + # get recording files from last 36 hours + hour_check = f"{RECORD_DIR}/{check_point.strftime('%Y-%m-%d/%H')}" + files_on_disk = { + os.path.join(root, file) + for root, _, files in os.walk(RECORD_DIR) + for file in files + if root > hour_check + } + else: + # get all recordings files on disk and put them in a set + files_on_disk = { + os.path.join(root, file) + for root, _, files in os.walk(RECORD_DIR) + for file in files + } + + delete_files_without_db_entry(files_on_disk) + + logger.debug("End sync recordings.") diff --git a/frigate/review/maintainer.py b/frigate/review/maintainer.py index 6afdc8de9..917c0c5ac 100644 --- a/frigate/review/maintainer.py +++ b/frigate/review/maintainer.py @@ -394,11 +394,7 @@ class ReviewSegmentMaintainer(threading.Thread): if activity.has_activity_category(SeverityEnum.alert): # update current time for last alert activity - if ( - segment.last_alert_time is None - or frame_time > segment.last_alert_time - ): - segment.last_alert_time = frame_time + segment.last_alert_time = frame_time if segment.severity != SeverityEnum.alert: # if segment is not alert category but current activity is @@ -408,11 +404,7 @@ class ReviewSegmentMaintainer(threading.Thread): should_update_image = True if activity.has_activity_category(SeverityEnum.detection): - if ( - segment.last_detection_time is None - or frame_time > segment.last_detection_time - ): - segment.last_detection_time = frame_time + segment.last_detection_time = frame_time for object in 
activity.get_all_objects(): # Alert-level objects should always be added (they extend/upgrade the segment) @@ -703,28 +695,17 @@ class ReviewSegmentMaintainer(threading.Thread): current_segment.detections[manual_info["event_id"]] = ( manual_info["label"] ) - if topic == DetectionTypeEnum.api: - # manual_info["label"] contains 'label: sub_label' - # so split out the label without modifying manual_info - if ( - self.config.cameras[camera].review.detections.enabled - and manual_info["label"].split(": ")[0] - in self.config.cameras[camera].review.detections.labels - ): - current_segment.last_detection_time = manual_info[ - "end_time" - ] - elif self.config.cameras[camera].review.alerts.enabled: - current_segment.severity = SeverityEnum.alert - current_segment.last_alert_time = manual_info[ - "end_time" - ] + if ( + topic == DetectionTypeEnum.api + and self.config.cameras[camera].review.alerts.enabled + ): + current_segment.severity = SeverityEnum.alert elif ( topic == DetectionTypeEnum.lpr and self.config.cameras[camera].review.detections.enabled ): current_segment.severity = SeverityEnum.detection - current_segment.last_alert_time = manual_info["end_time"] + current_segment.last_alert_time = manual_info["end_time"] elif manual_info["state"] == ManualEventState.start: self.indefinite_events[camera][manual_info["event_id"]] = ( manual_info["label"] @@ -736,18 +717,7 @@ class ReviewSegmentMaintainer(threading.Thread): topic == DetectionTypeEnum.api and self.config.cameras[camera].review.alerts.enabled ): - # manual_info["label"] contains 'label: sub_label' - # so split out the label without modifying manual_info - if ( - not self.config.cameras[ - camera - ].review.detections.enabled - or manual_info["label"].split(": ")[0] - not in self.config.cameras[ - camera - ].review.detections.labels - ): - current_segment.severity = SeverityEnum.alert + current_segment.severity = SeverityEnum.alert elif ( topic == DetectionTypeEnum.lpr and 
self.config.cameras[camera].review.detections.enabled @@ -819,23 +789,11 @@ class ReviewSegmentMaintainer(threading.Thread): detections, ) elif topic == DetectionTypeEnum.api: - severity = None - # manual_info["label"] contains 'label: sub_label' - # so split out the label without modifying manual_info - if ( - self.config.cameras[camera].review.detections.enabled - and manual_info["label"].split(": ")[0] - in self.config.cameras[camera].review.detections.labels - ): - severity = SeverityEnum.detection - elif self.config.cameras[camera].review.alerts.enabled: - severity = SeverityEnum.alert - - if severity: + if self.config.cameras[camera].review.alerts.enabled: self.active_review_segments[camera] = PendingReviewSegment( camera, frame_time, - severity, + SeverityEnum.alert, {manual_info["event_id"]: manual_info["label"]}, {}, [], @@ -862,7 +820,7 @@ class ReviewSegmentMaintainer(threading.Thread): ].last_detection_time = manual_info["end_time"] else: logger.warning( - f"Manual event API has been called for {camera}, but alerts and detections are disabled. This manual event will not appear as an alert or detection." + f"Manual event API has been called for {camera}, but alerts are disabled. This manual event will not appear as an alert." 
) elif topic == DetectionTypeEnum.lpr: if self.config.cameras[camera].review.detections.enabled: diff --git a/frigate/stats/util.py b/frigate/stats/util.py index f4f91f83f..410350d96 100644 --- a/frigate/stats/util.py +++ b/frigate/stats/util.py @@ -22,7 +22,6 @@ from frigate.util.services import ( get_bandwidth_stats, get_cpu_stats, get_fs_type, - get_hailo_temps, get_intel_gpu_stats, get_jetson_stats, get_nvidia_gpu_stats, @@ -91,80 +90,9 @@ def get_temperatures() -> dict[str, float]: if temp is not None: temps[apex] = temp - # Get temperatures for Hailo devices - temps.update(get_hailo_temps()) - return temps -def get_detector_temperature( - detector_type: str, - detector_index_by_type: dict[str, int], -) -> Optional[float]: - """Get temperature for a specific detector based on its type.""" - if detector_type == "edgetpu": - # Get temperatures for all attached Corals - base = "/sys/class/apex/" - if os.path.isdir(base): - apex_devices = sorted(os.listdir(base)) - index = detector_index_by_type.get("edgetpu", 0) - if index < len(apex_devices): - apex_name = apex_devices[index] - temp = read_temperature(os.path.join(base, apex_name, "temp")) - if temp is not None: - return temp - elif detector_type == "hailo8l": - # Get temperatures for Hailo devices - hailo_temps = get_hailo_temps() - if hailo_temps: - hailo_device_names = sorted(hailo_temps.keys()) - index = detector_index_by_type.get("hailo8l", 0) - if index < len(hailo_device_names): - device_name = hailo_device_names[index] - return hailo_temps[device_name] - elif detector_type == "rknn": - # Rockchip temperatures are handled by the GPU / NPU stats - # as there are not detector specific temperatures - pass - - return None - - -def get_detector_stats( - stats_tracking: StatsTrackingTypes, -) -> dict[str, dict[str, Any]]: - """Get stats for all detectors, including temperatures based on detector type.""" - detector_stats: dict[str, dict[str, Any]] = {} - detector_type_indices: dict[str, int] = {} - - for name, 
detector in stats_tracking["detectors"].items(): - pid = detector.detect_process.pid if detector.detect_process else None - detector_type = detector.detector_config.type - - # Keep track of the index for each detector type to match temperatures correctly - current_index = detector_type_indices.get(detector_type, 0) - detector_type_indices[detector_type] = current_index + 1 - - detector_stat = { - "inference_speed": round(detector.avg_inference_speed.value * 1000, 2), # type: ignore[attr-defined] - # issue https://github.com/python/typeshed/issues/8799 - # from mypy 0.981 onwards - "detection_start": detector.detection_start.value, # type: ignore[attr-defined] - # issue https://github.com/python/typeshed/issues/8799 - # from mypy 0.981 onwards - "pid": pid, - } - - temp = get_detector_temperature(detector_type, {detector_type: current_index}) - - if temp is not None: - detector_stat["temperature"] = round(temp, 1) - - detector_stats[name] = detector_stat - - return detector_stats - - def get_processing_stats( config: FrigateConfig, stats: dict[str, str], hwaccel_errors: list[str] ) -> None: @@ -245,7 +173,6 @@ async def set_gpu_stats( "mem": str(round(float(nvidia_usage[i]["mem"]), 2)) + "%", "enc": str(round(float(nvidia_usage[i]["enc"]), 2)) + "%", "dec": str(round(float(nvidia_usage[i]["dec"]), 2)) + "%", - "temp": str(nvidia_usage[i]["temp"]), } else: @@ -351,32 +278,6 @@ def stats_snapshot( if camera_stats.capture_process_pid.value else None ) - # Calculate connection quality based on current state - # This is computed at stats-collection time so offline cameras - # correctly show as unusable rather than excellent - expected_fps = config.cameras[name].detect.fps - current_fps = camera_stats.camera_fps.value - reconnects = camera_stats.reconnects_last_hour.value - stalls = camera_stats.stalls_last_hour.value - - if current_fps < 0.1: - quality_str = "unusable" - elif reconnects == 0 and current_fps >= 0.9 * expected_fps and stalls < 5: - quality_str = 
"excellent" - elif reconnects <= 2 and current_fps >= 0.6 * expected_fps: - quality_str = "fair" - elif reconnects > 10 or current_fps < 1.0 or stalls > 100: - quality_str = "unusable" - else: - quality_str = "poor" - - connection_quality = { - "connection_quality": quality_str, - "expected_fps": expected_fps, - "reconnects_last_hour": reconnects, - "stalls_last_hour": stalls, - } - stats["cameras"][name] = { "camera_fps": round(camera_stats.camera_fps.value, 2), "process_fps": round(camera_stats.process_fps.value, 2), @@ -388,10 +289,20 @@ def stats_snapshot( "ffmpeg_pid": ffmpeg_pid, "audio_rms": round(camera_stats.audio_rms.value, 4), "audio_dBFS": round(camera_stats.audio_dBFS.value, 4), - **connection_quality, } - stats["detectors"] = get_detector_stats(stats_tracking) + stats["detectors"] = {} + for name, detector in stats_tracking["detectors"].items(): + pid = detector.detect_process.pid if detector.detect_process else None + stats["detectors"][name] = { + "inference_speed": round(detector.avg_inference_speed.value * 1000, 2), # type: ignore[attr-defined] + # issue https://github.com/python/typeshed/issues/8799 + # from mypy 0.981 onwards + "detection_start": detector.detection_start.value, # type: ignore[attr-defined] + # issue https://github.com/python/typeshed/issues/8799 + # from mypy 0.981 onwards + "pid": pid, + } stats["camera_fps"] = round(total_camera_fps, 2) stats["process_fps"] = round(total_process_fps, 2) stats["skipped_fps"] = round(total_skipped_fps, 2) @@ -477,6 +388,7 @@ def stats_snapshot( "version": VERSION, "latest_version": stats_tracking["latest_frigate_version"], "storage": {}, + "temperatures": get_temperatures(), "last_updated": int(time.time()), } diff --git a/frigate/test/http_api/test_http_latest_frame.py b/frigate/test/http_api/test_http_latest_frame.py deleted file mode 100644 index 755ee6eb1..000000000 --- a/frigate/test/http_api/test_http_latest_frame.py +++ /dev/null @@ -1,107 +0,0 @@ -import os -import shutil -from 
unittest.mock import MagicMock - -import cv2 -import numpy as np - -from frigate.output.preview import PREVIEW_CACHE_DIR, PREVIEW_FRAME_TYPE -from frigate.test.http_api.base_http_test import AuthTestClient, BaseTestHttp - - -class TestHttpLatestFrame(BaseTestHttp): - def setUp(self): - super().setUp([]) - self.app = super().create_app() - self.app.detected_frames_processor = MagicMock() - - if os.path.exists(PREVIEW_CACHE_DIR): - shutil.rmtree(PREVIEW_CACHE_DIR) - os.makedirs(PREVIEW_CACHE_DIR) - - def tearDown(self): - if os.path.exists(PREVIEW_CACHE_DIR): - shutil.rmtree(PREVIEW_CACHE_DIR) - super().tearDown() - - def test_latest_frame_fallback_to_preview(self): - camera = "front_door" - # 1. Mock frame processor to return None (simulating offline/missing frame) - self.app.detected_frames_processor.get_current_frame.return_value = None - # Return a timestamp that is after our dummy preview frame - self.app.detected_frames_processor.get_current_frame_time.return_value = ( - 1234567891.0 - ) - - # 2. Create a dummy preview file - dummy_frame = np.zeros((180, 320, 3), np.uint8) - cv2.putText( - dummy_frame, - "PREVIEW", - (50, 50), - cv2.FONT_HERSHEY_SIMPLEX, - 1, - (255, 255, 255), - 2, - ) - preview_path = os.path.join( - PREVIEW_CACHE_DIR, f"preview_{camera}-1234567890.0.{PREVIEW_FRAME_TYPE}" - ) - cv2.imwrite(preview_path, dummy_frame) - - with AuthTestClient(self.app) as client: - response = client.get(f"/{camera}/latest.webp") - assert response.status_code == 200 - assert response.headers.get("X-Frigate-Offline") == "true" - # Verify we got an image (webp) - assert response.headers.get("content-type") == "image/webp" - - def test_latest_frame_no_fallback_when_live(self): - camera = "front_door" - # 1. 
Mock frame processor to return a live frame - dummy_frame = np.zeros((180, 320, 3), np.uint8) - self.app.detected_frames_processor.get_current_frame.return_value = dummy_frame - self.app.detected_frames_processor.get_current_frame_time.return_value = ( - 2000000000.0 # Way in the future - ) - - with AuthTestClient(self.app) as client: - response = client.get(f"/{camera}/latest.webp") - assert response.status_code == 200 - assert "X-Frigate-Offline" not in response.headers - - def test_latest_frame_stale_falls_back_to_preview(self): - camera = "front_door" - # 1. Mock frame processor to return a stale frame - dummy_frame = np.zeros((180, 320, 3), np.uint8) - self.app.detected_frames_processor.get_current_frame.return_value = dummy_frame - # Return a timestamp that is after our dummy preview frame, but way in the past - self.app.detected_frames_processor.get_current_frame_time.return_value = 1000.0 - - # 2. Create a dummy preview file - preview_path = os.path.join( - PREVIEW_CACHE_DIR, f"preview_{camera}-999.0.{PREVIEW_FRAME_TYPE}" - ) - cv2.imwrite(preview_path, dummy_frame) - - with AuthTestClient(self.app) as client: - response = client.get(f"/{camera}/latest.webp") - assert response.status_code == 200 - assert response.headers.get("X-Frigate-Offline") == "true" - - def test_latest_frame_no_preview_found(self): - camera = "front_door" - # 1. Mock frame processor to return None - self.app.detected_frames_processor.get_current_frame.return_value = None - - # 2. No preview file created - - with AuthTestClient(self.app) as client: - response = client.get(f"/{camera}/latest.webp") - # Should fall back to camera-error.jpg (which might not exist in test env, but let's see) - # If camera-error.jpg is not found, it returns 500 "Unable to get valid frame" in latest_frame - # OR it uses request.app.camera_error_image if already loaded. - - # Since we didn't provide camera-error.jpg, it might 500 if glob fails or return 500 if frame is None. 
- assert response.status_code in [200, 500] - assert "X-Frigate-Offline" not in response.headers diff --git a/frigate/test/test_preview_loader.py b/frigate/test/test_preview_loader.py deleted file mode 100644 index e2062fce1..000000000 --- a/frigate/test/test_preview_loader.py +++ /dev/null @@ -1,80 +0,0 @@ -import os -import shutil -import unittest - -from frigate.output.preview import ( - PREVIEW_CACHE_DIR, - PREVIEW_FRAME_TYPE, - get_most_recent_preview_frame, -) - - -class TestPreviewLoader(unittest.TestCase): - def setUp(self): - if os.path.exists(PREVIEW_CACHE_DIR): - shutil.rmtree(PREVIEW_CACHE_DIR) - os.makedirs(PREVIEW_CACHE_DIR) - - def tearDown(self): - if os.path.exists(PREVIEW_CACHE_DIR): - shutil.rmtree(PREVIEW_CACHE_DIR) - - def test_get_most_recent_preview_frame_missing(self): - self.assertIsNone(get_most_recent_preview_frame("test_camera")) - - def test_get_most_recent_preview_frame_exists(self): - camera = "test_camera" - # create dummy preview files - for ts in ["1000.0", "2000.0", "1500.0"]: - with open( - os.path.join( - PREVIEW_CACHE_DIR, f"preview_{camera}-{ts}.{PREVIEW_FRAME_TYPE}" - ), - "w", - ) as f: - f.write(f"test_{ts}") - - expected_path = os.path.join( - PREVIEW_CACHE_DIR, f"preview_{camera}-2000.0.{PREVIEW_FRAME_TYPE}" - ) - self.assertEqual(get_most_recent_preview_frame(camera), expected_path) - - def test_get_most_recent_preview_frame_before(self): - camera = "test_camera" - # create dummy preview files - for ts in ["1000.0", "2000.0"]: - with open( - os.path.join( - PREVIEW_CACHE_DIR, f"preview_{camera}-{ts}.{PREVIEW_FRAME_TYPE}" - ), - "w", - ) as f: - f.write(f"test_{ts}") - - # Test finding frame before or at 1500 - expected_path = os.path.join( - PREVIEW_CACHE_DIR, f"preview_{camera}-1000.0.{PREVIEW_FRAME_TYPE}" - ) - self.assertEqual( - get_most_recent_preview_frame(camera, before=1500.0), expected_path - ) - - # Test finding frame before or at 999 - self.assertIsNone(get_most_recent_preview_frame(camera, before=999.0)) - - 
def test_get_most_recent_preview_frame_other_camera(self): - camera = "test_camera" - other_camera = "other_camera" - with open( - os.path.join( - PREVIEW_CACHE_DIR, f"preview_{other_camera}-3000.0.{PREVIEW_FRAME_TYPE}" - ), - "w", - ) as f: - f.write("test") - - self.assertIsNone(get_most_recent_preview_frame(camera)) - - def test_get_most_recent_preview_frame_no_directory(self): - shutil.rmtree(PREVIEW_CACHE_DIR) - self.assertIsNone(get_most_recent_preview_frame("test_camera")) diff --git a/frigate/track/object_processing.py b/frigate/track/object_processing.py index 9ac04b42a..e0ee74228 100644 --- a/frigate/track/object_processing.py +++ b/frigate/track/object_processing.py @@ -185,7 +185,7 @@ class TrackedObjectProcessor(threading.Thread): def snapshot(camera: str, obj: TrackedObject) -> bool: mqtt_config: CameraMqttConfig = self.config.cameras[camera].mqtt if mqtt_config.enabled and self.should_mqtt_snapshot(camera, obj): - jpg_bytes, _ = obj.get_img_bytes( + jpg_bytes = obj.get_img_bytes( ext="jpg", timestamp=mqtt_config.timestamp, bounding_box=mqtt_config.bounding_box, @@ -515,7 +515,6 @@ class TrackedObjectProcessor(threading.Thread): duration, source_type, draw, - pre_capture, ) = payload # save the snapshot image @@ -523,11 +522,6 @@ class TrackedObjectProcessor(threading.Thread): None, event_id, label, draw ) end_time = frame_time + duration if duration is not None else None - start_time = ( - frame_time - self.config.cameras[camera_name].record.event_pre_capture - if pre_capture is None - else frame_time - pre_capture - ) # send event to event maintainer self.event_sender.publish( @@ -542,7 +536,8 @@ class TrackedObjectProcessor(threading.Thread): "sub_label": sub_label, "score": score, "camera": camera_name, - "start_time": start_time, + "start_time": frame_time + - self.config.cameras[camera_name].record.event_pre_capture, "end_time": end_time, "has_clip": self.config.cameras[camera_name].record.enabled and include_recording, diff --git 
a/frigate/track/tracked_object.py b/frigate/track/tracked_object.py index f435de7b6..a95221bbd 100644 --- a/frigate/track/tracked_object.py +++ b/frigate/track/tracked_object.py @@ -434,7 +434,7 @@ class TrackedObject: return count > (self.camera_config.detect.stationary.threshold or 50) def get_thumbnail(self, ext: str) -> bytes | None: - img_bytes, _ = self.get_img_bytes( + img_bytes = self.get_img_bytes( ext, timestamp=False, bounding_box=False, crop=True, height=175 ) @@ -475,21 +475,20 @@ class TrackedObject: crop: bool = False, height: int | None = None, quality: int | None = None, - ) -> tuple[bytes | None, float | None]: + ) -> bytes | None: if self.thumbnail_data is None: - return None, None + return None try: - frame_time = self.thumbnail_data["frame_time"] best_frame = cv2.cvtColor( - self.frame_cache[frame_time]["frame"], + self.frame_cache[self.thumbnail_data["frame_time"]]["frame"], cv2.COLOR_YUV2BGR_I420, ) except KeyError: logger.warning( - f"Unable to create jpg because frame {frame_time} is not in the cache" + f"Unable to create jpg because frame {self.thumbnail_data['frame_time']} is not in the cache" ) - return None, None + return None if bounding_box: thickness = 2 @@ -571,13 +570,13 @@ class TrackedObject: ret, jpg = cv2.imencode(f".{ext}", best_frame, quality_params) if ret: - return jpg.tobytes(), frame_time + return jpg.tobytes() else: - return None, None + return None def write_snapshot_to_disk(self) -> None: snapshot_config: SnapshotsConfig = self.camera_config.snapshots - jpg_bytes, _ = self.get_img_bytes( + jpg_bytes = self.get_img_bytes( ext="jpg", timestamp=snapshot_config.timestamp, bounding_box=snapshot_config.bounding_box, diff --git a/frigate/types.py b/frigate/types.py index 77bb50845..6c5135616 100644 --- a/frigate/types.py +++ b/frigate/types.py @@ -26,15 +26,6 @@ class ModelStatusTypesEnum(str, Enum): failed = "failed" -class JobStatusTypesEnum(str, Enum): - pending = "pending" - queued = "queued" - running = "running" - 
success = "success" - failed = "failed" - cancelled = "cancelled" - - class TrackedObjectUpdateTypesEnum(str, Enum): description = "description" face = "face" diff --git a/frigate/util/config.py b/frigate/util/config.py index 1af5c8e4e..c3d796397 100644 --- a/frigate/util/config.py +++ b/frigate/util/config.py @@ -13,7 +13,7 @@ from frigate.util.services import get_video_properties logger = logging.getLogger(__name__) -CURRENT_CONFIG_VERSION = "0.18-0" +CURRENT_CONFIG_VERSION = "0.17-0" DEFAULT_CONFIG_FILE = os.path.join(CONFIG_DIR, "config.yml") @@ -98,13 +98,6 @@ def migrate_frigate_config(config_file: str): yaml.dump(new_config, f) previous_version = "0.17-0" - if previous_version < "0.18-0": - logger.info(f"Migrating frigate config from {previous_version} to 0.18-0...") - new_config = migrate_018_0(config) - with open(config_file, "w") as f: - yaml.dump(new_config, f) - previous_version = "0.18-0" - logger.info("Finished frigate config migration...") @@ -434,49 +427,6 @@ def migrate_017_0(config: dict[str, dict[str, Any]]) -> dict[str, dict[str, Any] return new_config -def migrate_018_0(config: dict[str, dict[str, Any]]) -> dict[str, dict[str, Any]]: - """Handle migrating frigate config to 0.18-0""" - new_config = config.copy() - - # Remove deprecated sync_recordings from global record config - if new_config.get("record", {}).get("sync_recordings") is not None: - del new_config["record"]["sync_recordings"] - - # Remove deprecated timelapse_args from global record export config - if new_config.get("record", {}).get("export", {}).get("timelapse_args") is not None: - del new_config["record"]["export"]["timelapse_args"] - # Remove export section if empty - if not new_config.get("record", {}).get("export"): - del new_config["record"]["export"] - # Remove record section if empty - if not new_config.get("record"): - del new_config["record"] - - # Remove deprecated sync_recordings and timelapse_args from camera-specific record configs - for name, camera in 
config.get("cameras", {}).items(): - camera_config: dict[str, dict[str, Any]] = camera.copy() - - if camera_config.get("record", {}).get("sync_recordings") is not None: - del camera_config["record"]["sync_recordings"] - - if ( - camera_config.get("record", {}).get("export", {}).get("timelapse_args") - is not None - ): - del camera_config["record"]["export"]["timelapse_args"] - # Remove export section if empty - if not camera_config.get("record", {}).get("export"): - del camera_config["record"]["export"] - # Remove record section if empty - if not camera_config.get("record"): - del camera_config["record"] - - new_config["cameras"][name] = camera_config - - new_config["version"] = "0.18-0" - return new_config - - def get_relative_coordinates( mask: Optional[Union[str, list]], frame_shape: tuple[int, int] ) -> Union[str, list]: diff --git a/frigate/util/media.py b/frigate/util/media.py deleted file mode 100644 index c7de85c9f..000000000 --- a/frigate/util/media.py +++ /dev/null @@ -1,808 +0,0 @@ -"""Recordings Utilities.""" - -import datetime -import errno -import logging -import os -from dataclasses import dataclass, field -from pathlib import Path -from typing import Iterable - -from peewee import DatabaseError, chunked - -from frigate.const import CLIPS_DIR, EXPORT_DIR, RECORD_DIR, THUMB_DIR -from frigate.models import ( - Event, - Export, - Previews, - Recordings, - RecordingsToDelete, - ReviewSegment, -) - -logger = logging.getLogger(__name__) - - -# Safety threshold - abort if more than 50% of files would be deleted -SAFETY_THRESHOLD = 0.5 - - -@dataclass -class SyncResult: - """Result of a sync operation.""" - - media_type: str - files_checked: int = 0 - orphans_found: int = 0 - orphans_deleted: int = 0 - orphan_paths: list[str] = field(default_factory=list) - aborted: bool = False - error: str | None = None - - def to_dict(self) -> dict: - return { - "media_type": self.media_type, - "files_checked": self.files_checked, - "orphans_found": self.orphans_found, - 
"orphans_deleted": self.orphans_deleted, - "aborted": self.aborted, - "error": self.error, - } - - -def remove_empty_directories(root: Path, paths: Iterable[Path]) -> None: - """ - Remove directories if they exist and are empty. - Silently ignores non-existent and non-empty directories. - Attempts to remove parent directories as well, stopping at the given root. - """ - count = 0 - while True: - parents = set() - for path in paths: - if path == root: - continue - - try: - path.rmdir() - count += 1 - except FileNotFoundError: - pass - except OSError as e: - if e.errno == errno.ENOTEMPTY: - continue - raise - - parents.add(path.parent) - - if not parents: - break - - paths = parents - - logger.debug("Removed {count} empty directories") - - -def sync_recordings( - limited: bool = False, dry_run: bool = False, force: bool = False -) -> SyncResult: - """Sync recordings between the database and disk using the SyncResult format.""" - - result = SyncResult(media_type="recordings") - - try: - logger.debug("Start sync recordings.") - - # start checking on the hour 36 hours ago - check_point = datetime.datetime.now().replace( - minute=0, second=0, microsecond=0 - ).astimezone(datetime.timezone.utc) - datetime.timedelta(hours=36) - - # Gather DB recordings to inspect - if limited: - recordings_query = Recordings.select(Recordings.id, Recordings.path).where( - Recordings.start_time >= check_point.timestamp() - ) - else: - recordings_query = Recordings.select(Recordings.id, Recordings.path) - - recordings_count = recordings_query.count() - page_size = 1000 - num_pages = (recordings_count + page_size - 1) // page_size - recordings_to_delete: list[dict] = [] - - for page in range(num_pages): - for recording in recordings_query.paginate(page, page_size): - if not os.path.exists(recording.path): - recordings_to_delete.append( - {"id": recording.id, "path": recording.path} - ) - - result.orphans_found += len(recordings_to_delete) - result.orphan_paths.extend( - [ - recording["path"] 
- for recording in recordings_to_delete - if recording.get("path") - ] - ) - - if ( - recordings_count - and len(recordings_to_delete) / recordings_count > SAFETY_THRESHOLD - ): - if force: - logger.warning( - f"Deleting {(len(recordings_to_delete) / max(1, recordings_count) * 100):.2f}% of recordings DB entries (force=True, bypassing safety threshold)" - ) - else: - logger.warning( - f"Deleting {(len(recordings_to_delete) / max(1, recordings_count) * 100):.2f}% of recordings DB entries, could be due to configuration error. Aborting..." - ) - result.aborted = True - return result - - if recordings_to_delete and not dry_run: - logger.info( - f"Deleting {len(recordings_to_delete)} recording DB entries with missing files" - ) - - RecordingsToDelete.create_table(temporary=True) - - max_inserts = 1000 - for batch in chunked(recordings_to_delete, max_inserts): - RecordingsToDelete.insert_many(batch).execute() - - try: - deleted = ( - Recordings.delete() - .where( - Recordings.id.in_( - RecordingsToDelete.select(RecordingsToDelete.id) - ) - ) - .execute() - ) - result.orphans_deleted += int(deleted) - except DatabaseError as e: - logger.error(f"Database error during recordings db cleanup: {e}") - result.error = str(e) - result.aborted = True - return result - - if result.aborted: - logger.warning("Recording DB sync aborted; skipping file cleanup.") - return result - - # Only try to cleanup files if db cleanup was successful or dry_run - if limited: - # get recording files from last 36 hours - hour_check = f"{RECORD_DIR}/{check_point.strftime('%Y-%m-%d/%H')}" - files_on_disk = { - os.path.join(root, file) - for root, _, files in os.walk(RECORD_DIR) - for file in files - if root > hour_check - } - else: - # get all recordings files on disk and put them in a set - files_on_disk = { - os.path.join(root, file) - for root, _, files in os.walk(RECORD_DIR) - for file in files - } - - result.files_checked = len(files_on_disk) - - files_to_delete: list[str] = [] - for file in 
files_on_disk: - if not Recordings.select().where(Recordings.path == file).exists(): - files_to_delete.append(file) - - result.orphans_found += len(files_to_delete) - result.orphan_paths.extend(files_to_delete) - - if ( - files_on_disk - and len(files_to_delete) / len(files_on_disk) > SAFETY_THRESHOLD - ): - if force: - logger.warning( - f"Deleting {(len(files_to_delete) / max(1, len(files_on_disk)) * 100):.2f}% of recordings files (force=True, bypassing safety threshold)" - ) - else: - logger.warning( - f"Deleting {(len(files_to_delete) / max(1, len(files_on_disk)) * 100):.2f}% of recordings files, could be due to configuration error. Aborting..." - ) - result.aborted = True - return result - - if dry_run: - logger.info( - f"Recordings sync (dry run): Found {len(files_to_delete)} orphaned files" - ) - return result - - # Delete orphans - logger.info(f"Deleting {len(files_to_delete)} orphaned recordings files") - for file in files_to_delete: - try: - os.unlink(file) - result.orphans_deleted += 1 - except OSError as e: - logger.error(f"Failed to delete {file}: {e}") - - logger.debug("End sync recordings.") - - except Exception as e: - logger.error(f"Error syncing recordings: {e}") - result.error = str(e) - - return result - - -def sync_event_snapshots(dry_run: bool = False, force: bool = False) -> SyncResult: - """Sync event snapshots - delete files not referenced by any event. 
- - Event snapshots are stored at: CLIPS_DIR/{camera}-{event_id}.jpg - Also checks for clean variants: {camera}-{event_id}-clean.webp and -clean.png - """ - result = SyncResult(media_type="event_snapshots") - - try: - # Get all event IDs with snapshots from DB - events_with_snapshots = set( - f"{e.camera}-{e.id}" - for e in Event.select(Event.id, Event.camera).where( - Event.has_snapshot == True - ) - ) - - # Find snapshot files on disk (directly in CLIPS_DIR, not subdirectories) - snapshot_files: list[tuple[str, str]] = [] # (full_path, base_name) - if os.path.isdir(CLIPS_DIR): - for file in os.listdir(CLIPS_DIR): - file_path = os.path.join(CLIPS_DIR, file) - if os.path.isfile(file_path) and file.endswith( - (".jpg", "-clean.webp", "-clean.png") - ): - # Extract base name (camera-event_id) from filename - base_name = file - for suffix in ["-clean.webp", "-clean.png", ".jpg"]: - if file.endswith(suffix): - base_name = file[: -len(suffix)] - break - snapshot_files.append((file_path, base_name)) - - result.files_checked = len(snapshot_files) - - # Find orphans - orphans: list[str] = [] - for file_path, base_name in snapshot_files: - if base_name not in events_with_snapshots: - orphans.append(file_path) - - result.orphans_found = len(orphans) - result.orphan_paths = orphans - - if len(orphans) == 0: - return result - - # Safety check - if ( - result.files_checked > 0 - and len(orphans) / result.files_checked > SAFETY_THRESHOLD - ): - if force: - logger.warning( - f"Event snapshots sync: Would delete {len(orphans)}/{result.files_checked} " - f"({len(orphans) / result.files_checked * 100:.2f}%) files (force=True, bypassing safety threshold)." - ) - else: - logger.warning( - f"Event snapshots sync: Would delete {len(orphans)}/{result.files_checked} " - f"({len(orphans) / result.files_checked * 100:.2f}%) files. " - "Aborting due to safety threshold." 
- ) - result.aborted = True - return result - - if dry_run: - logger.info( - f"Event snapshots sync (dry run): Found {len(orphans)} orphaned files" - ) - return result - - # Delete orphans - logger.info(f"Deleting {len(orphans)} orphaned event snapshot files") - for file_path in orphans: - try: - os.unlink(file_path) - result.orphans_deleted += 1 - except OSError as e: - logger.error(f"Failed to delete {file_path}: {e}") - - except Exception as e: - logger.error(f"Error syncing event snapshots: {e}") - result.error = str(e) - - return result - - -def sync_event_thumbnails(dry_run: bool = False, force: bool = False) -> SyncResult: - """Sync event thumbnails - delete files not referenced by any event. - - Event thumbnails are stored at: THUMB_DIR/{camera}/{event_id}.webp - Only events without inline thumbnail (thumbnail field is None/empty) use files. - """ - result = SyncResult(media_type="event_thumbnails") - - try: - # Get all events that use file-based thumbnails - # Events with thumbnail field populated don't need files - events_with_file_thumbs = set( - (e.camera, e.id) - for e in Event.select(Event.id, Event.camera, Event.thumbnail).where( - (Event.thumbnail.is_null(True)) | (Event.thumbnail == "") - ) - ) - - # Find thumbnail files on disk - thumbnail_files: list[ - tuple[str, str, str] - ] = [] # (full_path, camera, event_id) - if os.path.isdir(THUMB_DIR): - for camera_dir in os.listdir(THUMB_DIR): - camera_path = os.path.join(THUMB_DIR, camera_dir) - if not os.path.isdir(camera_path): - continue - for file in os.listdir(camera_path): - if file.endswith(".webp"): - event_id = file[:-5] # Remove .webp - file_path = os.path.join(camera_path, file) - thumbnail_files.append((file_path, camera_dir, event_id)) - - result.files_checked = len(thumbnail_files) - - # Find orphans - files where event doesn't exist or event has inline thumbnail - orphans: list[str] = [] - for file_path, camera, event_id in thumbnail_files: - if (camera, event_id) not in 
events_with_file_thumbs: - # Check if event exists with inline thumbnail - event_exists = Event.select().where(Event.id == event_id).exists() - if not event_exists: - orphans.append(file_path) - # If event exists with inline thumbnail, the file is also orphaned - elif event_exists: - event = Event.get_or_none(Event.id == event_id) - if event and event.thumbnail: - orphans.append(file_path) - - result.orphans_found = len(orphans) - result.orphan_paths = orphans - - if len(orphans) == 0: - return result - - # Safety check - if ( - result.files_checked > 0 - and len(orphans) / result.files_checked > SAFETY_THRESHOLD - ): - if force: - logger.warning( - f"Event thumbnails sync: Would delete {len(orphans)}/{result.files_checked} " - f"({len(orphans) / result.files_checked * 100:.2f}%) files (force=True, bypassing safety threshold)." - ) - else: - logger.warning( - f"Event thumbnails sync: Would delete {len(orphans)}/{result.files_checked} " - f"({len(orphans) / result.files_checked * 100:.2f}%) files. " - "Aborting due to safety threshold." - ) - result.aborted = True - return result - - if dry_run: - logger.info( - f"Event thumbnails sync (dry run): Found {len(orphans)} orphaned files" - ) - return result - - # Delete orphans - logger.info(f"Deleting {len(orphans)} orphaned event thumbnail files") - for file_path in orphans: - try: - os.unlink(file_path) - result.orphans_deleted += 1 - except OSError as e: - logger.error(f"Failed to delete {file_path}: {e}") - - except Exception as e: - logger.error(f"Error syncing event thumbnails: {e}") - result.error = str(e) - - return result - - -def sync_review_thumbnails(dry_run: bool = False, force: bool = False) -> SyncResult: - """Sync review segment thumbnails - delete files not referenced by any review segment. 
- - Review thumbnails are stored at: CLIPS_DIR/review/thumb-{camera}-{review_id}.webp - The full path is stored in ReviewSegment.thumb_path - """ - result = SyncResult(media_type="review_thumbnails") - - try: - # Get all thumb paths from DB - review_thumb_paths = set( - r.thumb_path - for r in ReviewSegment.select(ReviewSegment.thumb_path) - if r.thumb_path - ) - - # Find review thumbnail files on disk - review_dir = os.path.join(CLIPS_DIR, "review") - thumbnail_files: list[str] = [] - if os.path.isdir(review_dir): - for file in os.listdir(review_dir): - if file.startswith("thumb-") and file.endswith(".webp"): - file_path = os.path.join(review_dir, file) - thumbnail_files.append(file_path) - - result.files_checked = len(thumbnail_files) - - # Find orphans - orphans: list[str] = [] - for file_path in thumbnail_files: - if file_path not in review_thumb_paths: - orphans.append(file_path) - - result.orphans_found = len(orphans) - result.orphan_paths = orphans - - if len(orphans) == 0: - return result - - # Safety check - if ( - result.files_checked > 0 - and len(orphans) / result.files_checked > SAFETY_THRESHOLD - ): - if force: - logger.warning( - f"Review thumbnails sync: Would delete {len(orphans)}/{result.files_checked} " - f"({len(orphans) / result.files_checked * 100:.2f}%) files (force=True, bypassing safety threshold)." - ) - else: - logger.warning( - f"Review thumbnails sync: Would delete {len(orphans)}/{result.files_checked} " - f"({len(orphans) / result.files_checked * 100:.2f}%) files. " - "Aborting due to safety threshold." 
- ) - result.aborted = True - return result - - if dry_run: - logger.info( - f"Review thumbnails sync (dry run): Found {len(orphans)} orphaned files" - ) - return result - - # Delete orphans - logger.info(f"Deleting {len(orphans)} orphaned review thumbnail files") - for file_path in orphans: - try: - os.unlink(file_path) - result.orphans_deleted += 1 - except OSError as e: - logger.error(f"Failed to delete {file_path}: {e}") - - except Exception as e: - logger.error(f"Error syncing review thumbnails: {e}") - result.error = str(e) - - return result - - -def sync_previews(dry_run: bool = False, force: bool = False) -> SyncResult: - """Sync preview files - delete files not referenced by any preview record. - - Previews are stored at: CLIPS_DIR/previews/{camera}/*.mp4 - The full path is stored in Previews.path - """ - result = SyncResult(media_type="previews") - - try: - # Get all preview paths from DB - preview_paths = set(p.path for p in Previews.select(Previews.path) if p.path) - - # Find preview files on disk - previews_dir = os.path.join(CLIPS_DIR, "previews") - preview_files: list[str] = [] - if os.path.isdir(previews_dir): - for camera_dir in os.listdir(previews_dir): - camera_path = os.path.join(previews_dir, camera_dir) - if not os.path.isdir(camera_path): - continue - for file in os.listdir(camera_path): - if file.endswith(".mp4"): - file_path = os.path.join(camera_path, file) - preview_files.append(file_path) - - result.files_checked = len(preview_files) - - # Find orphans - orphans: list[str] = [] - for file_path in preview_files: - if file_path not in preview_paths: - orphans.append(file_path) - - result.orphans_found = len(orphans) - result.orphan_paths = orphans - - if len(orphans) == 0: - return result - - # Safety check - if ( - result.files_checked > 0 - and len(orphans) / result.files_checked > SAFETY_THRESHOLD - ): - if force: - logger.warning( - f"Previews sync: Would delete {len(orphans)}/{result.files_checked} " - f"({len(orphans) / 
result.files_checked * 100:.2f}%) files (force=True, bypassing safety threshold)." - ) - else: - logger.warning( - f"Previews sync: Would delete {len(orphans)}/{result.files_checked} " - f"({len(orphans) / result.files_checked * 100:.2f}%) files. " - "Aborting due to safety threshold." - ) - result.aborted = True - return result - - if dry_run: - logger.info(f"Previews sync (dry run): Found {len(orphans)} orphaned files") - return result - - # Delete orphans - logger.info(f"Deleting {len(orphans)} orphaned preview files") - for file_path in orphans: - try: - os.unlink(file_path) - result.orphans_deleted += 1 - except OSError as e: - logger.error(f"Failed to delete {file_path}: {e}") - - except Exception as e: - logger.error(f"Error syncing previews: {e}") - result.error = str(e) - - return result - - -def sync_exports(dry_run: bool = False, force: bool = False) -> SyncResult: - """Sync export files - delete files not referenced by any export record. - - Export videos are stored at: EXPORT_DIR/*.mp4 - Export thumbnails are stored at: CLIPS_DIR/export/*.jpg - The paths are stored in Export.video_path and Export.thumb_path - """ - result = SyncResult(media_type="exports") - - try: - # Get all export paths from DB - export_video_paths = set() - export_thumb_paths = set() - for e in Export.select(Export.video_path, Export.thumb_path): - if e.video_path: - export_video_paths.add(e.video_path) - if e.thumb_path: - export_thumb_paths.add(e.thumb_path) - - # Find export video files on disk - export_files: list[str] = [] - if os.path.isdir(EXPORT_DIR): - for file in os.listdir(EXPORT_DIR): - if file.endswith(".mp4"): - file_path = os.path.join(EXPORT_DIR, file) - export_files.append(file_path) - - # Find export thumbnail files on disk - export_thumb_dir = os.path.join(CLIPS_DIR, "export") - thumb_files: list[str] = [] - if os.path.isdir(export_thumb_dir): - for file in os.listdir(export_thumb_dir): - if file.endswith(".jpg"): - file_path = os.path.join(export_thumb_dir, 
file) - thumb_files.append(file_path) - - result.files_checked = len(export_files) + len(thumb_files) - - # Find orphans - orphans: list[str] = [] - for file_path in export_files: - if file_path not in export_video_paths: - orphans.append(file_path) - for file_path in thumb_files: - if file_path not in export_thumb_paths: - orphans.append(file_path) - - result.orphans_found = len(orphans) - result.orphan_paths = orphans - - if len(orphans) == 0: - return result - - # Safety check - if ( - result.files_checked > 0 - and len(orphans) / result.files_checked > SAFETY_THRESHOLD - ): - if force: - logger.warning( - f"Exports sync: Would delete {len(orphans)}/{result.files_checked} " - f"({len(orphans) / result.files_checked * 100:.2f}%) files (force=True, bypassing safety threshold)." - ) - else: - logger.warning( - f"Exports sync: Would delete {len(orphans)}/{result.files_checked} " - f"({len(orphans) / result.files_checked * 100:.2f}%) files. " - "Aborting due to safety threshold." - ) - result.aborted = True - return result - - if dry_run: - logger.info(f"Exports sync (dry run): Found {len(orphans)} orphaned files") - return result - - # Delete orphans - logger.info(f"Deleting {len(orphans)} orphaned export files") - for file_path in orphans: - try: - os.unlink(file_path) - result.orphans_deleted += 1 - except OSError as e: - logger.error(f"Failed to delete {file_path}: {e}") - - except Exception as e: - logger.error(f"Error syncing exports: {e}") - result.error = str(e) - - return result - - -@dataclass -class MediaSyncResults: - """Combined results from all media sync operations.""" - - event_snapshots: SyncResult | None = None - event_thumbnails: SyncResult | None = None - review_thumbnails: SyncResult | None = None - previews: SyncResult | None = None - exports: SyncResult | None = None - recordings: SyncResult | None = None - - @property - def total_files_checked(self) -> int: - total = 0 - for result in [ - self.event_snapshots, - self.event_thumbnails, - 
self.review_thumbnails, - self.previews, - self.exports, - self.recordings, - ]: - if result: - total += result.files_checked - return total - - @property - def total_orphans_found(self) -> int: - total = 0 - for result in [ - self.event_snapshots, - self.event_thumbnails, - self.review_thumbnails, - self.previews, - self.exports, - self.recordings, - ]: - if result: - total += result.orphans_found - return total - - @property - def total_orphans_deleted(self) -> int: - total = 0 - for result in [ - self.event_snapshots, - self.event_thumbnails, - self.review_thumbnails, - self.previews, - self.exports, - self.recordings, - ]: - if result: - total += result.orphans_deleted - return total - - def to_dict(self) -> dict: - """Convert results to dictionary for API response.""" - results = {} - for name, result in [ - ("event_snapshots", self.event_snapshots), - ("event_thumbnails", self.event_thumbnails), - ("review_thumbnails", self.review_thumbnails), - ("previews", self.previews), - ("exports", self.exports), - ("recordings", self.recordings), - ]: - if result: - results[name] = { - "files_checked": result.files_checked, - "orphans_found": result.orphans_found, - "orphans_deleted": result.orphans_deleted, - "aborted": result.aborted, - "error": result.error, - } - results["totals"] = { - "files_checked": self.total_files_checked, - "orphans_found": self.total_orphans_found, - "orphans_deleted": self.total_orphans_deleted, - } - return results - - -def sync_all_media( - dry_run: bool = False, media_types: list[str] = ["all"], force: bool = False -) -> MediaSyncResults: - """Sync specified media types with the database. - - Args: - dry_run: If True, only report orphans without deleting them. - media_types: List of media types to sync. Can include: 'all', 'event_snapshots', - 'event_thumbnails', 'review_thumbnails', 'previews', 'exports', 'recordings' - force: If True, bypass safety threshold checks. - - Returns: - MediaSyncResults with details of each sync operation. 
- """ - logger.debug( - f"Starting media sync (dry_run={dry_run}, media_types={media_types}, force={force})" - ) - - results = MediaSyncResults() - - # Determine which media types to sync - sync_all = "all" in media_types - - if sync_all or "event_snapshots" in media_types: - results.event_snapshots = sync_event_snapshots(dry_run=dry_run, force=force) - - if sync_all or "event_thumbnails" in media_types: - results.event_thumbnails = sync_event_thumbnails(dry_run=dry_run, force=force) - - if sync_all or "review_thumbnails" in media_types: - results.review_thumbnails = sync_review_thumbnails(dry_run=dry_run, force=force) - - if sync_all or "previews" in media_types: - results.previews = sync_previews(dry_run=dry_run, force=force) - - if sync_all or "exports" in media_types: - results.exports = sync_exports(dry_run=dry_run, force=force) - - if sync_all or "recordings" in media_types: - results.recordings = sync_recordings(dry_run=dry_run, force=force) - - logger.info( - f"Media sync complete: checked {results.total_files_checked} files, " - f"found {results.total_orphans_found} orphans, " - f"deleted {results.total_orphans_deleted}" - ) - - return results diff --git a/frigate/util/services.py b/frigate/util/services.py index 19ec4efdf..64d83833d 100644 --- a/frigate/util/services.py +++ b/frigate/util/services.py @@ -417,12 +417,12 @@ def get_openvino_npu_stats() -> Optional[dict[str, str]]: else: usage = 0.0 - return {"npu": f"{round(usage, 2)}", "mem": "-%"} + return {"npu": f"{round(usage, 2)}", "mem": "-"} except (FileNotFoundError, PermissionError, ValueError): return None -def get_rockchip_gpu_stats() -> Optional[dict[str, str | float]]: +def get_rockchip_gpu_stats() -> Optional[dict[str, str]]: """Get GPU stats using rk.""" try: with open("/sys/kernel/debug/rkrga/load", "r") as f: @@ -440,16 +440,7 @@ def get_rockchip_gpu_stats() -> Optional[dict[str, str | float]]: return None average_load = f"{round(sum(load_values) / len(load_values), 2)}%" - stats: 
dict[str, str | float] = {"gpu": average_load, "mem": "-%"} - - try: - with open("/sys/class/thermal/thermal_zone5/temp", "r") as f: - line = f.readline().strip() - stats["temp"] = round(int(line) / 1000, 1) - except (FileNotFoundError, OSError, ValueError): - pass - - return stats + return {"gpu": average_load, "mem": "-"} def get_rockchip_npu_stats() -> Optional[dict[str, float | str]]: @@ -472,25 +463,13 @@ def get_rockchip_npu_stats() -> Optional[dict[str, float | str]]: percentages = [int(load) for load in core_loads] mean = round(sum(percentages) / len(percentages), 2) - stats: dict[str, float | str] = {"npu": mean, "mem": "-%"} - - try: - with open("/sys/class/thermal/thermal_zone6/temp", "r") as f: - line = f.readline().strip() - stats["temp"] = round(int(line) / 1000, 1) - except (FileNotFoundError, OSError, ValueError): - pass - - return stats + return {"npu": mean, "mem": "-"} -def try_get_info(f, h, default="N/A", sensor=None): +def try_get_info(f, h, default="N/A"): try: if h: - if sensor is not None: - v = f(h, sensor) - else: - v = f(h) + v = f(h) else: v = f() except nvml.NVMLError_NotSupported: @@ -519,9 +498,6 @@ def get_nvidia_gpu_stats() -> dict[int, dict]: util = try_get_info(nvml.nvmlDeviceGetUtilizationRates, handle) enc = try_get_info(nvml.nvmlDeviceGetEncoderUtilization, handle) dec = try_get_info(nvml.nvmlDeviceGetDecoderUtilization, handle) - temp = try_get_info( - nvml.nvmlDeviceGetTemperature, handle, default=None, sensor=0 - ) pstate = try_get_info(nvml.nvmlDeviceGetPowerState, handle, default=None) if util != "N/A": @@ -534,11 +510,6 @@ def get_nvidia_gpu_stats() -> dict[int, dict]: else: gpu_mem_util = -1 - if temp != "N/A" and temp is not None: - temp = float(temp) - else: - temp = None - if enc != "N/A": enc_util = enc[0] else: @@ -556,7 +527,6 @@ def get_nvidia_gpu_stats() -> dict[int, dict]: "enc": enc_util, "dec": dec_util, "pstate": pstate or "unknown", - "temp": temp, } except Exception: pass @@ -586,53 +556,6 @@ def 
get_jetson_stats() -> Optional[dict[int, dict]]: return results -def get_hailo_temps() -> dict[str, float]: - """Get temperatures for Hailo devices.""" - try: - from hailo_platform import Device - except ModuleNotFoundError: - return {} - - temps = {} - - try: - device_ids = Device.scan() - for i, device_id in enumerate(device_ids): - try: - with Device(device_id) as device: - temp_info = device.control.get_chip_temperature() - - # Get board name and normalise it - identity = device.control.identify() - board_name = None - for line in str(identity).split("\n"): - if line.startswith("Board Name:"): - board_name = ( - line.split(":", 1)[1].strip().lower().replace("-", "") - ) - break - - if not board_name: - board_name = f"hailo{i}" - - # Use indexed name if multiple devices, otherwise just the board name - device_name = ( - f"{board_name}-{i}" if len(device_ids) > 1 else board_name - ) - - # ts1_temperature is also available, but appeared to be the same as ts0 in testing. - temps[device_name] = round(temp_info.ts0_temperature, 1) - except Exception as e: - logger.debug( - f"Failed to get temperature for Hailo device {device_id}: {e}" - ) - continue - except Exception as e: - logger.debug(f"Failed to scan for Hailo devices: {e}") - - return temps - - def ffprobe_stream(ffmpeg, path: str, detailed: bool = False) -> sp.CompletedProcess: """Run ffprobe on stream.""" clean_path = escape_special_characters(path) @@ -668,17 +591,12 @@ def ffprobe_stream(ffmpeg, path: str, detailed: bool = False) -> sp.CompletedPro def vainfo_hwaccel(device_name: Optional[str] = None) -> sp.CompletedProcess: """Run vainfo.""" - if not device_name: - cmd = ["vainfo"] - else: - if os.path.isabs(device_name) and device_name.startswith("/dev/dri/"): - device_path = device_name - else: - device_path = f"/dev/dri/{device_name}" - - cmd = ["vainfo", "--display", "drm", "--device", device_path] - - return sp.run(cmd, capture_output=True) + ffprobe_cmd = ( + ["vainfo"] + if not device_name + else 
["vainfo", "--display", "drm", "--device", f"/dev/dri/{device_name}"] + ) + return sp.run(ffprobe_cmd, capture_output=True) def get_nvidia_driver_info() -> dict[str, Any]: diff --git a/frigate/video.py b/frigate/video.py index 5e42619dd..112844543 100755 --- a/frigate/video.py +++ b/frigate/video.py @@ -3,7 +3,6 @@ import queue import subprocess as sp import threading import time -from collections import deque from datetime import datetime, timedelta, timezone from multiprocessing import Queue, Value from multiprocessing.synchronize import Event as MpEvent @@ -118,7 +117,6 @@ def capture_frames( frame_rate.start() skipped_eps = EventsPerSecond() skipped_eps.start() - config_subscriber = CameraConfigUpdateSubscriber( None, {config.name: config}, [CameraConfigUpdateEnum.enabled] ) @@ -183,9 +181,6 @@ class CameraWatchdog(threading.Thread): camera_fps, skipped_fps, ffmpeg_pid, - stalls, - reconnects, - detection_frame, stop_event, ): threading.Thread.__init__(self) @@ -206,10 +201,6 @@ class CameraWatchdog(threading.Thread): self.frame_index = 0 self.stop_event = stop_event self.sleeptime = self.config.ffmpeg.retry_interval - self.reconnect_timestamps = deque() - self.stalls = stalls - self.reconnects = reconnects - self.detection_frame = detection_frame self.config_subscriber = CameraConfigUpdateSubscriber( None, @@ -225,35 +216,6 @@ class CameraWatchdog(threading.Thread): self.latest_cache_segment_time: float = 0 self.record_enable_time: datetime | None = None - # Stall tracking (based on last processed frame) - self._stall_timestamps: deque[float] = deque() - self._stall_active: bool = False - - # Status caching to reduce message volume - self._last_detect_status: str | None = None - self._last_record_status: str | None = None - self._last_status_update_time: float = 0.0 - - def _send_detect_status(self, status: str, now: float) -> None: - """Send detect status only if changed or retry_interval has elapsed.""" - if ( - status != self._last_detect_status - or (now - 
self._last_status_update_time) >= self.sleeptime - ): - self.requestor.send_data(f"{self.config.name}/status/detect", status) - self._last_detect_status = status - self._last_status_update_time = now - - def _send_record_status(self, status: str, now: float) -> None: - """Send record status only if changed or retry_interval has elapsed.""" - if ( - status != self._last_record_status - or (now - self._last_status_update_time) >= self.sleeptime - ): - self.requestor.send_data(f"{self.config.name}/status/record", status) - self._last_record_status = status - self._last_status_update_time = now - def _update_enabled_state(self) -> bool: """Fetch the latest config and update enabled state.""" self.config_subscriber.check_for_updates() @@ -280,14 +242,6 @@ class CameraWatchdog(threading.Thread): else: self.ffmpeg_detect_process.wait() - # Update reconnects - now = datetime.now().timestamp() - self.reconnect_timestamps.append(now) - while self.reconnect_timestamps and self.reconnect_timestamps[0] < now - 3600: - self.reconnect_timestamps.popleft() - if self.reconnects: - self.reconnects.value = len(self.reconnect_timestamps) - # Wait for old capture thread to fully exit before starting a new one if self.capture_thread is not None and self.capture_thread.is_alive(): self.logger.info("Waiting for capture thread to exit...") @@ -313,10 +267,7 @@ class CameraWatchdog(threading.Thread): self.record_enable_time = datetime.now().astimezone(timezone.utc) time.sleep(self.sleeptime) - last_restart_time = datetime.now().timestamp() - - # 1 second watchdog loop - while not self.stop_event.wait(1): + while not self.stop_event.wait(self.sleeptime): enabled = self._update_enabled_state() if enabled != self.was_enabled: if enabled: @@ -334,9 +285,12 @@ class CameraWatchdog(threading.Thread): self.record_enable_time = None # update camera status - now = datetime.now().timestamp() - self._send_detect_status("disabled", now) - self._send_record_status("disabled", now) + 
self.requestor.send_data( + f"{self.config.name}/status/detect", "disabled" + ) + self.requestor.send_data( + f"{self.config.name}/status/record", "disabled" + ) self.was_enabled = enabled continue @@ -375,44 +329,36 @@ class CameraWatchdog(threading.Thread): now = datetime.now().timestamp() - # Check if enough time has passed to allow ffmpeg restart (backoff pacing) - time_since_last_restart = now - last_restart_time - can_restart = time_since_last_restart >= self.sleeptime - if not self.capture_thread.is_alive(): - self._send_detect_status("offline", now) + self.requestor.send_data(f"{self.config.name}/status/detect", "offline") self.camera_fps.value = 0 self.logger.error( f"Ffmpeg process crashed unexpectedly for {self.config.name}." ) - if can_restart: - self.reset_capture_thread(terminate=False) - last_restart_time = now + self.reset_capture_thread(terminate=False) elif self.camera_fps.value >= (self.config.detect.fps + 10): self.fps_overflow_count += 1 if self.fps_overflow_count == 3: - self._send_detect_status("offline", now) + self.requestor.send_data( + f"{self.config.name}/status/detect", "offline" + ) self.fps_overflow_count = 0 self.camera_fps.value = 0 self.logger.info( f"{self.config.name} exceeded fps limit. Exiting ffmpeg..." ) - if can_restart: - self.reset_capture_thread(drain_output=False) - last_restart_time = now + self.reset_capture_thread(drain_output=False) elif now - self.capture_thread.current_frame.value > 20: - self._send_detect_status("offline", now) + self.requestor.send_data(f"{self.config.name}/status/detect", "offline") self.camera_fps.value = 0 self.logger.info( f"No frames received from {self.config.name} in 20 seconds. Exiting ffmpeg..." 
) - if can_restart: - self.reset_capture_thread() - last_restart_time = now + self.reset_capture_thread() else: # process is running normally - self._send_detect_status("online", now) + self.requestor.send_data(f"{self.config.name}/status/detect", "online") self.fps_overflow_count = 0 for p in self.ffmpeg_other_processes: @@ -495,7 +441,9 @@ class CameraWatchdog(threading.Thread): continue else: - self._send_record_status("online", now) + self.requestor.send_data( + f"{self.config.name}/status/record", "online" + ) p["latest_segment_time"] = self.latest_cache_segment_time if poll is None: @@ -511,34 +459,6 @@ class CameraWatchdog(threading.Thread): p["cmd"], self.logger, p["logpipe"], ffmpeg_process=p["process"] ) - # Update stall metrics based on last processed frame timestamp - now = datetime.now().timestamp() - processed_ts = ( - float(self.detection_frame.value) if self.detection_frame else 0.0 - ) - if processed_ts > 0: - delta = now - processed_ts - observed_fps = ( - self.camera_fps.value - if self.camera_fps.value > 0 - else self.config.detect.fps - ) - interval = 1.0 / max(observed_fps, 0.1) - stall_threshold = max(2.0 * interval, 2.0) - - if delta > stall_threshold: - if not self._stall_active: - self._stall_timestamps.append(now) - self._stall_active = True - else: - self._stall_active = False - - while self._stall_timestamps and self._stall_timestamps[0] < now - 3600: - self._stall_timestamps.popleft() - - if self.stalls: - self.stalls.value = len(self._stall_timestamps) - self.stop_all_ffmpeg() self.logpipe.close() self.config_subscriber.stop() @@ -676,9 +596,6 @@ class CameraCapture(FrigateProcess): self.camera_metrics.camera_fps, self.camera_metrics.skipped_fps, self.camera_metrics.ffmpeg_pid, - self.camera_metrics.stalls_last_hour, - self.camera_metrics.reconnects_last_hour, - self.camera_metrics.detection_frame, self.stop_event, ) camera_watchdog.start() diff --git a/migrations/033_create_export_case_table.py 
b/migrations/033_create_export_case_table.py deleted file mode 100644 index 08edcbc32..000000000 --- a/migrations/033_create_export_case_table.py +++ /dev/null @@ -1,50 +0,0 @@ -"""Peewee migrations -- 033_create_export_case_table.py. - -Some examples (model - class or model name):: - - > Model = migrator.orm['model_name'] # Return model in current state by name - - > migrator.sql(sql) # Run custom SQL - > migrator.python(func, *args, **kwargs) # Run python code - > migrator.create_model(Model) # Create a model (could be used as decorator) - > migrator.remove_model(model, cascade=True) # Remove a model - > migrator.add_fields(model, **fields) # Add fields to a model - > migrator.change_fields(model, **fields) # Change fields - > migrator.remove_fields(model, *field_names, cascade=True) - > migrator.rename_field(model, old_field_name, new_field_name) - > migrator.rename_table(model, new_table_name) - > migrator.add_index(model, *col_names, unique=False) - > migrator.drop_index(model, *col_names) - > migrator.add_not_null(model, *field_names) - > migrator.drop_not_null(model, *field_names) - > migrator.add_default(model, field_name, default) - -""" - -import peewee as pw - -SQL = pw.SQL - - -def migrate(migrator, database, fake=False, **kwargs): - migrator.sql( - """ - CREATE TABLE IF NOT EXISTS "exportcase" ( - "id" VARCHAR(30) NOT NULL PRIMARY KEY, - "name" VARCHAR(100) NOT NULL, - "description" TEXT NULL, - "created_at" DATETIME NOT NULL, - "updated_at" DATETIME NOT NULL - ) - """ - ) - migrator.sql( - 'CREATE INDEX IF NOT EXISTS "exportcase_name" ON "exportcase" ("name")' - ) - migrator.sql( - 'CREATE INDEX IF NOT EXISTS "exportcase_created_at" ON "exportcase" ("created_at")' - ) - - -def rollback(migrator, database, fake=False, **kwargs): - pass diff --git a/migrations/034_add_export_case_to_exports.py b/migrations/034_add_export_case_to_exports.py deleted file mode 100644 index da9e1d4ac..000000000 --- a/migrations/034_add_export_case_to_exports.py +++ 
/dev/null @@ -1,40 +0,0 @@ -"""Peewee migrations -- 034_add_export_case_to_exports.py. - -Some examples (model - class or model name):: - - > Model = migrator.orm['model_name'] # Return model in current state by name - - > migrator.sql(sql) # Run custom SQL - > migrator.python(func, *args, **kwargs) # Run python code - > migrator.create_model(Model) # Create a model (could be used as decorator) - > migrator.remove_model(model, cascade=True) # Remove a model - > migrator.add_fields(model, **fields) # Add fields to a model - > migrator.change_fields(model, **fields) # Change fields - > migrator.remove_fields(model, *field_names, cascade=True) - > migrator.rename_field(model, old_field_name, new_field_name) - > migrator.rename_table(model, new_table_name) - > migrator.add_index(model, *col_names, unique=False) - > migrator.drop_index(model, *col_names) - > migrator.add_not_null(model, *field_names) - > migrator.drop_not_null(model, *field_names) - > migrator.add_default(model, field_name, default) - -""" - -import peewee as pw - -SQL = pw.SQL - - -def migrate(migrator, database, fake=False, **kwargs): - # Add nullable export_case_id column to export table - migrator.sql('ALTER TABLE "export" ADD COLUMN "export_case_id" VARCHAR(30) NULL') - - # Index for faster case-based queries - migrator.sql( - 'CREATE INDEX IF NOT EXISTS "export_export_case_id" ON "export" ("export_case_id")' - ) - - -def rollback(migrator, database, fake=False, **kwargs): - pass diff --git a/web/public/locales/en/components/dialog.json b/web/public/locales/en/components/dialog.json index 9a6f68daf..91ff38d82 100644 --- a/web/public/locales/en/components/dialog.json +++ b/web/public/locales/en/components/dialog.json @@ -49,10 +49,6 @@ "name": { "placeholder": "Name the Export" }, - "case": { - "label": "Case", - "placeholder": "Select a case" - }, "select": "Select", "export": "Export", "selectOrExport": "Select or Export", diff --git a/web/public/locales/en/config/cameras.json 
b/web/public/locales/en/config/cameras.json index d2c74dc54..67015bde5 100644 --- a/web/public/locales/en/config/cameras.json +++ b/web/public/locales/en/config/cameras.json @@ -324,6 +324,9 @@ "enabled": { "label": "Enable record on all cameras." }, + "sync_recordings": { + "label": "Sync recordings with disk on startup and once a day." + }, "expire_interval": { "label": "Number of minutes to wait between cleanup runs." }, @@ -755,4 +758,4 @@ "label": "Keep track of original state of camera." } } -} +} \ No newline at end of file diff --git a/web/public/locales/en/config/networking.json b/web/public/locales/en/config/networking.json index 592ea9477..0f8d9cc54 100644 --- a/web/public/locales/en/config/networking.json +++ b/web/public/locales/en/config/networking.json @@ -2,23 +2,12 @@ "label": "Networking configuration", "properties": { "ipv6": { - "label": "IPv6 configuration", + "label": "Network configuration", "properties": { "enabled": { "label": "Enable IPv6 for port 5000 and/or 8971" } } - }, - "listen": { - "label": "Listening ports configuration", - "properties": { - "internal": { - "label": "Internal listening port for Frigate" - }, - "external": { - "label": "External listening port for Frigate" - } - } } } -} +} \ No newline at end of file diff --git a/web/public/locales/en/config/record.json b/web/public/locales/en/config/record.json index 0c4a5fc42..81139084e 100644 --- a/web/public/locales/en/config/record.json +++ b/web/public/locales/en/config/record.json @@ -4,6 +4,9 @@ "enabled": { "label": "Enable record on all cameras." }, + "sync_recordings": { + "label": "Sync recordings with disk on startup and once a day." + }, "expire_interval": { "label": "Number of minutes to wait between cleanup runs." }, @@ -87,4 +90,4 @@ "label": "Keep track of original state of recording." 
} } -} +} \ No newline at end of file diff --git a/web/public/locales/en/views/exports.json b/web/public/locales/en/views/exports.json index 8f9e8205e..4a79d20e1 100644 --- a/web/public/locales/en/views/exports.json +++ b/web/public/locales/en/views/exports.json @@ -2,10 +2,6 @@ "documentTitle": "Export - Frigate", "search": "Search", "noExports": "No exports found", - "headings": { - "cases": "Cases", - "uncategorizedExports": "Uncategorized Exports" - }, "deleteExport": "Delete Export", "deleteExport.desc": "Are you sure you want to delete {{exportName}}?", "editExport": { @@ -17,21 +13,11 @@ "shareExport": "Share export", "downloadVideo": "Download video", "editName": "Edit name", - "deleteExport": "Delete export", - "assignToCase": "Add to case" + "deleteExport": "Delete export" }, "toast": { "error": { - "renameExportFailed": "Failed to rename export: {{errorMessage}}", - "assignCaseFailed": "Failed to update case assignment: {{errorMessage}}" + "renameExportFailed": "Failed to rename export: {{errorMessage}}" } - }, - "caseDialog": { - "title": "Add to case", - "description": "Choose an existing case or create a new one.", - "selectLabel": "Case", - "newCaseOption": "Create new case", - "nameLabel": "Case name", - "descriptionLabel": "Description" } } diff --git a/web/public/locales/en/views/settings.json b/web/public/locales/en/views/settings.json index a84c15619..ea2869986 100644 --- a/web/public/locales/en/views/settings.json +++ b/web/public/locales/en/views/settings.json @@ -1067,53 +1067,5 @@ "deleteTriggerFailed": "Failed to delete trigger: {{errorMessage}}" } } - }, - "maintenance": { - "title": "Maintenance", - "sync": { - "title": "Media Sync", - "desc": "Frigate will periodically clean up media on a regular schedule according to your retention configuration. It is normal to see a few orphaned files as Frigate runs. 
Use this feature to remove orphaned media files from disk that are no longer referenced in the database.", - "started": "Media sync started.", - "alreadyRunning": "A sync job is already running", - "error": "Failed to start sync", - "currentStatus": "Status", - "jobId": "Job ID", - "startTime": "Start Time", - "endTime": "End Time", - "statusLabel": "Status", - "results": "Results", - "errorLabel": "Error", - "mediaTypes": "Media Types", - "allMedia": "All Media", - "dryRun": "Dry Run", - "dryRunEnabled": "No files will be deleted", - "dryRunDisabled": "Files will be deleted", - "force": "Force", - "forceDesc": "Bypass safety threshold and complete sync even if more than 50% of the files would be deleted.", - "running": "Sync Running...", - "start": "Start Sync", - "inProgress": "Sync is in progress. This page is disabled.", - "status": { - "queued": "Queued", - "running": "Running", - "completed": "Completed", - "failed": "Failed", - "notRunning": "Not Running" - }, - "resultsFields": { - "filesChecked": "Files Checked", - "orphansFound": "Orphans Found", - "orphansDeleted": "Orphans Deleted", - "aborted": "Aborted. 
Deletion would exceed safety threshold.", - "error": "Error", - "totals": "Totals" - }, - "event_snapshots": "Tracked Object Snapshots", - "event_thumbnails": "Tracked Object Thumbnails", - "review_thumbnails": "Review Thumbnails", - "previews": "Previews", - "exports": "Exports", - "recordings": "Recordings" - } } } diff --git a/web/public/locales/en/views/system.json b/web/public/locales/en/views/system.json index 8ddbc03e1..da774e302 100644 --- a/web/public/locales/en/views/system.json +++ b/web/public/locales/en/views/system.json @@ -51,7 +51,6 @@ "gpuMemory": "GPU Memory", "gpuEncoder": "GPU Encoder", "gpuDecoder": "GPU Decoder", - "gpuTemperature": "GPU Temperature", "gpuInfo": { "vainfoOutput": { "title": "Vainfo Output", @@ -78,7 +77,6 @@ }, "npuUsage": "NPU Usage", "npuMemory": "NPU Memory", - "npuTemperature": "NPU Temperature", "intelGpuWarning": { "title": "Intel GPU Stats Warning", "message": "GPU stats unavailable", @@ -160,17 +158,6 @@ "cameraDetectionsPerSecond": "{{camName}} detections per second", "cameraSkippedDetectionsPerSecond": "{{camName}} skipped detections per second" }, - "connectionQuality": { - "title": "Connection Quality", - "excellent": "Excellent", - "fair": "Fair", - "poor": "Poor", - "unusable": "Unusable", - "fps": "FPS", - "expectedFps": "Expected FPS", - "reconnectsLastHour": "Reconnects (last hour)", - "stallsLastHour": "Stalls (last hour)" - }, "toast": { "success": { "copyToClipboard": "Copied probe data to clipboard." 
diff --git a/web/src/api/ws.tsx b/web/src/api/ws.tsx index 6bb2fdc32..44d45ea2f 100644 --- a/web/src/api/ws.tsx +++ b/web/src/api/ws.tsx @@ -11,7 +11,6 @@ import { TrackedObjectUpdateReturnType, TriggerStatus, FrigateAudioDetections, - Job, } from "@/types/ws"; import { FrigateStats } from "@/types/stats"; import { createContainer } from "react-tracked"; @@ -652,40 +651,3 @@ export function useTriggers(): { payload: TriggerStatus } { : { name: "", camera: "", event_id: "", type: "", score: 0 }; return { payload: useDeepMemo(parsed) }; } - -export function useJobStatus( - jobType: string, - revalidateOnFocus: boolean = true, -): { payload: Job | null } { - const { - value: { payload }, - send: sendCommand, - } = useWs("job_state", "jobState"); - - const jobData = useDeepMemo( - payload && typeof payload === "string" ? JSON.parse(payload) : {}, - ); - const currentJob = jobData[jobType] || null; - - useEffect(() => { - let listener: (() => void) | undefined; - if (revalidateOnFocus) { - sendCommand("jobState"); - listener = () => { - if (document.visibilityState === "visible") { - sendCommand("jobState"); - } - }; - addEventListener("visibilitychange", listener); - } - - return () => { - if (listener) { - removeEventListener("visibilitychange", listener); - } - }; - // eslint-disable-next-line react-hooks/exhaustive-deps - }, [revalidateOnFocus]); - - return { payload: currentJob as Job | null }; -} diff --git a/web/src/components/auth/ProtectedRoute.tsx b/web/src/components/auth/ProtectedRoute.tsx index a7d1b3596..cedf5a15a 100644 --- a/web/src/components/auth/ProtectedRoute.tsx +++ b/web/src/components/auth/ProtectedRoute.tsx @@ -47,7 +47,7 @@ export default function ProtectedRoute({ return ; } - // Authenticated mode (external port): require login + // Authenticated mode (8971): require login if (!auth.user) { return ( diff --git a/web/src/components/camera/ConnectionQualityIndicator.tsx b/web/src/components/camera/ConnectionQualityIndicator.tsx deleted file mode 
100644 index 3ea3c4f19..000000000 --- a/web/src/components/camera/ConnectionQualityIndicator.tsx +++ /dev/null @@ -1,76 +0,0 @@ -import { useTranslation } from "react-i18next"; -import { - Tooltip, - TooltipContent, - TooltipTrigger, -} from "@/components/ui/tooltip"; -import { cn } from "@/lib/utils"; - -type ConnectionQualityIndicatorProps = { - quality: "excellent" | "fair" | "poor" | "unusable"; - expectedFps: number; - reconnects: number; - stalls: number; -}; - -export function ConnectionQualityIndicator({ - quality, - expectedFps, - reconnects, - stalls, -}: ConnectionQualityIndicatorProps) { - const { t } = useTranslation(["views/system"]); - - const getColorClass = (quality: string): string => { - switch (quality) { - case "excellent": - return "bg-success"; - case "fair": - return "bg-yellow-500"; - case "poor": - return "bg-orange-500"; - case "unusable": - return "bg-destructive"; - default: - return "bg-gray-500"; - } - }; - - const qualityLabel = t(`cameras.connectionQuality.${quality}`); - - return ( - - -
- - -
-
- {t("cameras.connectionQuality.title")} -
-
-
{qualityLabel}
-
-
- {t("cameras.connectionQuality.expectedFps")}:{" "} - {expectedFps.toFixed(1)} {t("cameras.connectionQuality.fps")} -
-
- {t("cameras.connectionQuality.reconnectsLastHour")}:{" "} - {reconnects} -
-
- {t("cameras.connectionQuality.stallsLastHour")}: {stalls} -
-
-
-
-
- - ); -} diff --git a/web/src/components/card/ExportCard.tsx b/web/src/components/card/ExportCard.tsx index c8d9c4c65..021524532 100644 --- a/web/src/components/card/ExportCard.tsx +++ b/web/src/components/card/ExportCard.tsx @@ -1,8 +1,9 @@ import ActivityIndicator from "../indicators/activity-indicator"; +import { LuTrash } from "react-icons/lu"; import { Button } from "../ui/button"; -import { useCallback, useMemo, useState } from "react"; -import { isMobile } from "react-device-detect"; -import { FiMoreVertical } from "react-icons/fi"; +import { useCallback, useState } from "react"; +import { isDesktop, isMobile } from "react-device-detect"; +import { FaDownload, FaPlay, FaShareAlt } from "react-icons/fa"; import { Skeleton } from "../ui/skeleton"; import { Dialog, @@ -13,81 +14,35 @@ import { } from "../ui/dialog"; import { Input } from "../ui/input"; import useKeyboardListener from "@/hooks/use-keyboard-listener"; -import { DeleteClipType, Export, ExportCase } from "@/types/export"; +import { DeleteClipType, Export } from "@/types/export"; +import { MdEditSquare } from "react-icons/md"; import { baseUrl } from "@/api/baseUrl"; import { cn } from "@/lib/utils"; import { shareOrCopy } from "@/utils/browserUtil"; import { useTranslation } from "react-i18next"; import { ImageShadowOverlay } from "../overlay/ImageShadowOverlay"; import BlurredIconButton from "../button/BlurredIconButton"; +import { Tooltip, TooltipContent, TooltipTrigger } from "../ui/tooltip"; import { useIsAdmin } from "@/hooks/use-is-admin"; -import { - DropdownMenu, - DropdownMenuContent, - DropdownMenuItem, - DropdownMenuTrigger, -} from "../ui/dropdown-menu"; -import { FaFolder } from "react-icons/fa"; -type CaseCardProps = { - className: string; - exportCase: ExportCase; - exports: Export[]; - onSelect: () => void; -}; -export function CaseCard({ - className, - exportCase, - exports, - onSelect, -}: CaseCardProps) { - const firstExport = useMemo( - () => exports.find((exp) => 
exp.thumb_path && exp.thumb_path.length > 0), - [exports], - ); - - return ( -
onSelect()} - > - {firstExport && ( - - )} -
-
- -
{exportCase.name}
-
-
- ); -} - -type ExportCardProps = { +type ExportProps = { className: string; exportedRecording: Export; onSelect: (selected: Export) => void; onRename: (original: string, update: string) => void; onDelete: ({ file, exportName }: DeleteClipType) => void; - onAssignToCase?: (selected: Export) => void; }; -export function ExportCard({ + +export default function ExportCard({ className, exportedRecording, onSelect, onRename, onDelete, - onAssignToCase, -}: ExportCardProps) { +}: ExportProps) { const { t } = useTranslation(["views/exports"]); const isAdmin = useIsAdmin(); + const [hovered, setHovered] = useState(false); const [loading, setLoading] = useState( exportedRecording.thumb_path.length > 0, ); @@ -181,14 +136,12 @@ export function ExportCard({
{ - if (!exportedRecording.in_progress) { - onSelect(exportedRecording); - } - }} + onMouseEnter={isDesktop ? () => setHovered(true) : undefined} + onMouseLeave={isDesktop ? () => setHovered(false) : undefined} + onClick={isDesktop ? undefined : () => setHovered(!hovered)} > {exportedRecording.in_progress ? ( @@ -205,88 +158,95 @@ export function ExportCard({ )} )} - {!exportedRecording.in_progress && ( -
- - - e.stopPropagation()} - > - - - - - { - e.stopPropagation(); - shareOrCopy( - `${baseUrl}export?id=${exportedRecording.id}`, - exportedRecording.name.replaceAll("_", " "), - ); - }} - > - {t("tooltip.shareExport")} - - + {hovered && ( + <> +
+
+
+ {!exportedRecording.in_progress && ( + + + + shareOrCopy( + `${baseUrl}export?id=${exportedRecording.id}`, + exportedRecording.name.replaceAll("_", " "), + ) + } + > + + + + {t("tooltip.shareExport")} + + )} + {!exportedRecording.in_progress && ( e.stopPropagation()} > - {t("tooltip.downloadVideo")} + + + + + + + + {t("tooltip.downloadVideo")} + + - - {isAdmin && onAssignToCase && ( - { - e.stopPropagation(); - onAssignToCase(exportedRecording); - }} - > - {t("tooltip.assignToCase")} - + )} + {isAdmin && !exportedRecording.in_progress && ( + + + + setEditName({ + original: exportedRecording.name, + update: undefined, + }) + } + > + + + + {t("tooltip.editName")} + )} {isAdmin && ( - { - e.stopPropagation(); - setEditName({ - original: exportedRecording.name, - update: undefined, - }); - }} - > - {t("tooltip.editName")} - + + + + onDelete({ + file: exportedRecording.id, + exportName: exportedRecording.name, + }) + } + > + + + + {t("tooltip.deleteExport")} + )} - {isAdmin && ( - { - e.stopPropagation(); - onDelete({ - file: exportedRecording.id, - exportName: exportedRecording.name, - }); - }} - > - {t("tooltip.deleteExport")} - - )} - - -
+
+
+ + {!exportedRecording.in_progress && ( + + )} + )} {loading && ( diff --git a/web/src/components/filter/ExportFilterGroup.tsx b/web/src/components/filter/ExportFilterGroup.tsx deleted file mode 100644 index c5fe4f33c..000000000 --- a/web/src/components/filter/ExportFilterGroup.tsx +++ /dev/null @@ -1,67 +0,0 @@ -import { cn } from "@/lib/utils"; -import { - DEFAULT_EXPORT_FILTERS, - ExportFilter, - ExportFilters, -} from "@/types/export"; -import { CamerasFilterButton } from "./CamerasFilterButton"; -import { useAllowedCameras } from "@/hooks/use-allowed-cameras"; -import { useMemo } from "react"; -import { FrigateConfig } from "@/types/frigateConfig"; -import useSWR from "swr"; - -type ExportFilterGroupProps = { - className: string; - filters?: ExportFilters[]; - filter?: ExportFilter; - onUpdateFilter: (filter: ExportFilter) => void; -}; -export default function ExportFilterGroup({ - className, - filter, - filters = DEFAULT_EXPORT_FILTERS, - onUpdateFilter, -}: ExportFilterGroupProps) { - const { data: config } = useSWR("config", { - revalidateOnFocus: false, - }); - const allowedCameras = useAllowedCameras(); - - const filterValues = useMemo( - () => ({ - cameras: allowedCameras, - }), - [allowedCameras], - ); - - const groups = useMemo(() => { - if (!config) { - return []; - } - - return Object.entries(config.camera_groups).sort( - (a, b) => a[1].order - b[1].order, - ); - }, [config]); - - return ( -
- {filters.includes("cameras") && ( - { - onUpdateFilter({ ...filter, cameras: newCameras }); - }} - /> - )} -
- ); -} diff --git a/web/src/components/overlay/ExportDialog.tsx b/web/src/components/overlay/ExportDialog.tsx index 738aa689e..b8b5b9911 100644 --- a/web/src/components/overlay/ExportDialog.tsx +++ b/web/src/components/overlay/ExportDialog.tsx @@ -22,14 +22,7 @@ import useSWR from "swr"; import { FrigateConfig } from "@/types/frigateConfig"; import { Popover, PopoverContent, PopoverTrigger } from "../ui/popover"; import { TimezoneAwareCalendar } from "./ReviewActivityCalendar"; -import { - Select, - SelectContent, - SelectItem, - SelectSeparator, - SelectTrigger, - SelectValue, -} from "../ui/select"; +import { SelectSeparator } from "../ui/select"; import { isDesktop, isIOS, isMobile } from "react-device-detect"; import { Drawer, DrawerContent, DrawerTrigger } from "../ui/drawer"; import SaveExportOverlay from "./SaveExportOverlay"; @@ -38,7 +31,6 @@ import { baseUrl } from "@/api/baseUrl"; import { cn } from "@/lib/utils"; import { GenericVideoPlayer } from "../player/GenericVideoPlayer"; import { useTranslation } from "react-i18next"; -import { ExportCase } from "@/types/export"; const EXPORT_OPTIONS = [ "1", @@ -75,9 +67,6 @@ export default function ExportDialog({ }: ExportDialogProps) { const { t } = useTranslation(["components/dialog"]); const [name, setName] = useState(""); - const [selectedCaseId, setSelectedCaseId] = useState( - undefined, - ); const onStartExport = useCallback(() => { if (!range) { @@ -100,7 +89,6 @@ export default function ExportDialog({ { playback: "realtime", name, - export_case_id: selectedCaseId || undefined, }, ) .then((response) => { @@ -114,7 +102,6 @@ export default function ExportDialog({ ), }); setName(""); - setSelectedCaseId(undefined); setRange(undefined); setMode("none"); } @@ -131,11 +118,10 @@ export default function ExportDialog({ { position: "top-center" }, ); }); - }, [camera, name, range, selectedCaseId, setRange, setName, setMode, t]); + }, [camera, name, range, setRange, setName, setMode, t]); const handleCancel = 
useCallback(() => { setName(""); - setSelectedCaseId(undefined); setMode("none"); setRange(undefined); }, [setMode, setRange]); @@ -204,10 +190,8 @@ export default function ExportDialog({ currentTime={currentTime} range={range} name={name} - selectedCaseId={selectedCaseId} onStartExport={onStartExport} setName={setName} - setSelectedCaseId={setSelectedCaseId} setRange={setRange} setMode={setMode} onCancel={handleCancel} @@ -223,10 +207,8 @@ type ExportContentProps = { currentTime: number; range?: TimeRange; name: string; - selectedCaseId?: string; onStartExport: () => void; setName: (name: string) => void; - setSelectedCaseId: (caseId: string | undefined) => void; setRange: (range: TimeRange | undefined) => void; setMode: (mode: ExportMode) => void; onCancel: () => void; @@ -236,17 +218,14 @@ export function ExportContent({ currentTime, range, name, - selectedCaseId, onStartExport, setName, - setSelectedCaseId, setRange, setMode, onCancel, }: ExportContentProps) { const { t } = useTranslation(["components/dialog"]); const [selectedOption, setSelectedOption] = useState("1"); - const { data: cases } = useSWR("cases"); const onSelectTime = useCallback( (option: ExportOption) => { @@ -341,44 +320,6 @@ export function ExportContent({ value={name} onChange={(e) => setName(e.target.value)} /> -
- - -
{isDesktop && } ( - undefined, - ); const onStartExport = useCallback(() => { if (!range) { toast.error(t("toast.error.noValidTimeSelected"), { @@ -99,7 +96,6 @@ export default function MobileReviewSettingsDrawer({ { playback: "realtime", name, - export_case_id: selectedCaseId || undefined, }, ) .then((response) => { @@ -118,7 +114,6 @@ export default function MobileReviewSettingsDrawer({ }, ); setName(""); - setSelectedCaseId(undefined); setRange(undefined); setMode("none"); } @@ -138,7 +133,7 @@ export default function MobileReviewSettingsDrawer({ }, ); }); - }, [camera, name, range, selectedCaseId, setRange, setName, setMode, t]); + }, [camera, name, range, setRange, setName, setMode, t]); // filters @@ -205,10 +200,8 @@ export default function MobileReviewSettingsDrawer({ currentTime={currentTime} range={range} name={name} - selectedCaseId={selectedCaseId} onStartExport={onStartExport} setName={setName} - setSelectedCaseId={setSelectedCaseId} setRange={setRange} setMode={(mode) => { setMode(mode); @@ -220,7 +213,6 @@ export default function MobileReviewSettingsDrawer({ onCancel={() => { setMode("none"); setRange(undefined); - setSelectedCaseId(undefined); setDrawerMode("select"); }} /> diff --git a/web/src/components/overlay/dialog/OptionAndInputDialog.tsx b/web/src/components/overlay/dialog/OptionAndInputDialog.tsx deleted file mode 100644 index cb6b23907..000000000 --- a/web/src/components/overlay/dialog/OptionAndInputDialog.tsx +++ /dev/null @@ -1,166 +0,0 @@ -import { Button } from "@/components/ui/button"; -import { - Dialog, - DialogContent, - DialogDescription, - DialogFooter, - DialogHeader, - DialogTitle, -} from "@/components/ui/dialog"; -import { Input } from "@/components/ui/input"; -import { - Select, - SelectContent, - SelectItem, - SelectTrigger, - SelectValue, -} from "@/components/ui/select"; -import { cn } from "@/lib/utils"; -import { isMobile } from "react-device-detect"; -import { useEffect, useMemo, useState } from "react"; -import { 
useTranslation } from "react-i18next"; - -type Option = { - value: string; - label: string; -}; - -type OptionAndInputDialogProps = { - open: boolean; - title: string; - description?: string; - options: Option[]; - newValueKey: string; - initialValue?: string; - nameLabel: string; - descriptionLabel: string; - setOpen: (open: boolean) => void; - onSave: (value: string) => void; - onCreateNew: (name: string, description: string) => void; -}; - -export default function OptionAndInputDialog({ - open, - title, - description, - options, - newValueKey, - initialValue, - nameLabel, - descriptionLabel, - setOpen, - onSave, - onCreateNew, -}: OptionAndInputDialogProps) { - const { t } = useTranslation("common"); - const firstOption = useMemo(() => options[0]?.value, [options]); - - const [selectedValue, setSelectedValue] = useState( - initialValue ?? firstOption, - ); - const [name, setName] = useState(""); - const [descriptionValue, setDescriptionValue] = useState(""); - - useEffect(() => { - if (open) { - setSelectedValue(initialValue ?? firstOption); - setName(""); - setDescriptionValue(""); - } - }, [open, initialValue, firstOption]); - - const isNew = selectedValue === newValueKey; - const disableSave = !selectedValue || (isNew && name.trim().length === 0); - - const handleSave = () => { - if (!selectedValue) { - return; - } - - const trimmedName = name.trim(); - const trimmedDescription = descriptionValue.trim(); - - if (isNew) { - onCreateNew(trimmedName, trimmedDescription); - } else { - onSave(selectedValue); - } - setOpen(false); - }; - - return ( - - { - if (isMobile) { - e.preventDefault(); - } - }} - > - - {title} - {description && {description}} - - -
- -
- - {isNew && ( -
-
- - setName(e.target.value)} /> -
-
- - setDescriptionValue(e.target.value)} - /> -
-
- )} - - - - - -
-
- ); -} diff --git a/web/src/components/player/JSMpegPlayer.tsx b/web/src/components/player/JSMpegPlayer.tsx index c522ff0a8..f85535013 100644 --- a/web/src/components/player/JSMpegPlayer.tsx +++ b/web/src/components/player/JSMpegPlayer.tsx @@ -118,8 +118,6 @@ export default function JSMpegPlayer({ const videoWrapper = videoRef.current; const canvas = canvasRef.current; let videoElement: JSMpeg.VideoElement | null = null; - let socket: WebSocket | null = null; - let socketMessageHandler: ((event: MessageEvent) => void) | null = null; let frameCount = 0; @@ -154,14 +152,12 @@ export default function JSMpegPlayer({ videoElement.player.source && videoElement.player.source.socket ) { - socket = videoElement.player.source.socket as WebSocket; - socketMessageHandler = (event: MessageEvent) => { + const socket = videoElement.player.source.socket; + socket.addEventListener("message", (event: MessageEvent) => { if (event.data instanceof ArrayBuffer) { bytesReceivedRef.current += event.data.byteLength; } - }; - - socket.addEventListener("message", socketMessageHandler); + }); } // Update stats every second @@ -201,23 +197,11 @@ export default function JSMpegPlayer({ } if (videoElement) { try { - videoElement.player?.destroy(); + // this causes issues in react strict mode + // https://stackoverflow.com/questions/76822128/issue-with-cycjimmy-jsmpeg-player-in-react-18-cannot-read-properties-of-null-o + videoElement.destroy(); // eslint-disable-next-line no-empty } catch (e) {} - - if (videoWrapper) { - videoWrapper.innerHTML = ""; - // @ts-expect-error playerInstance is set by jsmpeg - videoWrapper.playerInstance = null; - } - } - if (socket) { - if (socketMessageHandler) { - socket.removeEventListener("message", socketMessageHandler); - } - - socket = null; - socketMessageHandler = null; } }; } diff --git a/web/src/components/player/LivePlayer.tsx b/web/src/components/player/LivePlayer.tsx index f48a7d475..dbbc289c5 100644 --- a/web/src/components/player/LivePlayer.tsx +++ 
b/web/src/components/player/LivePlayer.tsx @@ -82,11 +82,6 @@ export default function LivePlayer({ const internalContainerRef = useRef(null); const cameraName = useCameraFriendlyName(cameraConfig); - - // player is showing on a dashboard if containerRef is not provided - - const inDashboard = containerRef?.current == null; - // stats const [stats, setStats] = useState({ @@ -421,28 +416,6 @@ export default function LivePlayer({ />
- {offline && inDashboard && ( - <> -
-
-
-
{t("streamOffline.title")}
- -

- - streamOffline.desc - -

-
-
- - )} - {offline && !showStillWithoutActivity && cameraEnabled && (
diff --git a/web/src/hooks/use-allowed-cameras.ts b/web/src/hooks/use-allowed-cameras.ts index 05941922a..9eae59fc2 100644 --- a/web/src/hooks/use-allowed-cameras.ts +++ b/web/src/hooks/use-allowed-cameras.ts @@ -12,7 +12,7 @@ export function useAllowedCameras() { if ( auth.user?.role === "viewer" || auth.user?.role === "admin" || - !auth.isAuthenticated // anonymous internal port + !auth.isAuthenticated // anonymous port 5000 ) { // return all cameras return config?.cameras ? Object.keys(config.cameras) : []; diff --git a/web/src/pages/Exports.tsx b/web/src/pages/Exports.tsx index 5b05439c6..26a75801a 100644 --- a/web/src/pages/Exports.tsx +++ b/web/src/pages/Exports.tsx @@ -1,5 +1,5 @@ import { baseUrl } from "@/api/baseUrl"; -import { CaseCard, ExportCard } from "@/components/card/ExportCard"; +import ExportCard from "@/components/card/ExportCard"; import { AlertDialog, AlertDialogCancel, @@ -11,144 +11,64 @@ import { } from "@/components/ui/alert-dialog"; import { Button } from "@/components/ui/button"; import { Dialog, DialogContent, DialogTitle } from "@/components/ui/dialog"; -import Heading from "@/components/ui/heading"; import { Input } from "@/components/ui/input"; import { Toaster } from "@/components/ui/sonner"; import useKeyboardListener from "@/hooks/use-keyboard-listener"; import { useSearchEffect } from "@/hooks/use-overlay-state"; -import { useHistoryBack } from "@/hooks/use-history-back"; -import { useApiFilterArgs } from "@/hooks/use-api-filter"; import { cn } from "@/lib/utils"; -import { - DeleteClipType, - Export, - ExportCase, - ExportFilter, -} from "@/types/export"; -import OptionAndInputDialog from "@/components/overlay/dialog/OptionAndInputDialog"; +import { DeleteClipType, Export } from "@/types/export"; import axios from "axios"; -import { - MutableRefObject, - useCallback, - useEffect, - useMemo, - useRef, - useState, -} from "react"; -import { isMobile, isMobileOnly } from "react-device-detect"; +import { useCallback, useEffect, 
useMemo, useRef, useState } from "react"; +import { isMobile } from "react-device-detect"; import { useTranslation } from "react-i18next"; import { LuFolderX } from "react-icons/lu"; import { toast } from "sonner"; import useSWR from "swr"; -import ExportFilterGroup from "@/components/filter/ExportFilterGroup"; - -// always parse these as string arrays -const EXPORT_FILTER_ARRAY_KEYS = ["cameras"]; function Exports() { const { t } = useTranslation(["views/exports"]); + const { data: exports, mutate } = useSWR("exports"); useEffect(() => { document.title = t("documentTitle"); }, [t]); - // Filters - - const [exportFilter, setExportFilter, exportSearchParams] = - useApiFilterArgs(EXPORT_FILTER_ARRAY_KEYS); - - // Data - - const { data: cases, mutate: updateCases } = useSWR("cases"); - const { data: rawExports, mutate: updateExports } = useSWR( - exportSearchParams && Object.keys(exportSearchParams).length > 0 - ? ["exports", exportSearchParams] - : "exports", - ); - - const exportsByCase = useMemo<{ [caseId: string]: Export[] }>(() => { - const grouped: { [caseId: string]: Export[] } = {}; - (rawExports ?? 
[]).forEach((exp) => { - const caseId = exp.export_case || "none"; - if (!grouped[caseId]) { - grouped[caseId] = []; - } - - grouped[caseId].push(exp); - }); - return grouped; - }, [rawExports]); - - const filteredCases = useMemo(() => { - if (!cases) { - return []; - } - - return cases.filter((caseItem) => { - const caseExports = exportsByCase[caseItem.id]; - return caseExports?.length; - }); - }, [cases, exportsByCase]); - - const exports = useMemo( - () => exportsByCase["none"] || [], - [exportsByCase], - ); - - const mutate = useCallback(() => { - updateExports(); - updateCases(); - }, [updateExports, updateCases]); - // Search const [search, setSearch] = useState(""); + const filteredExports = useMemo(() => { + if (!search || !exports) { + return exports; + } + + return exports.filter((exp) => + exp.name + .toLowerCase() + .replaceAll("_", " ") + .includes(search.toLowerCase()), + ); + }, [exports, search]); + // Viewing const [selected, setSelected] = useState(); - const [selectedCaseId, setSelectedCaseId] = useState( - undefined, - ); const [selectedAspect, setSelectedAspect] = useState(0.0); - // Handle browser back button to deselect case before navigating away - useHistoryBack({ - enabled: true, - open: selectedCaseId !== undefined, - onClose: () => setSelectedCaseId(undefined), - }); - useSearchEffect("id", (id) => { - if (!rawExports) { + if (!exports) { return false; } - setSelected(rawExports.find((exp) => exp.id == id)); + setSelected(exports.find((exp) => exp.id == id)); return true; }); - useSearchEffect("caseId", (caseId: string) => { - if (!filteredCases) { - return false; - } - - const exists = filteredCases.some((c) => c.id === caseId); - - if (!exists) { - return false; - } - - setSelectedCaseId(caseId); - return true; - }); - - // Modifying + // Deleting const [deleteClip, setDeleteClip] = useState(); - const [exportToAssign, setExportToAssign] = useState(); const onHandleDelete = useCallback(() => { if (!deleteClip) { @@ -163,6 +83,8 @@ 
function Exports() { }); }, [deleteClip, mutate]); + // Renaming + const onHandleRename = useCallback( (id: string, update: string) => { axios @@ -185,7 +107,7 @@ function Exports() { }); }); }, - [mutate, setDeleteClip, t], + [mutate, t], ); // Keyboard Listener @@ -193,27 +115,10 @@ function Exports() { const contentRef = useRef(null); useKeyboardListener([], undefined, contentRef); - const selectedCase = useMemo( - () => filteredCases?.find((c) => c.id === selectedCaseId), - [filteredCases, selectedCaseId], - ); - - const resetCaseDialog = useCallback(() => { - setExportToAssign(undefined); - }, []); - return (
- - setDeleteClip(undefined)} @@ -282,364 +187,47 @@ function Exports() { -
-
+ {exports && ( +
setSearch(e.target.value)} />
- -
- - {selectedCase ? ( - - ) : ( - )} -
- ); -} -type AllExportsViewProps = { - contentRef: MutableRefObject; - search: string; - cases?: ExportCase[]; - exports: Export[]; - exportsByCase: { [caseId: string]: Export[] }; - setSelectedCaseId: (id: string) => void; - setSelected: (e: Export) => void; - renameClip: (id: string, update: string) => void; - setDeleteClip: (d: DeleteClipType | undefined) => void; - onAssignToCase: (e: Export) => void; -}; -function AllExportsView({ - contentRef, - search, - cases, - exports, - exportsByCase, - setSelectedCaseId, - setSelected, - renameClip, - setDeleteClip, - onAssignToCase, -}: AllExportsViewProps) { - const { t } = useTranslation(["views/exports"]); - - // Filter - - const filteredCases = useMemo(() => { - if (!search || !cases) { - return cases || []; - } - - return cases.filter( - (caseItem) => - caseItem.name.toLowerCase().includes(search.toLowerCase()) || - (caseItem.description && - caseItem.description.toLowerCase().includes(search.toLowerCase())), - ); - }, [search, cases]); - - const filteredExports = useMemo(() => { - if (!search) { - return exports; - } - - return exports.filter((exp) => - exp.name - .toLowerCase() - .replaceAll("_", " ") - .includes(search.toLowerCase()), - ); - }, [exports, search]); - - return ( -
- {filteredCases?.length || filteredExports.length ? ( -
- {filteredCases.length > 0 && ( -
- {t("headings.cases")} -
- {cases?.map((item) => ( - { - setSelectedCaseId(item.id); - }} - /> - ))} -
-
- )} - - {filteredExports.length > 0 && ( -
- {t("headings.uncategorizedExports")} -
- {exports.map((item) => ( - - setDeleteClip({ file, exportName }) - } - onAssignToCase={onAssignToCase} - /> - ))} -
-
- )} -
- ) : ( -
- - {t("noExports")} -
- )} -
- ); -} - -type CaseViewProps = { - contentRef: MutableRefObject; - selectedCase: ExportCase; - exports?: Export[]; - search: string; - setSelected: (e: Export) => void; - renameClip: (id: string, update: string) => void; - setDeleteClip: (d: DeleteClipType | undefined) => void; - onAssignToCase: (e: Export) => void; -}; -function CaseView({ - contentRef, - selectedCase, - exports, - search, - setSelected, - renameClip, - setDeleteClip, - onAssignToCase, -}: CaseViewProps) { - const filteredExports = useMemo(() => { - const caseExports = (exports || []).filter( - (e) => e.export_case == selectedCase.id, - ); - - if (!search) { - return caseExports; - } - - return caseExports.filter((exp) => - exp.name - .toLowerCase() - .replaceAll("_", " ") - .includes(search.toLowerCase()), - ); - }, [selectedCase, exports, search]); - - return ( -
-
- - {selectedCase.name} - -
- {selectedCase.description} -
-
-
- {exports?.map((item) => ( - - setDeleteClip({ file, exportName }) - } - onAssignToCase={onAssignToCase} - /> - ))} +
+ {exports && filteredExports && filteredExports.length > 0 ? ( +
+ {Object.values(exports).map((item) => ( + + setDeleteClip({ file, exportName }) + } + /> + ))} +
+ ) : exports !== undefined ? ( +
+ + {t("noExports")} +
+ ) : null}
); } -type CaseAssignmentDialogProps = { - exportToAssign?: Export; - cases?: ExportCase[]; - selectedCaseId?: string; - onClose: () => void; - mutate: () => void; -}; -function CaseAssignmentDialog({ - exportToAssign, - cases, - selectedCaseId, - onClose, - mutate, -}: CaseAssignmentDialogProps) { - const { t } = useTranslation(["views/exports"]); - const caseOptions = useMemo( - () => [ - ...(cases ?? []) - .map((c) => ({ - value: c.id, - label: c.name, - })) - .sort((cA, cB) => cA.label.localeCompare(cB.label)), - { - value: "new", - label: t("caseDialog.newCaseOption"), - }, - ], - [cases, t], - ); - - const handleSave = useCallback( - async (caseId: string) => { - if (!exportToAssign) return; - - try { - await axios.patch(`export/${exportToAssign.id}/case`, { - export_case_id: caseId, - }); - mutate(); - onClose(); - } catch (error: unknown) { - const apiError = error as { - response?: { data?: { message?: string; detail?: string } }; - }; - const errorMessage = - apiError.response?.data?.message || - apiError.response?.data?.detail || - "Unknown error"; - toast.error(t("toast.error.assignCaseFailed", { errorMessage }), { - position: "top-center", - }); - } - }, - [exportToAssign, mutate, onClose, t], - ); - - const handleCreateNew = useCallback( - async (name: string, description: string) => { - if (!exportToAssign) return; - - try { - const createResp = await axios.post("cases", { - name, - description, - }); - - const newCaseId: string | undefined = createResp.data?.id; - - if (newCaseId) { - await axios.patch(`export/${exportToAssign.id}/case`, { - export_case_id: newCaseId, - }); - } - - mutate(); - onClose(); - } catch (error: unknown) { - const apiError = error as { - response?: { data?: { message?: string; detail?: string } }; - }; - const errorMessage = - apiError.response?.data?.message || - apiError.response?.data?.detail || - "Unknown error"; - toast.error(t("toast.error.assignCaseFailed", { errorMessage }), { - position: "top-center", - }); - } - 
}, - [exportToAssign, mutate, onClose, t], - ); - - if (!exportToAssign) { - return null; - } - - return ( - { - if (!open) { - onClose(); - } - }} - options={caseOptions} - nameLabel={t("caseDialog.nameLabel")} - descriptionLabel={t("caseDialog.descriptionLabel")} - initialValue={selectedCaseId} - newValueKey="new" - onSave={handleSave} - onCreateNew={handleCreateNew} - /> - ); -} - -export default Exports; \ No newline at end of file +export default Exports; diff --git a/web/src/pages/Settings.tsx b/web/src/pages/Settings.tsx index 50b72ab80..1d44125cb 100644 --- a/web/src/pages/Settings.tsx +++ b/web/src/pages/Settings.tsx @@ -36,7 +36,6 @@ import NotificationView from "@/views/settings/NotificationsSettingsView"; import EnrichmentsSettingsView from "@/views/settings/EnrichmentsSettingsView"; import UiSettingsView from "@/views/settings/UiSettingsView"; import FrigatePlusSettingsView from "@/views/settings/FrigatePlusSettingsView"; -import MaintenanceSettingsView from "@/views/settings/MaintenanceSettingsView"; import { useSearchEffect } from "@/hooks/use-overlay-state"; import { useNavigate, useSearchParams } from "react-router-dom"; import { useInitialCameraState } from "@/api/ws"; @@ -82,7 +81,6 @@ const allSettingsViews = [ "roles", "notifications", "frigateplus", - "maintenance", ] as const; type SettingsType = (typeof allSettingsViews)[number]; @@ -122,10 +120,6 @@ const settingsGroups = [ label: "frigateplus", items: [{ key: "frigateplus", component: FrigatePlusSettingsView }], }, - { - label: "maintenance", - items: [{ key: "maintenance", component: MaintenanceSettingsView }], - }, ]; const CAMERA_SELECT_BUTTON_PAGES = [ diff --git a/web/src/types/export.ts b/web/src/types/export.ts index c606855f2..fc62bbeec 100644 --- a/web/src/types/export.ts +++ b/web/src/types/export.ts @@ -6,28 +6,9 @@ export type Export = { video_path: string; thumb_path: string; in_progress: boolean; - export_case?: string; -}; - -export type ExportCase = { - id: string; - name: 
string; - description: string; - created_at: number; - updated_at: number; }; export type DeleteClipType = { file: string; exportName: string; }; - -// filtering - -const EXPORT_FILTERS = ["cameras"] as const; -export type ExportFilters = (typeof EXPORT_FILTERS)[number]; -export const DEFAULT_EXPORT_FILTERS: ExportFilters[] = ["cameras"]; - -export type ExportFilter = { - cameras?: string[]; -}; diff --git a/web/src/types/frigateConfig.ts b/web/src/types/frigateConfig.ts index 7c69ef808..94c9ba6e9 100644 --- a/web/src/types/frigateConfig.ts +++ b/web/src/types/frigateConfig.ts @@ -197,6 +197,7 @@ export interface CameraConfig { days: number; mode: string; }; + sync_recordings: boolean; }; review: { alerts: { @@ -541,6 +542,7 @@ export interface FrigateConfig { days: number; mode: string; }; + sync_recordings: boolean; }; rtmp: { diff --git a/web/src/types/stats.ts b/web/src/types/stats.ts index 8b22849be..c98ebe80f 100644 --- a/web/src/types/stats.ts +++ b/web/src/types/stats.ts @@ -24,10 +24,6 @@ export type CameraStats = { pid: number; process_fps: number; skipped_fps: number; - connection_quality: "excellent" | "fair" | "poor" | "unusable"; - expected_fps: number; - reconnects_last_hour: number; - stalls_last_hour: number; }; export type CpuStats = { @@ -41,7 +37,6 @@ export type DetectorStats = { detection_start: number; inference_speed: number; pid: number; - temperature?: number; }; export type EmbeddingsStats = { @@ -61,13 +56,11 @@ export type GpuStats = { enc?: string; dec?: string; pstate?: string; - temp?: number; }; export type NpuStats = { npu: number; mem: string; - temp?: number; }; export type GpuInfo = "vainfo" | "nvinfo"; @@ -75,6 +68,7 @@ export type GpuInfo = "vainfo" | "nvinfo"; export type ServiceStats = { last_updated: number; storage: { [path: string]: StorageStats }; + temperatures: { [apex: string]: number }; uptime: number; latest_version: string; version: string; diff --git a/web/src/types/ws.ts b/web/src/types/ws.ts index 
6e22345eb..1d98b7b01 100644 --- a/web/src/types/ws.ts +++ b/web/src/types/ws.ts @@ -126,32 +126,3 @@ export type TriggerStatus = { type: string; score: number; }; - -export type MediaSyncStats = { - files_checked: number; - orphans_found: number; - orphans_deleted: number; - aborted: boolean; - error: string | null; -}; - -export type MediaSyncTotals = { - files_checked: number; - orphans_found: number; - orphans_deleted: number; -}; - -export type MediaSyncResults = { - [mediaType: string]: MediaSyncStats | MediaSyncTotals; - totals: MediaSyncTotals; -}; - -export type Job = { - id: string; - job_type: string; - status: string; - results?: MediaSyncResults; - start_time?: number; - end_time?: number; - error_message?: string; -}; diff --git a/web/src/views/settings/MaintenanceSettingsView.tsx b/web/src/views/settings/MaintenanceSettingsView.tsx deleted file mode 100644 index f2d1bad30..000000000 --- a/web/src/views/settings/MaintenanceSettingsView.tsx +++ /dev/null @@ -1,442 +0,0 @@ -import Heading from "@/components/ui/heading"; -import { Button } from "@/components/ui/button"; -import { Label } from "@/components/ui/label"; -import { Separator } from "@/components/ui/separator"; -import { Toaster } from "@/components/ui/sonner"; -import ActivityIndicator from "@/components/indicators/activity-indicator"; -import { useCallback, useState } from "react"; -import { useTranslation } from "react-i18next"; -import axios from "axios"; -import { toast } from "sonner"; -import { useJobStatus } from "@/api/ws"; -import { Switch } from "@/components/ui/switch"; -import { LuCheck, LuX } from "react-icons/lu"; -import { cn } from "@/lib/utils"; -import { formatUnixTimestampToDateTime } from "@/utils/dateUtil"; -import { MediaSyncStats } from "@/types/ws"; - -export default function MaintenanceSettingsView() { - const { t } = useTranslation("views/settings"); - const [selectedMediaTypes, setSelectedMediaTypes] = useState([ - "all", - ]); - const [dryRun, setDryRun] = 
useState(true); - const [force, setForce] = useState(false); - const [isSubmitting, setIsSubmitting] = useState(false); - - const MEDIA_TYPES = [ - { id: "event_snapshots", label: t("maintenance.sync.event_snapshots") }, - { id: "event_thumbnails", label: t("maintenance.sync.event_thumbnails") }, - { id: "review_thumbnails", label: t("maintenance.sync.review_thumbnails") }, - { id: "previews", label: t("maintenance.sync.previews") }, - { id: "exports", label: t("maintenance.sync.exports") }, - { id: "recordings", label: t("maintenance.sync.recordings") }, - ]; - - // Subscribe to media sync status via WebSocket - const { payload: currentJob } = useJobStatus("media_sync"); - - const isJobRunning = Boolean( - currentJob && - (currentJob.status === "queued" || currentJob.status === "running"), - ); - - const handleMediaTypeChange = useCallback((id: string, checked: boolean) => { - setSelectedMediaTypes((prev) => { - if (id === "all") { - return checked ? ["all"] : []; - } - - let next = prev.filter((t) => t !== "all"); - if (checked) { - next.push(id); - } else { - next = next.filter((t) => t !== id); - } - return next.length === 0 ? ["all"] : next; - }); - }, []); - - const handleStartSync = useCallback(async () => { - setIsSubmitting(true); - - try { - const response = await axios.post( - "/media/sync", - { - dry_run: dryRun, - media_types: selectedMediaTypes, - force: force, - }, - { - headers: { - "Content-Type": "application/json", - }, - }, - ); - - if (response.status === 202) { - toast.success(t("maintenance.sync.started"), { - position: "top-center", - closeButton: true, - }); - } else if (response.status === 409) { - toast.error(t("maintenance.sync.alreadyRunning"), { - position: "top-center", - closeButton: true, - }); - } - } catch { - toast.error(t("maintenance.sync.error"), { - position: "top-center", - closeButton: true, - }); - } finally { - setIsSubmitting(false); - } - }, [selectedMediaTypes, dryRun, force, t]); - - return ( - <> -
- -
-
-
- - {t("maintenance.sync.title")} - - -
-
-

{t("maintenance.sync.desc")}

-
-
- -
- {/* Media Types Selection */} -
- -
-
- - - handleMediaTypeChange("all", checked) - } - disabled={isJobRunning} - /> -
-
- {MEDIA_TYPES.map((type) => ( -
- - - handleMediaTypeChange(type.id, checked) - } - disabled={ - isJobRunning || selectedMediaTypes.includes("all") - } - /> -
- ))} -
-
-
- - {/* Options */} -
-
-
- -
- -

- {dryRun - ? t("maintenance.sync.dryRunEnabled") - : t("maintenance.sync.dryRunDisabled")} -

-
-
-
- -
-
- -
- -

- {t("maintenance.sync.forceDesc")} -

-
-
-
-
- - {/* Action Buttons */} -
- -
-
-
- -
-
- -
- - {t("maintenance.sync.currentStatus")} - -
- {currentJob?.status === "success" && ( - - )} - {currentJob?.status === "failed" && ( - - )} - {(currentJob?.status === "running" || - currentJob?.status === "queued") && ( - - )} - {t( - `maintenance.sync.status.${currentJob?.status ?? "notRunning"}`, - )} -
-
- - {/* Current Job Status */} -
- {currentJob?.start_time && ( -
- - {t("maintenance.sync.startTime")}: - - - {formatUnixTimestampToDateTime( - currentJob?.start_time ?? "-", - )} - -
- )} - {currentJob?.end_time && ( -
- - {t("maintenance.sync.endTime")}: - - - {formatUnixTimestampToDateTime(currentJob?.end_time)} - -
- )} - {currentJob?.results && ( -
-

- {t("maintenance.sync.results")} -

-
- {/* Individual media type results */} -
- {Object.entries(currentJob.results) - .filter(([key]) => key !== "totals") - .map(([mediaType, stats]) => { - const mediaStats = stats as MediaSyncStats; - return ( -
-

- {t(`maintenance.sync.${mediaType}`)} -

-
-
- - {t( - "maintenance.sync.resultsFields.filesChecked", - )} - - {mediaStats.files_checked} -
-
- - {t( - "maintenance.sync.resultsFields.orphansFound", - )} - - 0 - ? "text-yellow-500" - : "" - } - > - {mediaStats.orphans_found} - -
-
- - {t( - "maintenance.sync.resultsFields.orphansDeleted", - )} - - 0 && - "text-success", - mediaStats.orphans_deleted === 0 && - mediaStats.aborted && - "text-destructive", - )} - > - {mediaStats.orphans_deleted} - -
- {mediaStats.aborted && ( -
- - - {t( - "maintenance.sync.resultsFields.aborted", - )} -
- )} - {mediaStats.error && ( -
- {t( - "maintenance.sync.resultsFields.error", - )} - {": "} - {mediaStats.error} -
- )} -
-
- ); - })} -
- {/* Totals */} - {currentJob.results.totals && ( -
-

- {t("maintenance.sync.resultsFields.totals")} -

-
-
- - {t( - "maintenance.sync.resultsFields.filesChecked", - )} - - - {currentJob.results.totals.files_checked} - -
-
- - {t( - "maintenance.sync.resultsFields.orphansFound", - )} - - 0 - ? "font-medium text-yellow-500" - : "font-medium" - } - > - {currentJob.results.totals.orphans_found} - -
-
- - {t( - "maintenance.sync.resultsFields.orphansDeleted", - )} - - - 0 - ? "text-success" - : "text-muted-foreground", - )} - > - {currentJob.results.totals.orphans_deleted} - -
-
-
- )} -
-
- )} - {currentJob?.error_message && ( -
-

- {t("maintenance.sync.errorLabel")} -

-

{currentJob?.error_message}

-
- )} -
-
-
-
-
-
- - ); -} diff --git a/web/src/views/system/CameraMetrics.tsx b/web/src/views/system/CameraMetrics.tsx index b6c5be4fa..6e24ef5d0 100644 --- a/web/src/views/system/CameraMetrics.tsx +++ b/web/src/views/system/CameraMetrics.tsx @@ -1,7 +1,6 @@ import { useFrigateStats } from "@/api/ws"; import { CameraLineGraph } from "@/components/graph/LineGraph"; import CameraInfoDialog from "@/components/overlay/CameraInfoDialog"; -import { ConnectionQualityIndicator } from "@/components/camera/ConnectionQualityIndicator"; import { Skeleton } from "@/components/ui/skeleton"; import { FrigateConfig } from "@/types/frigateConfig"; import { FrigateStats } from "@/types/stats"; @@ -283,37 +282,8 @@ export default function CameraMetrics({ )}
-
-
- -
- {statsHistory.length > 0 && - statsHistory[statsHistory.length - 1]?.cameras[ - camera.name - ] && ( - - )} +
+
diff --git a/web/src/views/system/GeneralMetrics.tsx b/web/src/views/system/GeneralMetrics.tsx index cdf35c28b..f8ce64851 100644 --- a/web/src/views/system/GeneralMetrics.tsx +++ b/web/src/views/system/GeneralMetrics.tsx @@ -127,6 +127,13 @@ export default function GeneralMetrics({ return undefined; } + if ( + statsHistory.length > 0 && + Object.keys(statsHistory[0].service.temperatures).length == 0 + ) { + return undefined; + } + const series: { [key: string]: { name: string; data: { x: number; y: number }[] }; } = {}; @@ -136,22 +143,22 @@ export default function GeneralMetrics({ return; } - Object.entries(stats.detectors).forEach(([key, detectorStats]) => { - if (detectorStats.temperature === undefined) { + Object.entries(stats.detectors).forEach(([key], cIdx) => { + if (!key.includes("coral")) { return; } - if (!(key in series)) { - series[key] = { - name: key, - data: [], - }; - } + if (cIdx <= Object.keys(stats.service.temperatures).length) { + if (!(key in series)) { + series[key] = { + name: key, + data: [], + }; + } - series[key].data.push({ - x: statsIdx + 1, - y: Math.round(detectorStats.temperature), - }); + const temp = Object.values(stats.service.temperatures)[cIdx]; + series[key].data.push({ x: statsIdx + 1, y: Math.round(temp) }); + } }); }); @@ -368,40 +375,6 @@ export default function GeneralMetrics({ return Object.keys(series).length > 0 ? 
Object.values(series) : undefined; }, [statsHistory]); - const gpuTempSeries = useMemo(() => { - if (!statsHistory) { - return []; - } - - const series: { - [key: string]: { name: string; data: { x: number; y: number }[] }; - } = {}; - let hasValidGpu = false; - - statsHistory.forEach((stats, statsIdx) => { - if (!stats) { - return; - } - - Object.entries(stats.gpu_usages || {}).forEach(([key, stats]) => { - if (!(key in series)) { - series[key] = { name: key, data: [] }; - } - - if (stats.temp !== undefined) { - hasValidGpu = true; - series[key].data.push({ x: statsIdx + 1, y: stats.temp }); - } - }); - }); - - if (!hasValidGpu) { - return []; - } - - return Object.keys(series).length > 0 ? Object.values(series) : undefined; - }, [statsHistory]); - // Check if Intel GPU has all 0% usage values (known bug) const showIntelGpuWarning = useMemo(() => { if (!statsHistory || statsHistory.length < 3) { @@ -482,40 +455,6 @@ export default function GeneralMetrics({ return Object.keys(series).length > 0 ? Object.values(series) : []; }, [statsHistory]); - const npuTempSeries = useMemo(() => { - if (!statsHistory) { - return []; - } - - const series: { - [key: string]: { name: string; data: { x: number; y: number }[] }; - } = {}; - let hasValidNpu = false; - - statsHistory.forEach((stats, statsIdx) => { - if (!stats) { - return; - } - - Object.entries(stats.npu_usages || {}).forEach(([key, stats]) => { - if (!(key in series)) { - series[key] = { name: key, data: [] }; - } - - if (stats.temp !== undefined) { - hasValidNpu = true; - series[key].data.push({ x: statsIdx + 1, y: stats.temp }); - } - }); - }); - - if (!hasValidNpu) { - return []; - } - - return Object.keys(series).length > 0 ? Object.values(series) : undefined; - }, [statsHistory]); - // other processes stats const hardwareType = useMemo(() => { @@ -737,11 +676,7 @@ export default function GeneralMetrics({
{statsHistory[0]?.gpu_usages && ( @@ -876,30 +811,6 @@ export default function GeneralMetrics({ ) : ( )} - {statsHistory.length != 0 ? ( - <> - {gpuTempSeries && gpuTempSeries?.length != 0 && ( -
-
- {t("general.hardwareInfo.gpuTemperature")} -
- {gpuTempSeries.map((series) => ( - - ))} -
- )} - - ) : ( - - )} {statsHistory[0]?.npu_usages && ( <> @@ -923,30 +834,6 @@ export default function GeneralMetrics({ ) : ( )} - {statsHistory.length != 0 ? ( - <> - {npuTempSeries && npuTempSeries?.length != 0 && ( -
-
- {t("general.hardwareInfo.npuTemperature")} -
- {npuTempSeries.map((series) => ( - - ))} -
- )} - - ) : ( - - )} )} diff --git a/web/vite.config.ts b/web/vite.config.ts index 148048995..cb1a580bf 100644 --- a/web/vite.config.ts +++ b/web/vite.config.ts @@ -4,7 +4,7 @@ import { defineConfig } from "vite"; import react from "@vitejs/plugin-react-swc"; import monacoEditorPlugin from "vite-plugin-monaco-editor"; -const proxyHost = process.env.PROXY_HOST || "1ocalhost:5000"; +const proxyHost = process.env.PROXY_HOST || "localhost:5000"; // https://vitejs.dev/config/ export default defineConfig({ From bc457743b6bdb39d7936a337800e42f79b3cb830 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Thu, 11 Dec 2025 10:12:55 -0700 Subject: [PATCH 03/56] Update version --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index d1427b6df..1226a9e01 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ default_target: local COMMIT_HASH := $(shell git log -1 --pretty=format:"%h"|tail -1) -VERSION = 0.17.0 +VERSION = 0.18.0 IMAGE_REPO ?= ghcr.io/blakeblackshear/frigate GITHUB_REF_NAME ?= $(shell git rev-parse --abbrev-ref HEAD) BOARDS= #Initialized empty From 48164f6dfca60ee1dd6816f5550da28228628896 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Mon, 15 Dec 2025 08:28:52 -0700 Subject: [PATCH 04/56] Create scaffolding for case management (#21293) --- frigate/api/defs/request/export_case_body.py | 25 ++++ .../api/defs/response/export_case_response.py | 22 ++++ frigate/api/defs/response/export_response.py | 3 + frigate/api/export.py | 114 +++++++++++++++++- frigate/models.py | 14 +++ migrations/033_create_export_case_table.py | 50 ++++++++ migrations/034_add_export_case_to_exports.py | 40 ++++++ 7 files changed, 267 insertions(+), 1 deletion(-) create mode 100644 frigate/api/defs/request/export_case_body.py create mode 100644 frigate/api/defs/response/export_case_response.py create mode 100644 migrations/033_create_export_case_table.py create mode 100644 migrations/034_add_export_case_to_exports.py diff --git 
a/frigate/api/defs/request/export_case_body.py b/frigate/api/defs/request/export_case_body.py new file mode 100644 index 000000000..66cba58ea --- /dev/null +++ b/frigate/api/defs/request/export_case_body.py @@ -0,0 +1,25 @@ +from typing import Optional + +from pydantic import BaseModel, Field + + +class ExportCaseCreateBody(BaseModel): + """Request body for creating a new export case.""" + + name: str = Field(max_length=100, description="Friendly name of the export case") + description: Optional[str] = Field( + default=None, description="Optional description of the export case" + ) + + +class ExportCaseUpdateBody(BaseModel): + """Request body for updating an existing export case.""" + + name: Optional[str] = Field( + default=None, + max_length=100, + description="Updated friendly name of the export case", + ) + description: Optional[str] = Field( + default=None, description="Updated description of the export case" + ) diff --git a/frigate/api/defs/response/export_case_response.py b/frigate/api/defs/response/export_case_response.py new file mode 100644 index 000000000..713e16683 --- /dev/null +++ b/frigate/api/defs/response/export_case_response.py @@ -0,0 +1,22 @@ +from typing import List, Optional + +from pydantic import BaseModel, Field + + +class ExportCaseModel(BaseModel): + """Model representing a single export case.""" + + id: str = Field(description="Unique identifier for the export case") + name: str = Field(description="Friendly name of the export case") + description: Optional[str] = Field( + default=None, description="Optional description of the export case" + ) + created_at: float = Field( + description="Unix timestamp when the export case was created" + ) + updated_at: float = Field( + description="Unix timestamp when the export case was last updated" + ) + + +ExportCasesResponse = List[ExportCaseModel] diff --git a/frigate/api/defs/response/export_response.py b/frigate/api/defs/response/export_response.py index 63a9e91a1..600794f97 100644 --- 
a/frigate/api/defs/response/export_response.py +++ b/frigate/api/defs/response/export_response.py @@ -15,6 +15,9 @@ class ExportModel(BaseModel): in_progress: bool = Field( description="Whether the export is currently being processed" ) + export_case_id: Optional[str] = Field( + default=None, description="ID of the export case this export belongs to" + ) class StartExportResponse(BaseModel): diff --git a/frigate/api/export.py b/frigate/api/export.py index 24fed93b0..a6051ecb9 100644 --- a/frigate/api/export.py +++ b/frigate/api/export.py @@ -19,8 +19,16 @@ from frigate.api.auth import ( require_camera_access, require_role, ) +from frigate.api.defs.request.export_case_body import ( + ExportCaseCreateBody, + ExportCaseUpdateBody, +) from frigate.api.defs.request.export_recordings_body import ExportRecordingsBody from frigate.api.defs.request.export_rename_body import ExportRenameBody +from frigate.api.defs.response.export_case_response import ( + ExportCaseModel, + ExportCasesResponse, +) from frigate.api.defs.response.export_response import ( ExportModel, ExportsResponse, @@ -29,7 +37,7 @@ from frigate.api.defs.response.export_response import ( from frigate.api.defs.response.generic_response import GenericResponse from frigate.api.defs.tags import Tags from frigate.const import CLIPS_DIR, EXPORT_DIR -from frigate.models import Export, Previews, Recordings +from frigate.models import Export, ExportCase, Previews, Recordings from frigate.record.export import ( PlaybackFactorEnum, PlaybackSourceEnum, @@ -63,6 +71,110 @@ def get_exports( return JSONResponse(content=[e for e in exports]) +@router.get( + "/cases", + response_model=ExportCasesResponse, + dependencies=[Depends(allow_any_authenticated())], + summary="Get export cases", + description="Gets all export cases from the database.", +) +def get_export_cases(): + cases = ( + ExportCase.select().order_by(ExportCase.created_at.desc()).dicts().iterator() + ) + return JSONResponse(content=[c for c in cases]) + + 
+@router.post( + "/cases", + response_model=ExportCaseModel, + dependencies=[Depends(require_role(["admin"]))], + summary="Create export case", + description="Creates a new export case.", +) +def create_export_case(body: ExportCaseCreateBody): + case = ExportCase.create( + id="".join(random.choices(string.ascii_lowercase + string.digits, k=12)), + name=body.name, + description=body.description, + created_at=Path().stat().st_mtime, + updated_at=Path().stat().st_mtime, + ) + return JSONResponse(content=model_to_dict(case)) + + +@router.get( + "/cases/{case_id}", + response_model=ExportCaseModel, + dependencies=[Depends(allow_any_authenticated())], + summary="Get a single export case", + description="Gets a specific export case by ID.", +) +def get_export_case(case_id: str): + try: + case = ExportCase.get(ExportCase.id == case_id) + return JSONResponse(content=model_to_dict(case)) + except DoesNotExist: + return JSONResponse( + content={"success": False, "message": "Export case not found"}, + status_code=404, + ) + + +@router.patch( + "/cases/{case_id}", + response_model=GenericResponse, + dependencies=[Depends(require_role(["admin"]))], + summary="Update export case", + description="Updates an existing export case.", +) +def update_export_case(case_id: str, body: ExportCaseUpdateBody): + try: + case = ExportCase.get(ExportCase.id == case_id) + except DoesNotExist: + return JSONResponse( + content={"success": False, "message": "Export case not found"}, + status_code=404, + ) + + if body.name is not None: + case.name = body.name + if body.description is not None: + case.description = body.description + + case.save() + + return JSONResponse( + content={"success": True, "message": "Successfully updated export case."} + ) + + +@router.delete( + "/cases/{case_id}", + response_model=GenericResponse, + dependencies=[Depends(require_role(["admin"]))], + summary="Delete export case", + description="""Deletes an export case.\n Exports that reference this case will have their 
export_case set to null.\n """, +) +def delete_export_case(case_id: str): + try: + case = ExportCase.get(ExportCase.id == case_id) + except DoesNotExist: + return JSONResponse( + content={"success": False, "message": "Export case not found"}, + status_code=404, + ) + + # Unassign exports from this case but keep the exports themselves + Export.update(export_case=None).where(Export.export_case == case).execute() + + case.delete_instance() + + return JSONResponse( + content={"success": True, "message": "Successfully deleted export case."} + ) + + @router.post( "/export/{camera_name}/start/{start_time}/end/{end_time}", response_model=StartExportResponse, diff --git a/frigate/models.py b/frigate/models.py index 93f6cb54f..fd5061613 100644 --- a/frigate/models.py +++ b/frigate/models.py @@ -80,6 +80,14 @@ class Recordings(Model): regions = IntegerField(null=True) +class ExportCase(Model): + id = CharField(null=False, primary_key=True, max_length=30) + name = CharField(index=True, max_length=100) + description = TextField(null=True) + created_at = DateTimeField() + updated_at = DateTimeField() + + class Export(Model): id = CharField(null=False, primary_key=True, max_length=30) camera = CharField(index=True, max_length=20) @@ -88,6 +96,12 @@ class Export(Model): video_path = CharField(unique=True) thumb_path = CharField(unique=True) in_progress = BooleanField() + export_case = ForeignKeyField( + ExportCase, + null=True, + backref="exports", + column_name="export_case_id", + ) class ReviewSegment(Model): diff --git a/migrations/033_create_export_case_table.py b/migrations/033_create_export_case_table.py new file mode 100644 index 000000000..08edcbc32 --- /dev/null +++ b/migrations/033_create_export_case_table.py @@ -0,0 +1,50 @@ +"""Peewee migrations -- 033_create_export_case_table.py. 
+ +Some examples (model - class or model name):: + + > Model = migrator.orm['model_name'] # Return model in current state by name + + > migrator.sql(sql) # Run custom SQL + > migrator.python(func, *args, **kwargs) # Run python code + > migrator.create_model(Model) # Create a model (could be used as decorator) + > migrator.remove_model(model, cascade=True) # Remove a model + > migrator.add_fields(model, **fields) # Add fields to a model + > migrator.change_fields(model, **fields) # Change fields + > migrator.remove_fields(model, *field_names, cascade=True) + > migrator.rename_field(model, old_field_name, new_field_name) + > migrator.rename_table(model, new_table_name) + > migrator.add_index(model, *col_names, unique=False) + > migrator.drop_index(model, *col_names) + > migrator.add_not_null(model, *field_names) + > migrator.drop_not_null(model, *field_names) + > migrator.add_default(model, field_name, default) + +""" + +import peewee as pw + +SQL = pw.SQL + + +def migrate(migrator, database, fake=False, **kwargs): + migrator.sql( + """ + CREATE TABLE IF NOT EXISTS "exportcase" ( + "id" VARCHAR(30) NOT NULL PRIMARY KEY, + "name" VARCHAR(100) NOT NULL, + "description" TEXT NULL, + "created_at" DATETIME NOT NULL, + "updated_at" DATETIME NOT NULL + ) + """ + ) + migrator.sql( + 'CREATE INDEX IF NOT EXISTS "exportcase_name" ON "exportcase" ("name")' + ) + migrator.sql( + 'CREATE INDEX IF NOT EXISTS "exportcase_created_at" ON "exportcase" ("created_at")' + ) + + +def rollback(migrator, database, fake=False, **kwargs): + pass diff --git a/migrations/034_add_export_case_to_exports.py b/migrations/034_add_export_case_to_exports.py new file mode 100644 index 000000000..da9e1d4ac --- /dev/null +++ b/migrations/034_add_export_case_to_exports.py @@ -0,0 +1,40 @@ +"""Peewee migrations -- 034_add_export_case_to_exports.py. 
+ +Some examples (model - class or model name):: + + > Model = migrator.orm['model_name'] # Return model in current state by name + + > migrator.sql(sql) # Run custom SQL + > migrator.python(func, *args, **kwargs) # Run python code + > migrator.create_model(Model) # Create a model (could be used as decorator) + > migrator.remove_model(model, cascade=True) # Remove a model + > migrator.add_fields(model, **fields) # Add fields to a model + > migrator.change_fields(model, **fields) # Change fields + > migrator.remove_fields(model, *field_names, cascade=True) + > migrator.rename_field(model, old_field_name, new_field_name) + > migrator.rename_table(model, new_table_name) + > migrator.add_index(model, *col_names, unique=False) + > migrator.drop_index(model, *col_names) + > migrator.add_not_null(model, *field_names) + > migrator.drop_not_null(model, *field_names) + > migrator.add_default(model, field_name, default) + +""" + +import peewee as pw + +SQL = pw.SQL + + +def migrate(migrator, database, fake=False, **kwargs): + # Add nullable export_case_id column to export table + migrator.sql('ALTER TABLE "export" ADD COLUMN "export_case_id" VARCHAR(30) NULL') + + # Index for faster case-based queries + migrator.sql( + 'CREATE INDEX IF NOT EXISTS "export_export_case_id" ON "export" ("export_case_id")' + ) + + +def rollback(migrator, database, fake=False, **kwargs): + pass From cffa54c80d26a6c9bb03170c2e71b7570a70e073 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Mon, 15 Dec 2025 08:54:13 -0700 Subject: [PATCH 05/56] implement case management for export apis (#21295) --- frigate/api/defs/request/export_case_body.py | 10 +++ .../defs/request/export_recordings_body.py | 6 ++ frigate/api/export.py | 90 +++++++++++++++++-- frigate/record/export.py | 27 +++--- 4 files changed, 113 insertions(+), 20 deletions(-) diff --git a/frigate/api/defs/request/export_case_body.py b/frigate/api/defs/request/export_case_body.py index 66cba58ea..35cd8ff7f 100644 --- 
a/frigate/api/defs/request/export_case_body.py +++ b/frigate/api/defs/request/export_case_body.py @@ -23,3 +23,13 @@ class ExportCaseUpdateBody(BaseModel): description: Optional[str] = Field( default=None, description="Updated description of the export case" ) + + +class ExportCaseAssignBody(BaseModel): + """Request body for assigning or unassigning an export to a case.""" + + export_case_id: Optional[str] = Field( + default=None, + max_length=30, + description="Case ID to assign to the export, or null to unassign", + ) diff --git a/frigate/api/defs/request/export_recordings_body.py b/frigate/api/defs/request/export_recordings_body.py index 19fc2f019..1a6f609bf 100644 --- a/frigate/api/defs/request/export_recordings_body.py +++ b/frigate/api/defs/request/export_recordings_body.py @@ -18,3 +18,9 @@ class ExportRecordingsBody(BaseModel): ) name: Optional[str] = Field(title="Friendly name", default=None, max_length=256) image_path: Union[str, SkipJsonSchema[None]] = None + export_case_id: Optional[str] = Field( + default=None, + title="Export case ID", + max_length=30, + description="ID of the export case to assign this export to", + ) diff --git a/frigate/api/export.py b/frigate/api/export.py index a6051ecb9..812a1b4b2 100644 --- a/frigate/api/export.py +++ b/frigate/api/export.py @@ -4,10 +4,10 @@ import logging import random import string from pathlib import Path -from typing import List +from typing import List, Optional import psutil -from fastapi import APIRouter, Depends, Request +from fastapi import APIRouter, Depends, Query, Request from fastapi.responses import JSONResponse from pathvalidate import sanitize_filepath from peewee import DoesNotExist @@ -20,6 +20,7 @@ from frigate.api.auth import ( require_role, ) from frigate.api.defs.request.export_case_body import ( + ExportCaseAssignBody, ExportCaseCreateBody, ExportCaseUpdateBody, ) @@ -60,14 +61,32 @@ router = APIRouter(tags=[Tags.export]) ) def get_exports( allowed_cameras: List[str] = 
Depends(get_allowed_cameras_for_filter), + export_case_id: Optional[str] = None, + camera: Optional[List[str]] = Query(default=None), + start_date: Optional[float] = None, + end_date: Optional[float] = None, ): - exports = ( - Export.select() - .where(Export.camera << allowed_cameras) - .order_by(Export.date.desc()) - .dicts() - .iterator() - ) + query = Export.select().where(Export.camera << allowed_cameras) + + if export_case_id is not None: + if export_case_id == "unassigned": + query = query.where(Export.export_case.is_null(True)) + else: + query = query.where(Export.export_case == export_case_id) + + if camera: + filtered_cameras = [c for c in camera if c in allowed_cameras] + if not filtered_cameras: + return JSONResponse(content=[]) + query = query.where(Export.camera << filtered_cameras) + + if start_date is not None: + query = query.where(Export.date >= start_date) + + if end_date is not None: + query = query.where(Export.date <= end_date) + + exports = query.order_by(Export.date.desc()).dicts().iterator() return JSONResponse(content=[e for e in exports]) @@ -175,6 +194,48 @@ def delete_export_case(case_id: str): ) +@router.patch( + "/export/{export_id}/case", + response_model=GenericResponse, + dependencies=[Depends(require_role(["admin"]))], + summary="Assign export to case", + description=( + "Assigns an export to a case, or unassigns it if export_case_id is null." 
+ ), +) +async def assign_export_case( + export_id: str, + body: ExportCaseAssignBody, + request: Request, +): + try: + export: Export = Export.get(Export.id == export_id) + await require_camera_access(export.camera, request=request) + except DoesNotExist: + return JSONResponse( + content={"success": False, "message": "Export not found."}, + status_code=404, + ) + + if body.export_case_id is not None: + try: + ExportCase.get(ExportCase.id == body.export_case_id) + except DoesNotExist: + return JSONResponse( + content={"success": False, "message": "Export case not found."}, + status_code=404, + ) + export.export_case = body.export_case_id + else: + export.export_case = None + + export.save() + + return JSONResponse( + content={"success": True, "message": "Successfully updated export case."} + ) + + @router.post( "/export/{camera_name}/start/{start_time}/end/{end_time}", response_model=StartExportResponse, @@ -205,6 +266,16 @@ def export_recording( friendly_name = body.name existing_image = sanitize_filepath(body.image_path) if body.image_path else None + export_case_id = body.export_case_id + if export_case_id is not None: + try: + ExportCase.get(ExportCase.id == export_case_id) + except DoesNotExist: + return JSONResponse( + content={"success": False, "message": "Export case not found"}, + status_code=404, + ) + # Ensure that existing_image is a valid path if existing_image and not existing_image.startswith(CLIPS_DIR): return JSONResponse( @@ -273,6 +344,7 @@ def export_recording( if playback_source in PlaybackSourceEnum.__members__.values() else PlaybackSourceEnum.recordings ), + export_case_id, ) exporter.start() return JSONResponse( diff --git a/frigate/record/export.py b/frigate/record/export.py index d4b49bb4b..9a2a77ebf 100644 --- a/frigate/record/export.py +++ b/frigate/record/export.py @@ -64,6 +64,7 @@ class RecordingExporter(threading.Thread): end_time: int, playback_factor: PlaybackFactorEnum, playback_source: PlaybackSourceEnum, + export_case_id: 
Optional[str] = None, ) -> None: super().__init__() self.config = config @@ -75,6 +76,7 @@ class RecordingExporter(threading.Thread): self.end_time = end_time self.playback_factor = playback_factor self.playback_source = playback_source + self.export_case_id = export_case_id # ensure export thumb dir Path(os.path.join(CLIPS_DIR, "export")).mkdir(exist_ok=True) @@ -348,17 +350,20 @@ class RecordingExporter(threading.Thread): video_path = f"{EXPORT_DIR}/{self.camera}_{filename_start_datetime}-{filename_end_datetime}_{cleaned_export_id}.mp4" thumb_path = self.save_thumbnail(self.export_id) - Export.insert( - { - Export.id: self.export_id, - Export.camera: self.camera, - Export.name: export_name, - Export.date: self.start_time, - Export.video_path: video_path, - Export.thumb_path: thumb_path, - Export.in_progress: True, - } - ).execute() + export_values = { + Export.id: self.export_id, + Export.camera: self.camera, + Export.name: export_name, + Export.date: self.start_time, + Export.video_path: video_path, + Export.thumb_path: thumb_path, + Export.in_progress: True, + } + + if self.export_case_id is not None: + export_values[Export.export_case] = self.export_case_id + + Export.insert(export_values).execute() try: if self.playback_source == PlaybackSourceEnum.recordings: From 85feb4edcba803a47552b6bb31f98b74e55bcb33 Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Mon, 15 Dec 2025 09:58:50 -0600 Subject: [PATCH 06/56] refactor vainfo to search for first GPU (#21296) use existing LibvaGpuSelector to pick appropritate libva device --- frigate/api/app.py | 11 ++++++++++- frigate/util/services.py | 17 +++++++++++------ 2 files changed, 21 insertions(+), 7 deletions(-) diff --git a/frigate/api/app.py b/frigate/api/app.py index 440adfce4..3a91c8ebb 100644 --- a/frigate/api/app.py +++ b/frigate/api/app.py @@ -37,6 +37,7 @@ from frigate.config.camera.updater import ( CameraConfigUpdateEnum, CameraConfigUpdateTopic, ) +from 
frigate.ffmpeg_presets import FFMPEG_HWACCEL_VAAPI, _gpu_selector from frigate.models import Event, Timeline from frigate.stats.prometheus import get_metrics, update_metrics from frigate.util.builtin import ( @@ -463,7 +464,15 @@ def config_set(request: Request, body: AppConfigSetBody): @router.get("/vainfo", dependencies=[Depends(allow_any_authenticated())]) def vainfo(): - vainfo = vainfo_hwaccel() + # Use LibvaGpuSelector to pick an appropriate libva device (if available) + selected_gpu = "" + try: + selected_gpu = _gpu_selector.get_gpu_arg(FFMPEG_HWACCEL_VAAPI, 0) or "" + except Exception: + selected_gpu = "" + + # If selected_gpu is empty, pass None to vainfo_hwaccel to run plain `vainfo`. + vainfo = vainfo_hwaccel(device_name=selected_gpu or None) return JSONResponse( content={ "return_code": vainfo.returncode, diff --git a/frigate/util/services.py b/frigate/util/services.py index 64d83833d..9420732be 100644 --- a/frigate/util/services.py +++ b/frigate/util/services.py @@ -591,12 +591,17 @@ def ffprobe_stream(ffmpeg, path: str, detailed: bool = False) -> sp.CompletedPro def vainfo_hwaccel(device_name: Optional[str] = None) -> sp.CompletedProcess: """Run vainfo.""" - ffprobe_cmd = ( - ["vainfo"] - if not device_name - else ["vainfo", "--display", "drm", "--device", f"/dev/dri/{device_name}"] - ) - return sp.run(ffprobe_cmd, capture_output=True) + if not device_name: + cmd = ["vainfo"] + else: + if os.path.isabs(device_name) and device_name.startswith("/dev/dri/"): + device_path = device_name + else: + device_path = f"/dev/dri/{device_name}" + + cmd = ["vainfo", "--display", "drm", "--device", device_path] + + return sp.run(cmd, capture_output=True) def get_nvidia_driver_info() -> dict[str, Any]: From 004bb7d80db79634fec173d941880368c3da7728 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Mon, 15 Dec 2025 13:10:50 -0700 Subject: [PATCH 07/56] Case management UI (#21299) * Refactor export cards to match existing cards in other UI pages * Show cases separately 
from exports * Add proper filtering and display of cases * Add ability to edit and select cases for exports * Cleanup typing * Hide if no unassigned * Cleanup hiding logic * fix scrolling * Improve layout --- web/public/locales/en/views/exports.json | 18 +- web/src/components/card/ExportCard.tsx | 213 +++---- .../overlay/dialog/OptionAndInputDialog.tsx | 166 ++++++ web/src/pages/Exports.tsx | 524 ++++++++++++++++-- web/src/types/export.ts | 9 + 5 files changed, 776 insertions(+), 154 deletions(-) create mode 100644 web/src/components/overlay/dialog/OptionAndInputDialog.tsx diff --git a/web/public/locales/en/views/exports.json b/web/public/locales/en/views/exports.json index 4a79d20e1..8f9e8205e 100644 --- a/web/public/locales/en/views/exports.json +++ b/web/public/locales/en/views/exports.json @@ -2,6 +2,10 @@ "documentTitle": "Export - Frigate", "search": "Search", "noExports": "No exports found", + "headings": { + "cases": "Cases", + "uncategorizedExports": "Uncategorized Exports" + }, "deleteExport": "Delete Export", "deleteExport.desc": "Are you sure you want to delete {{exportName}}?", "editExport": { @@ -13,11 +17,21 @@ "shareExport": "Share export", "downloadVideo": "Download video", "editName": "Edit name", - "deleteExport": "Delete export" + "deleteExport": "Delete export", + "assignToCase": "Add to case" }, "toast": { "error": { - "renameExportFailed": "Failed to rename export: {{errorMessage}}" + "renameExportFailed": "Failed to rename export: {{errorMessage}}", + "assignCaseFailed": "Failed to update case assignment: {{errorMessage}}" } + }, + "caseDialog": { + "title": "Add to case", + "description": "Choose an existing case or create a new one.", + "selectLabel": "Case", + "newCaseOption": "Create new case", + "nameLabel": "Case name", + "descriptionLabel": "Description" } } diff --git a/web/src/components/card/ExportCard.tsx b/web/src/components/card/ExportCard.tsx index 021524532..fc7964c18 100644 --- a/web/src/components/card/ExportCard.tsx +++ 
b/web/src/components/card/ExportCard.tsx @@ -1,9 +1,8 @@ import ActivityIndicator from "../indicators/activity-indicator"; -import { LuTrash } from "react-icons/lu"; import { Button } from "../ui/button"; import { useCallback, useState } from "react"; -import { isDesktop, isMobile } from "react-device-detect"; -import { FaDownload, FaPlay, FaShareAlt } from "react-icons/fa"; +import { isMobile } from "react-device-detect"; +import { FiMoreVertical } from "react-icons/fi"; import { Skeleton } from "../ui/skeleton"; import { Dialog, @@ -14,35 +13,62 @@ import { } from "../ui/dialog"; import { Input } from "../ui/input"; import useKeyboardListener from "@/hooks/use-keyboard-listener"; -import { DeleteClipType, Export } from "@/types/export"; -import { MdEditSquare } from "react-icons/md"; +import { DeleteClipType, Export, ExportCase } from "@/types/export"; import { baseUrl } from "@/api/baseUrl"; import { cn } from "@/lib/utils"; import { shareOrCopy } from "@/utils/browserUtil"; import { useTranslation } from "react-i18next"; import { ImageShadowOverlay } from "../overlay/ImageShadowOverlay"; import BlurredIconButton from "../button/BlurredIconButton"; -import { Tooltip, TooltipContent, TooltipTrigger } from "../ui/tooltip"; import { useIsAdmin } from "@/hooks/use-is-admin"; +import { + DropdownMenu, + DropdownMenuContent, + DropdownMenuItem, + DropdownMenuTrigger, +} from "../ui/dropdown-menu"; +import { FaFolder } from "react-icons/fa"; -type ExportProps = { +type CaseCardProps = { + className: string; + exportCase: ExportCase; + onSelect: () => void; +}; +export function CaseCard({ className, exportCase, onSelect }: CaseCardProps) { + return ( +
onSelect()} + > +
+ +
{exportCase.name}
+
+
+ ); +} + +type ExportCardProps = { className: string; exportedRecording: Export; onSelect: (selected: Export) => void; onRename: (original: string, update: string) => void; onDelete: ({ file, exportName }: DeleteClipType) => void; + onAssignToCase?: (selected: Export) => void; }; - -export default function ExportCard({ +export function ExportCard({ className, exportedRecording, onSelect, onRename, onDelete, -}: ExportProps) { + onAssignToCase, +}: ExportCardProps) { const { t } = useTranslation(["views/exports"]); const isAdmin = useIsAdmin(); - const [hovered, setHovered] = useState(false); const [loading, setLoading] = useState( exportedRecording.thumb_path.length > 0, ); @@ -136,12 +162,14 @@ export default function ExportCard({
setHovered(true) : undefined} - onMouseLeave={isDesktop ? () => setHovered(false) : undefined} - onClick={isDesktop ? undefined : () => setHovered(!hovered)} + onClick={() => { + if (!exportedRecording.in_progress) { + onSelect(exportedRecording); + } + }} > {exportedRecording.in_progress ? ( @@ -158,95 +186,88 @@ export default function ExportCard({ )} )} - {hovered && ( - <> -
-
-
- {!exportedRecording.in_progress && ( - - - - shareOrCopy( - `${baseUrl}export?id=${exportedRecording.id}`, - exportedRecording.name.replaceAll("_", " "), - ) - } - > - - - - {t("tooltip.shareExport")} - - )} - {!exportedRecording.in_progress && ( + {!exportedRecording.in_progress && ( +
+ + + e.stopPropagation()} + > + + + + + { + e.stopPropagation(); + shareOrCopy( + `${baseUrl}export?id=${exportedRecording.id}`, + exportedRecording.name.replaceAll("_", " "), + ); + }} + > + {t("tooltip.shareExport")} + + e.stopPropagation()} > - - - - - - - - {t("tooltip.downloadVideo")} - - + {t("tooltip.downloadVideo")} - )} - {isAdmin && !exportedRecording.in_progress && ( - - - - setEditName({ - original: exportedRecording.name, - update: undefined, - }) - } - > - - - - {t("tooltip.editName")} - + + {isAdmin && onAssignToCase && ( + { + e.stopPropagation(); + onAssignToCase(exportedRecording); + }} + > + {t("tooltip.assignToCase")} + )} {isAdmin && ( - - - - onDelete({ - file: exportedRecording.id, - exportName: exportedRecording.name, - }) - } - > - - - - {t("tooltip.deleteExport")} - + { + e.stopPropagation(); + setEditName({ + original: exportedRecording.name, + update: undefined, + }); + }} + > + {t("tooltip.editName")} + )} -
-
- - {!exportedRecording.in_progress && ( - - )} - + {isAdmin && ( + { + e.stopPropagation(); + onDelete({ + file: exportedRecording.id, + exportName: exportedRecording.name, + }); + }} + > + {t("tooltip.deleteExport")} + + )} + + +
)} {loading && ( diff --git a/web/src/components/overlay/dialog/OptionAndInputDialog.tsx b/web/src/components/overlay/dialog/OptionAndInputDialog.tsx new file mode 100644 index 000000000..cb6b23907 --- /dev/null +++ b/web/src/components/overlay/dialog/OptionAndInputDialog.tsx @@ -0,0 +1,166 @@ +import { Button } from "@/components/ui/button"; +import { + Dialog, + DialogContent, + DialogDescription, + DialogFooter, + DialogHeader, + DialogTitle, +} from "@/components/ui/dialog"; +import { Input } from "@/components/ui/input"; +import { + Select, + SelectContent, + SelectItem, + SelectTrigger, + SelectValue, +} from "@/components/ui/select"; +import { cn } from "@/lib/utils"; +import { isMobile } from "react-device-detect"; +import { useEffect, useMemo, useState } from "react"; +import { useTranslation } from "react-i18next"; + +type Option = { + value: string; + label: string; +}; + +type OptionAndInputDialogProps = { + open: boolean; + title: string; + description?: string; + options: Option[]; + newValueKey: string; + initialValue?: string; + nameLabel: string; + descriptionLabel: string; + setOpen: (open: boolean) => void; + onSave: (value: string) => void; + onCreateNew: (name: string, description: string) => void; +}; + +export default function OptionAndInputDialog({ + open, + title, + description, + options, + newValueKey, + initialValue, + nameLabel, + descriptionLabel, + setOpen, + onSave, + onCreateNew, +}: OptionAndInputDialogProps) { + const { t } = useTranslation("common"); + const firstOption = useMemo(() => options[0]?.value, [options]); + + const [selectedValue, setSelectedValue] = useState( + initialValue ?? firstOption, + ); + const [name, setName] = useState(""); + const [descriptionValue, setDescriptionValue] = useState(""); + + useEffect(() => { + if (open) { + setSelectedValue(initialValue ?? 
firstOption); + setName(""); + setDescriptionValue(""); + } + }, [open, initialValue, firstOption]); + + const isNew = selectedValue === newValueKey; + const disableSave = !selectedValue || (isNew && name.trim().length === 0); + + const handleSave = () => { + if (!selectedValue) { + return; + } + + const trimmedName = name.trim(); + const trimmedDescription = descriptionValue.trim(); + + if (isNew) { + onCreateNew(trimmedName, trimmedDescription); + } else { + onSave(selectedValue); + } + setOpen(false); + }; + + return ( + + { + if (isMobile) { + e.preventDefault(); + } + }} + > + + {title} + {description && {description}} + + +
+ +
+ + {isNew && ( +
+
+ + setName(e.target.value)} /> +
+
+ + setDescriptionValue(e.target.value)} + /> +
+
+ )} + + + + + +
+
+ ); +} diff --git a/web/src/pages/Exports.tsx b/web/src/pages/Exports.tsx index 26a75801a..5b05439c6 100644 --- a/web/src/pages/Exports.tsx +++ b/web/src/pages/Exports.tsx @@ -1,5 +1,5 @@ import { baseUrl } from "@/api/baseUrl"; -import ExportCard from "@/components/card/ExportCard"; +import { CaseCard, ExportCard } from "@/components/card/ExportCard"; import { AlertDialog, AlertDialogCancel, @@ -11,64 +11,144 @@ import { } from "@/components/ui/alert-dialog"; import { Button } from "@/components/ui/button"; import { Dialog, DialogContent, DialogTitle } from "@/components/ui/dialog"; +import Heading from "@/components/ui/heading"; import { Input } from "@/components/ui/input"; import { Toaster } from "@/components/ui/sonner"; import useKeyboardListener from "@/hooks/use-keyboard-listener"; import { useSearchEffect } from "@/hooks/use-overlay-state"; +import { useHistoryBack } from "@/hooks/use-history-back"; +import { useApiFilterArgs } from "@/hooks/use-api-filter"; import { cn } from "@/lib/utils"; -import { DeleteClipType, Export } from "@/types/export"; +import { + DeleteClipType, + Export, + ExportCase, + ExportFilter, +} from "@/types/export"; +import OptionAndInputDialog from "@/components/overlay/dialog/OptionAndInputDialog"; import axios from "axios"; -import { useCallback, useEffect, useMemo, useRef, useState } from "react"; -import { isMobile } from "react-device-detect"; +import { + MutableRefObject, + useCallback, + useEffect, + useMemo, + useRef, + useState, +} from "react"; +import { isMobile, isMobileOnly } from "react-device-detect"; import { useTranslation } from "react-i18next"; import { LuFolderX } from "react-icons/lu"; import { toast } from "sonner"; import useSWR from "swr"; +import ExportFilterGroup from "@/components/filter/ExportFilterGroup"; + +// always parse these as string arrays +const EXPORT_FILTER_ARRAY_KEYS = ["cameras"]; function Exports() { const { t } = useTranslation(["views/exports"]); - const { data: exports, mutate } = 
useSWR("exports"); useEffect(() => { document.title = t("documentTitle"); }, [t]); + // Filters + + const [exportFilter, setExportFilter, exportSearchParams] = + useApiFilterArgs(EXPORT_FILTER_ARRAY_KEYS); + + // Data + + const { data: cases, mutate: updateCases } = useSWR("cases"); + const { data: rawExports, mutate: updateExports } = useSWR( + exportSearchParams && Object.keys(exportSearchParams).length > 0 + ? ["exports", exportSearchParams] + : "exports", + ); + + const exportsByCase = useMemo<{ [caseId: string]: Export[] }>(() => { + const grouped: { [caseId: string]: Export[] } = {}; + (rawExports ?? []).forEach((exp) => { + const caseId = exp.export_case || "none"; + if (!grouped[caseId]) { + grouped[caseId] = []; + } + + grouped[caseId].push(exp); + }); + return grouped; + }, [rawExports]); + + const filteredCases = useMemo(() => { + if (!cases) { + return []; + } + + return cases.filter((caseItem) => { + const caseExports = exportsByCase[caseItem.id]; + return caseExports?.length; + }); + }, [cases, exportsByCase]); + + const exports = useMemo( + () => exportsByCase["none"] || [], + [exportsByCase], + ); + + const mutate = useCallback(() => { + updateExports(); + updateCases(); + }, [updateExports, updateCases]); + // Search const [search, setSearch] = useState(""); - const filteredExports = useMemo(() => { - if (!search || !exports) { - return exports; - } - - return exports.filter((exp) => - exp.name - .toLowerCase() - .replaceAll("_", " ") - .includes(search.toLowerCase()), - ); - }, [exports, search]); - // Viewing const [selected, setSelected] = useState(); + const [selectedCaseId, setSelectedCaseId] = useState( + undefined, + ); const [selectedAspect, setSelectedAspect] = useState(0.0); + // Handle browser back button to deselect case before navigating away + useHistoryBack({ + enabled: true, + open: selectedCaseId !== undefined, + onClose: () => setSelectedCaseId(undefined), + }); + useSearchEffect("id", (id) => { - if (!exports) { + if 
(!rawExports) { return false; } - setSelected(exports.find((exp) => exp.id == id)); + setSelected(rawExports.find((exp) => exp.id == id)); return true; }); - // Deleting + useSearchEffect("caseId", (caseId: string) => { + if (!filteredCases) { + return false; + } + + const exists = filteredCases.some((c) => c.id === caseId); + + if (!exists) { + return false; + } + + setSelectedCaseId(caseId); + return true; + }); + + // Modifying const [deleteClip, setDeleteClip] = useState(); + const [exportToAssign, setExportToAssign] = useState(); const onHandleDelete = useCallback(() => { if (!deleteClip) { @@ -83,8 +163,6 @@ function Exports() { }); }, [deleteClip, mutate]); - // Renaming - const onHandleRename = useCallback( (id: string, update: string) => { axios @@ -107,7 +185,7 @@ function Exports() { }); }); }, - [mutate, t], + [mutate, setDeleteClip, t], ); // Keyboard Listener @@ -115,10 +193,27 @@ function Exports() { const contentRef = useRef(null); useKeyboardListener([], undefined, contentRef); + const selectedCase = useMemo( + () => filteredCases?.find((c) => c.id === selectedCaseId), + [filteredCases, selectedCaseId], + ); + + const resetCaseDialog = useCallback(() => { + setExportToAssign(undefined); + }, []); + return (
+ + setDeleteClip(undefined)} @@ -187,47 +282,364 @@ function Exports() { - {exports && ( -
+
+
setSearch(e.target.value)} />
- )} + +
-
- {exports && filteredExports && filteredExports.length > 0 ? ( -
- {Object.values(exports).map((item) => ( - - setDeleteClip({ file, exportName }) - } - /> - ))} -
- ) : exports !== undefined ? ( -
- - {t("noExports")} -
- ) : null} + {selectedCase ? ( + + ) : ( + + )} +
+ ); +} + +type AllExportsViewProps = { + contentRef: MutableRefObject; + search: string; + cases?: ExportCase[]; + exports: Export[]; + exportsByCase: { [caseId: string]: Export[] }; + setSelectedCaseId: (id: string) => void; + setSelected: (e: Export) => void; + renameClip: (id: string, update: string) => void; + setDeleteClip: (d: DeleteClipType | undefined) => void; + onAssignToCase: (e: Export) => void; +}; +function AllExportsView({ + contentRef, + search, + cases, + exports, + exportsByCase, + setSelectedCaseId, + setSelected, + renameClip, + setDeleteClip, + onAssignToCase, +}: AllExportsViewProps) { + const { t } = useTranslation(["views/exports"]); + + // Filter + + const filteredCases = useMemo(() => { + if (!search || !cases) { + return cases || []; + } + + return cases.filter( + (caseItem) => + caseItem.name.toLowerCase().includes(search.toLowerCase()) || + (caseItem.description && + caseItem.description.toLowerCase().includes(search.toLowerCase())), + ); + }, [search, cases]); + + const filteredExports = useMemo(() => { + if (!search) { + return exports; + } + + return exports.filter((exp) => + exp.name + .toLowerCase() + .replaceAll("_", " ") + .includes(search.toLowerCase()), + ); + }, [exports, search]); + + return ( +
+ {filteredCases?.length || filteredExports.length ? ( +
+ {filteredCases.length > 0 && ( +
+ {t("headings.cases")} +
+ {cases?.map((item) => ( + { + setSelectedCaseId(item.id); + }} + /> + ))} +
+
+ )} + + {filteredExports.length > 0 && ( +
+ {t("headings.uncategorizedExports")} +
+ {exports.map((item) => ( + + setDeleteClip({ file, exportName }) + } + onAssignToCase={onAssignToCase} + /> + ))} +
+
+ )} +
+ ) : ( +
+ + {t("noExports")} +
+ )} +
+ ); +} + +type CaseViewProps = { + contentRef: MutableRefObject; + selectedCase: ExportCase; + exports?: Export[]; + search: string; + setSelected: (e: Export) => void; + renameClip: (id: string, update: string) => void; + setDeleteClip: (d: DeleteClipType | undefined) => void; + onAssignToCase: (e: Export) => void; +}; +function CaseView({ + contentRef, + selectedCase, + exports, + search, + setSelected, + renameClip, + setDeleteClip, + onAssignToCase, +}: CaseViewProps) { + const filteredExports = useMemo(() => { + const caseExports = (exports || []).filter( + (e) => e.export_case == selectedCase.id, + ); + + if (!search) { + return caseExports; + } + + return caseExports.filter((exp) => + exp.name + .toLowerCase() + .replaceAll("_", " ") + .includes(search.toLowerCase()), + ); + }, [selectedCase, exports, search]); + + return ( +
+
+ + {selectedCase.name} + +
+ {selectedCase.description} +
+
+
+ {exports?.map((item) => ( + + setDeleteClip({ file, exportName }) + } + onAssignToCase={onAssignToCase} + /> + ))}
); } -export default Exports; +type CaseAssignmentDialogProps = { + exportToAssign?: Export; + cases?: ExportCase[]; + selectedCaseId?: string; + onClose: () => void; + mutate: () => void; +}; +function CaseAssignmentDialog({ + exportToAssign, + cases, + selectedCaseId, + onClose, + mutate, +}: CaseAssignmentDialogProps) { + const { t } = useTranslation(["views/exports"]); + const caseOptions = useMemo( + () => [ + ...(cases ?? []) + .map((c) => ({ + value: c.id, + label: c.name, + })) + .sort((cA, cB) => cA.label.localeCompare(cB.label)), + { + value: "new", + label: t("caseDialog.newCaseOption"), + }, + ], + [cases, t], + ); + + const handleSave = useCallback( + async (caseId: string) => { + if (!exportToAssign) return; + + try { + await axios.patch(`export/${exportToAssign.id}/case`, { + export_case_id: caseId, + }); + mutate(); + onClose(); + } catch (error: unknown) { + const apiError = error as { + response?: { data?: { message?: string; detail?: string } }; + }; + const errorMessage = + apiError.response?.data?.message || + apiError.response?.data?.detail || + "Unknown error"; + toast.error(t("toast.error.assignCaseFailed", { errorMessage }), { + position: "top-center", + }); + } + }, + [exportToAssign, mutate, onClose, t], + ); + + const handleCreateNew = useCallback( + async (name: string, description: string) => { + if (!exportToAssign) return; + + try { + const createResp = await axios.post("cases", { + name, + description, + }); + + const newCaseId: string | undefined = createResp.data?.id; + + if (newCaseId) { + await axios.patch(`export/${exportToAssign.id}/case`, { + export_case_id: newCaseId, + }); + } + + mutate(); + onClose(); + } catch (error: unknown) { + const apiError = error as { + response?: { data?: { message?: string; detail?: string } }; + }; + const errorMessage = + apiError.response?.data?.message || + apiError.response?.data?.detail || + "Unknown error"; + toast.error(t("toast.error.assignCaseFailed", { errorMessage }), { + position: 
"top-center", + }); + } + }, + [exportToAssign, mutate, onClose, t], + ); + + if (!exportToAssign) { + return null; + } + + return ( + { + if (!open) { + onClose(); + } + }} + options={caseOptions} + nameLabel={t("caseDialog.nameLabel")} + descriptionLabel={t("caseDialog.descriptionLabel")} + initialValue={selectedCaseId} + newValueKey="new" + onSave={handleSave} + onCreateNew={handleCreateNew} + /> + ); +} + +export default Exports; \ No newline at end of file diff --git a/web/src/types/export.ts b/web/src/types/export.ts index fc62bbeec..1184becf0 100644 --- a/web/src/types/export.ts +++ b/web/src/types/export.ts @@ -6,6 +6,15 @@ export type Export = { video_path: string; thumb_path: string; in_progress: boolean; + export_case?: string; +}; + +export type ExportCase = { + id: string; + name: string; + description: string; + created_at: number; + updated_at: number; }; export type DeleteClipType = { From dde738cfdcc827c78ded6bb03f7d478a822a036a Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Mon, 15 Dec 2025 15:02:03 -0600 Subject: [PATCH 08/56] Camera connection quality indicator (#21297) * add camera connection quality metrics and indicator * formatting * move stall calcs to watchdog * clean up * change watchdog to 1s and separately track time for ffmpeg retry_interval * implement status caching to reduce message volume --- frigate/camera/__init__.py | 4 + frigate/stats/util.py | 27 ++++ frigate/video.py | 121 +++++++++++++++--- web/public/locales/en/views/system.json | 11 ++ .../camera/ConnectionQualityIndicator.tsx | 76 +++++++++++ web/src/types/stats.ts | 4 + web/src/views/system/CameraMetrics.tsx | 34 ++++- 7 files changed, 256 insertions(+), 21 deletions(-) create mode 100644 web/src/components/camera/ConnectionQualityIndicator.tsx diff --git a/frigate/camera/__init__.py b/frigate/camera/__init__.py index 77b1fd424..0461c98cb 100644 --- a/frigate/camera/__init__.py +++ b/frigate/camera/__init__.py @@ -19,6 
+19,8 @@ class CameraMetrics: process_pid: Synchronized capture_process_pid: Synchronized ffmpeg_pid: Synchronized + reconnects_last_hour: Synchronized + stalls_last_hour: Synchronized def __init__(self, manager: SyncManager): self.camera_fps = manager.Value("d", 0) @@ -35,6 +37,8 @@ class CameraMetrics: self.process_pid = manager.Value("i", 0) self.capture_process_pid = manager.Value("i", 0) self.ffmpeg_pid = manager.Value("i", 0) + self.reconnects_last_hour = manager.Value("i", 0) + self.stalls_last_hour = manager.Value("i", 0) class PTZMetrics: diff --git a/frigate/stats/util.py b/frigate/stats/util.py index 410350d96..69291df0a 100644 --- a/frigate/stats/util.py +++ b/frigate/stats/util.py @@ -278,6 +278,32 @@ def stats_snapshot( if camera_stats.capture_process_pid.value else None ) + # Calculate connection quality based on current state + # This is computed at stats-collection time so offline cameras + # correctly show as unusable rather than excellent + expected_fps = config.cameras[name].detect.fps + current_fps = camera_stats.camera_fps.value + reconnects = camera_stats.reconnects_last_hour.value + stalls = camera_stats.stalls_last_hour.value + + if current_fps < 0.1: + quality_str = "unusable" + elif reconnects == 0 and current_fps >= 0.9 * expected_fps and stalls < 5: + quality_str = "excellent" + elif reconnects <= 2 and current_fps >= 0.6 * expected_fps: + quality_str = "fair" + elif reconnects > 10 or current_fps < 1.0 or stalls > 100: + quality_str = "unusable" + else: + quality_str = "poor" + + connection_quality = { + "connection_quality": quality_str, + "expected_fps": expected_fps, + "reconnects_last_hour": reconnects, + "stalls_last_hour": stalls, + } + stats["cameras"][name] = { "camera_fps": round(camera_stats.camera_fps.value, 2), "process_fps": round(camera_stats.process_fps.value, 2), @@ -289,6 +315,7 @@ def stats_snapshot( "ffmpeg_pid": ffmpeg_pid, "audio_rms": round(camera_stats.audio_rms.value, 4), "audio_dBFS": 
round(camera_stats.audio_dBFS.value, 4), + **connection_quality, } stats["detectors"] = {} diff --git a/frigate/video.py b/frigate/video.py index 112844543..5e42619dd 100755 --- a/frigate/video.py +++ b/frigate/video.py @@ -3,6 +3,7 @@ import queue import subprocess as sp import threading import time +from collections import deque from datetime import datetime, timedelta, timezone from multiprocessing import Queue, Value from multiprocessing.synchronize import Event as MpEvent @@ -117,6 +118,7 @@ def capture_frames( frame_rate.start() skipped_eps = EventsPerSecond() skipped_eps.start() + config_subscriber = CameraConfigUpdateSubscriber( None, {config.name: config}, [CameraConfigUpdateEnum.enabled] ) @@ -181,6 +183,9 @@ class CameraWatchdog(threading.Thread): camera_fps, skipped_fps, ffmpeg_pid, + stalls, + reconnects, + detection_frame, stop_event, ): threading.Thread.__init__(self) @@ -201,6 +206,10 @@ class CameraWatchdog(threading.Thread): self.frame_index = 0 self.stop_event = stop_event self.sleeptime = self.config.ffmpeg.retry_interval + self.reconnect_timestamps = deque() + self.stalls = stalls + self.reconnects = reconnects + self.detection_frame = detection_frame self.config_subscriber = CameraConfigUpdateSubscriber( None, @@ -216,6 +225,35 @@ class CameraWatchdog(threading.Thread): self.latest_cache_segment_time: float = 0 self.record_enable_time: datetime | None = None + # Stall tracking (based on last processed frame) + self._stall_timestamps: deque[float] = deque() + self._stall_active: bool = False + + # Status caching to reduce message volume + self._last_detect_status: str | None = None + self._last_record_status: str | None = None + self._last_status_update_time: float = 0.0 + + def _send_detect_status(self, status: str, now: float) -> None: + """Send detect status only if changed or retry_interval has elapsed.""" + if ( + status != self._last_detect_status + or (now - self._last_status_update_time) >= self.sleeptime + ): + 
self.requestor.send_data(f"{self.config.name}/status/detect", status) + self._last_detect_status = status + self._last_status_update_time = now + + def _send_record_status(self, status: str, now: float) -> None: + """Send record status only if changed or retry_interval has elapsed.""" + if ( + status != self._last_record_status + or (now - self._last_status_update_time) >= self.sleeptime + ): + self.requestor.send_data(f"{self.config.name}/status/record", status) + self._last_record_status = status + self._last_status_update_time = now + def _update_enabled_state(self) -> bool: """Fetch the latest config and update enabled state.""" self.config_subscriber.check_for_updates() @@ -242,6 +280,14 @@ class CameraWatchdog(threading.Thread): else: self.ffmpeg_detect_process.wait() + # Update reconnects + now = datetime.now().timestamp() + self.reconnect_timestamps.append(now) + while self.reconnect_timestamps and self.reconnect_timestamps[0] < now - 3600: + self.reconnect_timestamps.popleft() + if self.reconnects: + self.reconnects.value = len(self.reconnect_timestamps) + # Wait for old capture thread to fully exit before starting a new one if self.capture_thread is not None and self.capture_thread.is_alive(): self.logger.info("Waiting for capture thread to exit...") @@ -267,7 +313,10 @@ class CameraWatchdog(threading.Thread): self.record_enable_time = datetime.now().astimezone(timezone.utc) time.sleep(self.sleeptime) - while not self.stop_event.wait(self.sleeptime): + last_restart_time = datetime.now().timestamp() + + # 1 second watchdog loop + while not self.stop_event.wait(1): enabled = self._update_enabled_state() if enabled != self.was_enabled: if enabled: @@ -285,12 +334,9 @@ class CameraWatchdog(threading.Thread): self.record_enable_time = None # update camera status - self.requestor.send_data( - f"{self.config.name}/status/detect", "disabled" - ) - self.requestor.send_data( - f"{self.config.name}/status/record", "disabled" - ) + now = datetime.now().timestamp() + 
self._send_detect_status("disabled", now) + self._send_record_status("disabled", now) self.was_enabled = enabled continue @@ -329,36 +375,44 @@ class CameraWatchdog(threading.Thread): now = datetime.now().timestamp() + # Check if enough time has passed to allow ffmpeg restart (backoff pacing) + time_since_last_restart = now - last_restart_time + can_restart = time_since_last_restart >= self.sleeptime + if not self.capture_thread.is_alive(): - self.requestor.send_data(f"{self.config.name}/status/detect", "offline") + self._send_detect_status("offline", now) self.camera_fps.value = 0 self.logger.error( f"Ffmpeg process crashed unexpectedly for {self.config.name}." ) - self.reset_capture_thread(terminate=False) + if can_restart: + self.reset_capture_thread(terminate=False) + last_restart_time = now elif self.camera_fps.value >= (self.config.detect.fps + 10): self.fps_overflow_count += 1 if self.fps_overflow_count == 3: - self.requestor.send_data( - f"{self.config.name}/status/detect", "offline" - ) + self._send_detect_status("offline", now) self.fps_overflow_count = 0 self.camera_fps.value = 0 self.logger.info( f"{self.config.name} exceeded fps limit. Exiting ffmpeg..." ) - self.reset_capture_thread(drain_output=False) + if can_restart: + self.reset_capture_thread(drain_output=False) + last_restart_time = now elif now - self.capture_thread.current_frame.value > 20: - self.requestor.send_data(f"{self.config.name}/status/detect", "offline") + self._send_detect_status("offline", now) self.camera_fps.value = 0 self.logger.info( f"No frames received from {self.config.name} in 20 seconds. Exiting ffmpeg..." 
) - self.reset_capture_thread() + if can_restart: + self.reset_capture_thread() + last_restart_time = now else: # process is running normally - self.requestor.send_data(f"{self.config.name}/status/detect", "online") + self._send_detect_status("online", now) self.fps_overflow_count = 0 for p in self.ffmpeg_other_processes: @@ -441,9 +495,7 @@ class CameraWatchdog(threading.Thread): continue else: - self.requestor.send_data( - f"{self.config.name}/status/record", "online" - ) + self._send_record_status("online", now) p["latest_segment_time"] = self.latest_cache_segment_time if poll is None: @@ -459,6 +511,34 @@ class CameraWatchdog(threading.Thread): p["cmd"], self.logger, p["logpipe"], ffmpeg_process=p["process"] ) + # Update stall metrics based on last processed frame timestamp + now = datetime.now().timestamp() + processed_ts = ( + float(self.detection_frame.value) if self.detection_frame else 0.0 + ) + if processed_ts > 0: + delta = now - processed_ts + observed_fps = ( + self.camera_fps.value + if self.camera_fps.value > 0 + else self.config.detect.fps + ) + interval = 1.0 / max(observed_fps, 0.1) + stall_threshold = max(2.0 * interval, 2.0) + + if delta > stall_threshold: + if not self._stall_active: + self._stall_timestamps.append(now) + self._stall_active = True + else: + self._stall_active = False + + while self._stall_timestamps and self._stall_timestamps[0] < now - 3600: + self._stall_timestamps.popleft() + + if self.stalls: + self.stalls.value = len(self._stall_timestamps) + self.stop_all_ffmpeg() self.logpipe.close() self.config_subscriber.stop() @@ -596,6 +676,9 @@ class CameraCapture(FrigateProcess): self.camera_metrics.camera_fps, self.camera_metrics.skipped_fps, self.camera_metrics.ffmpeg_pid, + self.camera_metrics.stalls_last_hour, + self.camera_metrics.reconnects_last_hour, + self.camera_metrics.detection_frame, self.stop_event, ) camera_watchdog.start() diff --git a/web/public/locales/en/views/system.json b/web/public/locales/en/views/system.json 
index da774e302..202e0c70f 100644 --- a/web/public/locales/en/views/system.json +++ b/web/public/locales/en/views/system.json @@ -158,6 +158,17 @@ "cameraDetectionsPerSecond": "{{camName}} detections per second", "cameraSkippedDetectionsPerSecond": "{{camName}} skipped detections per second" }, + "connectionQuality": { + "title": "Connection Quality", + "excellent": "Excellent", + "fair": "Fair", + "poor": "Poor", + "unusable": "Unusable", + "fps": "FPS", + "expectedFps": "Expected FPS", + "reconnectsLastHour": "Reconnects (last hour)", + "stallsLastHour": "Stalls (last hour)" + }, "toast": { "success": { "copyToClipboard": "Copied probe data to clipboard." diff --git a/web/src/components/camera/ConnectionQualityIndicator.tsx b/web/src/components/camera/ConnectionQualityIndicator.tsx new file mode 100644 index 000000000..3ea3c4f19 --- /dev/null +++ b/web/src/components/camera/ConnectionQualityIndicator.tsx @@ -0,0 +1,76 @@ +import { useTranslation } from "react-i18next"; +import { + Tooltip, + TooltipContent, + TooltipTrigger, +} from "@/components/ui/tooltip"; +import { cn } from "@/lib/utils"; + +type ConnectionQualityIndicatorProps = { + quality: "excellent" | "fair" | "poor" | "unusable"; + expectedFps: number; + reconnects: number; + stalls: number; +}; + +export function ConnectionQualityIndicator({ + quality, + expectedFps, + reconnects, + stalls, +}: ConnectionQualityIndicatorProps) { + const { t } = useTranslation(["views/system"]); + + const getColorClass = (quality: string): string => { + switch (quality) { + case "excellent": + return "bg-success"; + case "fair": + return "bg-yellow-500"; + case "poor": + return "bg-orange-500"; + case "unusable": + return "bg-destructive"; + default: + return "bg-gray-500"; + } + }; + + const qualityLabel = t(`cameras.connectionQuality.${quality}`); + + return ( + + +
+ + +
+
+ {t("cameras.connectionQuality.title")} +
+
+
{qualityLabel}
+
+
+ {t("cameras.connectionQuality.expectedFps")}:{" "} + {expectedFps.toFixed(1)} {t("cameras.connectionQuality.fps")} +
+
+ {t("cameras.connectionQuality.reconnectsLastHour")}:{" "} + {reconnects} +
+
+ {t("cameras.connectionQuality.stallsLastHour")}: {stalls} +
+
+
+
+
+ + ); +} diff --git a/web/src/types/stats.ts b/web/src/types/stats.ts index c98ebe80f..5432f3154 100644 --- a/web/src/types/stats.ts +++ b/web/src/types/stats.ts @@ -24,6 +24,10 @@ export type CameraStats = { pid: number; process_fps: number; skipped_fps: number; + connection_quality: "excellent" | "fair" | "poor" | "unusable"; + expected_fps: number; + reconnects_last_hour: number; + stalls_last_hour: number; }; export type CpuStats = { diff --git a/web/src/views/system/CameraMetrics.tsx b/web/src/views/system/CameraMetrics.tsx index 6e24ef5d0..b6c5be4fa 100644 --- a/web/src/views/system/CameraMetrics.tsx +++ b/web/src/views/system/CameraMetrics.tsx @@ -1,6 +1,7 @@ import { useFrigateStats } from "@/api/ws"; import { CameraLineGraph } from "@/components/graph/LineGraph"; import CameraInfoDialog from "@/components/overlay/CameraInfoDialog"; +import { ConnectionQualityIndicator } from "@/components/camera/ConnectionQualityIndicator"; import { Skeleton } from "@/components/ui/skeleton"; import { FrigateConfig } from "@/types/frigateConfig"; import { FrigateStats } from "@/types/stats"; @@ -282,8 +283,37 @@ export default function CameraMetrics({ )}
-
- +
+
+ +
+ {statsHistory.length > 0 && + statsHistory[statsHistory.length - 1]?.cameras[ + camera.name + ] && ( + + )}
From fc3545310c9e3e9892f796d7f6ff5309adc2f39a Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Tue, 16 Dec 2025 15:10:48 -0700 Subject: [PATCH 09/56] Export filter UI (#21322) * Get started on export filters * implement basic filter * Implement filtering and adjust api * Improve filter handling * Improve navigation * Cleanup * handle scrolling --- frigate/api/export.py | 7 +- .../components/filter/ExportFilterGroup.tsx | 67 +++++++++++++++++++ web/src/types/export.ts | 10 +++ web/vite.config.ts | 2 +- 4 files changed, 82 insertions(+), 4 deletions(-) create mode 100644 web/src/components/filter/ExportFilterGroup.tsx diff --git a/frigate/api/export.py b/frigate/api/export.py index 812a1b4b2..c2cf66a34 100644 --- a/frigate/api/export.py +++ b/frigate/api/export.py @@ -62,7 +62,7 @@ router = APIRouter(tags=[Tags.export]) def get_exports( allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter), export_case_id: Optional[str] = None, - camera: Optional[List[str]] = Query(default=None), + cameras: Optional[str] = Query(default="all"), start_date: Optional[float] = None, end_date: Optional[float] = None, ): @@ -74,8 +74,9 @@ def get_exports( else: query = query.where(Export.export_case == export_case_id) - if camera: - filtered_cameras = [c for c in camera if c in allowed_cameras] + if cameras and cameras != "all": + requested = set(cameras.split(",")) + filtered_cameras = list(requested.intersection(allowed_cameras)) if not filtered_cameras: return JSONResponse(content=[]) query = query.where(Export.camera << filtered_cameras) diff --git a/web/src/components/filter/ExportFilterGroup.tsx b/web/src/components/filter/ExportFilterGroup.tsx new file mode 100644 index 000000000..c5fe4f33c --- /dev/null +++ b/web/src/components/filter/ExportFilterGroup.tsx @@ -0,0 +1,67 @@ +import { cn } from "@/lib/utils"; +import { + DEFAULT_EXPORT_FILTERS, + ExportFilter, + ExportFilters, +} from "@/types/export"; +import { CamerasFilterButton } from "./CamerasFilterButton"; 
+import { useAllowedCameras } from "@/hooks/use-allowed-cameras"; +import { useMemo } from "react"; +import { FrigateConfig } from "@/types/frigateConfig"; +import useSWR from "swr"; + +type ExportFilterGroupProps = { + className: string; + filters?: ExportFilters[]; + filter?: ExportFilter; + onUpdateFilter: (filter: ExportFilter) => void; +}; +export default function ExportFilterGroup({ + className, + filter, + filters = DEFAULT_EXPORT_FILTERS, + onUpdateFilter, +}: ExportFilterGroupProps) { + const { data: config } = useSWR("config", { + revalidateOnFocus: false, + }); + const allowedCameras = useAllowedCameras(); + + const filterValues = useMemo( + () => ({ + cameras: allowedCameras, + }), + [allowedCameras], + ); + + const groups = useMemo(() => { + if (!config) { + return []; + } + + return Object.entries(config.camera_groups).sort( + (a, b) => a[1].order - b[1].order, + ); + }, [config]); + + return ( +
+ {filters.includes("cameras") && ( + { + onUpdateFilter({ ...filter, cameras: newCameras }); + }} + /> + )} +
+ ); +} diff --git a/web/src/types/export.ts b/web/src/types/export.ts index 1184becf0..c606855f2 100644 --- a/web/src/types/export.ts +++ b/web/src/types/export.ts @@ -21,3 +21,13 @@ export type DeleteClipType = { file: string; exportName: string; }; + +// filtering + +const EXPORT_FILTERS = ["cameras"] as const; +export type ExportFilters = (typeof EXPORT_FILTERS)[number]; +export const DEFAULT_EXPORT_FILTERS: ExportFilters[] = ["cameras"]; + +export type ExportFilter = { + cameras?: string[]; +}; diff --git a/web/vite.config.ts b/web/vite.config.ts index cb1a580bf..148048995 100644 --- a/web/vite.config.ts +++ b/web/vite.config.ts @@ -4,7 +4,7 @@ import { defineConfig } from "vite"; import react from "@vitejs/plugin-react-swc"; import monacoEditorPlugin from "vite-plugin-monaco-editor"; -const proxyHost = process.env.PROXY_HOST || "localhost:5000"; +const proxyHost = process.env.PROXY_HOST || "1ocalhost:5000"; // https://vitejs.dev/config/ export default defineConfig({ From 3297cab3474396ed3006d98c7773149a41f24a79 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Mon, 22 Dec 2025 08:25:38 -0700 Subject: [PATCH 10/56] Refactor temperature reporting for detectors and implement Hailo temp reading (#21395) * Add Hailo temperature retrieval * Refactor `get_hailo_temps()` to use ctxmanager * Show Hailo temps in system UI * Move hailo_platform import to get_hailo_temps * Refactor temperatures calculations to use within detector block * Adjust webUI to handle new location --------- Co-authored-by: tigattack <10629864+tigattack@users.noreply.github.com> --- frigate/stats/util.py | 82 +++++++++++++++++++++---- frigate/util/services.py | 47 ++++++++++++++ web/src/types/stats.ts | 2 +- web/src/views/system/GeneralMetrics.tsx | 31 ++++------ 4 files changed, 129 insertions(+), 33 deletions(-) diff --git a/frigate/stats/util.py b/frigate/stats/util.py index 69291df0a..f0ed57eaa 100644 --- a/frigate/stats/util.py +++ b/frigate/stats/util.py @@ -22,6 +22,7 @@ from 
frigate.util.services import ( get_bandwidth_stats, get_cpu_stats, get_fs_type, + get_hailo_temps, get_intel_gpu_stats, get_jetson_stats, get_nvidia_gpu_stats, @@ -90,9 +91,76 @@ def get_temperatures() -> dict[str, float]: if temp is not None: temps[apex] = temp + # Get temperatures for Hailo devices + temps.update(get_hailo_temps()) + return temps +def get_detector_temperature( + detector_type: str, + detector_index_by_type: dict[str, int], +) -> Optional[float]: + """Get temperature for a specific detector based on its type.""" + if detector_type == "edgetpu": + # Get temperatures for all attached Corals + base = "/sys/class/apex/" + if os.path.isdir(base): + apex_devices = sorted(os.listdir(base)) + index = detector_index_by_type.get("edgetpu", 0) + if index < len(apex_devices): + apex_name = apex_devices[index] + temp = read_temperature(os.path.join(base, apex_name, "temp")) + if temp is not None: + return temp + elif detector_type == "hailo8l": + # Get temperatures for Hailo devices + hailo_temps = get_hailo_temps() + if hailo_temps: + hailo_device_names = sorted(hailo_temps.keys()) + index = detector_index_by_type.get("hailo8l", 0) + if index < len(hailo_device_names): + device_name = hailo_device_names[index] + return hailo_temps[device_name] + + return None + + +def get_detector_stats( + stats_tracking: StatsTrackingTypes, +) -> dict[str, dict[str, Any]]: + """Get stats for all detectors, including temperatures based on detector type.""" + detector_stats: dict[str, dict[str, Any]] = {} + detector_type_indices: dict[str, int] = {} + + for name, detector in stats_tracking["detectors"].items(): + pid = detector.detect_process.pid if detector.detect_process else None + detector_type = detector.detector_config.type + + # Keep track of the index for each detector type to match temperatures correctly + current_index = detector_type_indices.get(detector_type, 0) + detector_type_indices[detector_type] = current_index + 1 + + detector_stat = { + "inference_speed": 
round(detector.avg_inference_speed.value * 1000, 2), # type: ignore[attr-defined] + # issue https://github.com/python/typeshed/issues/8799 + # from mypy 0.981 onwards + "detection_start": detector.detection_start.value, # type: ignore[attr-defined] + # issue https://github.com/python/typeshed/issues/8799 + # from mypy 0.981 onwards + "pid": pid, + } + + temp = get_detector_temperature(detector_type, {detector_type: current_index}) + + if temp is not None: + detector_stat["temperature"] = round(temp, 1) + + detector_stats[name] = detector_stat + + return detector_stats + + def get_processing_stats( config: FrigateConfig, stats: dict[str, str], hwaccel_errors: list[str] ) -> None: @@ -318,18 +386,7 @@ def stats_snapshot( **connection_quality, } - stats["detectors"] = {} - for name, detector in stats_tracking["detectors"].items(): - pid = detector.detect_process.pid if detector.detect_process else None - stats["detectors"][name] = { - "inference_speed": round(detector.avg_inference_speed.value * 1000, 2), # type: ignore[attr-defined] - # issue https://github.com/python/typeshed/issues/8799 - # from mypy 0.981 onwards - "detection_start": detector.detection_start.value, # type: ignore[attr-defined] - # issue https://github.com/python/typeshed/issues/8799 - # from mypy 0.981 onwards - "pid": pid, - } + stats["detectors"] = get_detector_stats(stats_tracking) stats["camera_fps"] = round(total_camera_fps, 2) stats["process_fps"] = round(total_process_fps, 2) stats["skipped_fps"] = round(total_skipped_fps, 2) @@ -415,7 +472,6 @@ def stats_snapshot( "version": VERSION, "latest_version": stats_tracking["latest_frigate_version"], "storage": {}, - "temperatures": get_temperatures(), "last_updated": int(time.time()), } diff --git a/frigate/util/services.py b/frigate/util/services.py index 9420732be..28be620e3 100644 --- a/frigate/util/services.py +++ b/frigate/util/services.py @@ -556,6 +556,53 @@ def get_jetson_stats() -> Optional[dict[int, dict]]: return results +def 
get_hailo_temps() -> dict[str, float]: + """Get temperatures for Hailo devices.""" + try: + from hailo_platform import Device + except ModuleNotFoundError: + return {} + + temps = {} + + try: + device_ids = Device.scan() + for i, device_id in enumerate(device_ids): + try: + with Device(device_id) as device: + temp_info = device.control.get_chip_temperature() + + # Get board name and normalise it + identity = device.control.identify() + board_name = None + for line in str(identity).split("\n"): + if line.startswith("Board Name:"): + board_name = ( + line.split(":", 1)[1].strip().lower().replace("-", "") + ) + break + + if not board_name: + board_name = f"hailo{i}" + + # Use indexed name if multiple devices, otherwise just the board name + device_name = ( + f"{board_name}-{i}" if len(device_ids) > 1 else board_name + ) + + # ts1_temperature is also available, but appeared to be the same as ts0 in testing. + temps[device_name] = round(temp_info.ts0_temperature, 1) + except Exception as e: + logger.debug( + f"Failed to get temperature for Hailo device {device_id}: {e}" + ) + continue + except Exception as e: + logger.debug(f"Failed to scan for Hailo devices: {e}") + + return temps + + def ffprobe_stream(ffmpeg, path: str, detailed: bool = False) -> sp.CompletedProcess: """Run ffprobe on stream.""" clean_path = escape_special_characters(path) diff --git a/web/src/types/stats.ts b/web/src/types/stats.ts index 5432f3154..1fd38a1c3 100644 --- a/web/src/types/stats.ts +++ b/web/src/types/stats.ts @@ -41,6 +41,7 @@ export type DetectorStats = { detection_start: number; inference_speed: number; pid: number; + temperature?: number; }; export type EmbeddingsStats = { @@ -72,7 +73,6 @@ export type GpuInfo = "vainfo" | "nvinfo"; export type ServiceStats = { last_updated: number; storage: { [path: string]: StorageStats }; - temperatures: { [apex: string]: number }; uptime: number; latest_version: string; version: string; diff --git a/web/src/views/system/GeneralMetrics.tsx 
b/web/src/views/system/GeneralMetrics.tsx index f8ce64851..cd63594e8 100644 --- a/web/src/views/system/GeneralMetrics.tsx +++ b/web/src/views/system/GeneralMetrics.tsx @@ -127,13 +127,6 @@ export default function GeneralMetrics({ return undefined; } - if ( - statsHistory.length > 0 && - Object.keys(statsHistory[0].service.temperatures).length == 0 - ) { - return undefined; - } - const series: { [key: string]: { name: string; data: { x: number; y: number }[] }; } = {}; @@ -143,22 +136,22 @@ export default function GeneralMetrics({ return; } - Object.entries(stats.detectors).forEach(([key], cIdx) => { - if (!key.includes("coral")) { + Object.entries(stats.detectors).forEach(([key, detectorStats]) => { + if (detectorStats.temperature === undefined) { return; } - if (cIdx <= Object.keys(stats.service.temperatures).length) { - if (!(key in series)) { - series[key] = { - name: key, - data: [], - }; - } - - const temp = Object.values(stats.service.temperatures)[cIdx]; - series[key].data.push({ x: statsIdx + 1, y: Math.round(temp) }); + if (!(key in series)) { + series[key] = { + name: key, + data: [], + }; } + + series[key].data.push({ + x: statsIdx + 1, + y: Math.round(detectorStats.temperature), + }); }); }); From 3745f5ff937b9898358d66a82a113d24f3c92abd Mon Sep 17 00:00:00 2001 From: Andrew Roberts Date: Mon, 22 Dec 2025 11:10:40 -0500 Subject: [PATCH 11/56] Camera-specific hwaccel settings for timelapse exports (correct base) (#21386) * added hwaccel_args to camera.record.export config struct * populate camera.record.export.hwaccel_args with a cascade up to camera then global if 'auto' * use new hwaccel args in export * added documentation for camera-specific hwaccel export * fix c/p error * missed an import * fleshed out the docs and comments a bit * ruff lint * separated out the tips in the doc * fix documentation * fix and simplify reference config doc --- docs/docs/configuration/record.md | 6 +++++- docs/docs/configuration/reference.md | 7 +++++++ 
frigate/config/camera/record.py | 5 ++++- frigate/config/config.py | 8 ++++++++ frigate/record/export.py | 4 ++-- 5 files changed, 26 insertions(+), 4 deletions(-) diff --git a/docs/docs/configuration/record.md b/docs/docs/configuration/record.md index 4dfd8b77c..ddbf0f612 100644 --- a/docs/docs/configuration/record.md +++ b/docs/docs/configuration/record.md @@ -139,7 +139,11 @@ record: :::tip -When using `hwaccel_args` globally hardware encoding is used for time lapse generation. The encoder determines its own behavior so the resulting file size may be undesirably large. +When using `hwaccel_args`, hardware encoding is used for timelapse generation. This setting can be overridden for a specific camera (e.g., when camera resolution exceeds hardware encoder limits); set `cameras..record.export.hwaccel_args` with the appropriate settings. Using an unrecognized value or empty string will fall back to software encoding (libx264). + +:::tip + +The encoder determines its own behavior so the resulting file size may be undesirably large. To reduce the output file size the ffmpeg parameter `-qp n` can be utilized (where `n` stands for the value of the quantisation parameter). The value can be adjusted to get an acceptable tradeoff between quality and file size for the given scenario. ::: diff --git a/docs/docs/configuration/reference.md b/docs/docs/configuration/reference.md index 206d7012e..ad1695f22 100644 --- a/docs/docs/configuration/reference.md +++ b/docs/docs/configuration/reference.md @@ -534,6 +534,8 @@ record: # The -r (framerate) dictates how smooth the output video is. # So the args would be -vf setpts=0.02*PTS -r 30 in that case. timelapse_args: "-vf setpts=0.04*PTS -r 30" + # Optional: Global hardware acceleration settings for timelapse exports. (default: inherit) + hwaccel_args: auto # Optional: Recording Preview Settings preview: # Optional: Quality of recording preview (default: shown below). 
@@ -838,6 +840,11 @@ cameras: # Optional: camera specific output args (default: inherit) # output_args: + # Optional: camera specific hwaccel args for timelapse export (default: inherit) + # record: + # export: + # hwaccel_args: + # Optional: timeout for highest scoring image before allowing it # to be replaced by a newer image. (default: shown below) best_image_timeout: 60 diff --git a/frigate/config/camera/record.py b/frigate/config/camera/record.py index 09a7a84d5..90881f448 100644 --- a/frigate/config/camera/record.py +++ b/frigate/config/camera/record.py @@ -1,5 +1,5 @@ from enum import Enum -from typing import Optional +from typing import Optional, Union from pydantic import Field @@ -70,6 +70,9 @@ class RecordExportConfig(FrigateBaseModel): timelapse_args: str = Field( default=DEFAULT_TIME_LAPSE_FFMPEG_ARGS, title="Timelapse Args" ) + hwaccel_args: Union[str, list[str]] = Field( + default="auto", title="Export-specific FFmpeg hardware acceleration arguments." + ) class RecordConfig(FrigateBaseModel): diff --git a/frigate/config/config.py b/frigate/config/config.py index a26d4c50e..370c89458 100644 --- a/frigate/config/config.py +++ b/frigate/config/config.py @@ -525,6 +525,14 @@ class FrigateConfig(FrigateBaseModel): if camera_config.ffmpeg.hwaccel_args == "auto": camera_config.ffmpeg.hwaccel_args = self.ffmpeg.hwaccel_args + # Resolve export hwaccel_args: camera export -> camera ffmpeg -> global ffmpeg + # This allows per-camera override for exports (e.g., when camera resolution + # exceeds hardware encoder limits) + if camera_config.record.export.hwaccel_args == "auto": + camera_config.record.export.hwaccel_args = ( + camera_config.ffmpeg.hwaccel_args + ) + for input in camera_config.ffmpeg.inputs: need_detect_dimensions = "detect" in input.roles and ( camera_config.detect.height is None diff --git a/frigate/record/export.py b/frigate/record/export.py index 9a2a77ebf..9a8b5dbdb 100644 --- a/frigate/record/export.py +++ b/frigate/record/export.py @@ -228,7 
+228,7 @@ class RecordingExporter(threading.Thread): ffmpeg_cmd = ( parse_preset_hardware_acceleration_encode( self.config.ffmpeg.ffmpeg_path, - self.config.ffmpeg.hwaccel_args, + self.config.cameras[self.camera].record.export.hwaccel_args, f"-an {ffmpeg_input}", f"{self.config.cameras[self.camera].record.export.timelapse_args} -movflags +faststart", EncodeTypeEnum.timelapse, @@ -319,7 +319,7 @@ class RecordingExporter(threading.Thread): ffmpeg_cmd = ( parse_preset_hardware_acceleration_encode( self.config.ffmpeg.ffmpeg_path, - self.config.ffmpeg.hwaccel_args, + self.config.cameras[self.camera].record.export.hwaccel_args, f"{TIMELAPSE_DATA_INPUT_ARGS} {ffmpeg_input}", f"{self.config.cameras[self.camera].record.export.timelapse_args} -movflags +faststart {video_path}", EncodeTypeEnum.timelapse, From 6b77952b72a067126b4f24e1ead2aeae9d9291f1 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Wed, 31 Dec 2025 13:32:07 -0700 Subject: [PATCH 12/56] Add support for GPU and NPU temperatures (#21495) * Add rockchip temps * Add support for GPU and NPU temperatures in the frontend * Add support for Nvidia temperature * Improve separation * Adjust graph scaling --- frigate/stats/util.py | 5 + frigate/util/services.py | 42 ++++++-- web/public/locales/en/views/system.json | 2 + web/src/types/stats.ts | 2 + web/src/views/system/GeneralMetrics.tsx | 122 +++++++++++++++++++++++- 5 files changed, 166 insertions(+), 7 deletions(-) diff --git a/frigate/stats/util.py b/frigate/stats/util.py index f0ed57eaa..f4f91f83f 100644 --- a/frigate/stats/util.py +++ b/frigate/stats/util.py @@ -122,6 +122,10 @@ def get_detector_temperature( if index < len(hailo_device_names): device_name = hailo_device_names[index] return hailo_temps[device_name] + elif detector_type == "rknn": + # Rockchip temperatures are handled by the GPU / NPU stats + # as there are not detector specific temperatures + pass return None @@ -241,6 +245,7 @@ async def set_gpu_stats( "mem": 
str(round(float(nvidia_usage[i]["mem"]), 2)) + "%", "enc": str(round(float(nvidia_usage[i]["enc"]), 2)) + "%", "dec": str(round(float(nvidia_usage[i]["dec"]), 2)) + "%", + "temp": str(nvidia_usage[i]["temp"]), } else: diff --git a/frigate/util/services.py b/frigate/util/services.py index 28be620e3..19ec4efdf 100644 --- a/frigate/util/services.py +++ b/frigate/util/services.py @@ -417,12 +417,12 @@ def get_openvino_npu_stats() -> Optional[dict[str, str]]: else: usage = 0.0 - return {"npu": f"{round(usage, 2)}", "mem": "-"} + return {"npu": f"{round(usage, 2)}", "mem": "-%"} except (FileNotFoundError, PermissionError, ValueError): return None -def get_rockchip_gpu_stats() -> Optional[dict[str, str]]: +def get_rockchip_gpu_stats() -> Optional[dict[str, str | float]]: """Get GPU stats using rk.""" try: with open("/sys/kernel/debug/rkrga/load", "r") as f: @@ -440,7 +440,16 @@ def get_rockchip_gpu_stats() -> Optional[dict[str, str]]: return None average_load = f"{round(sum(load_values) / len(load_values), 2)}%" - return {"gpu": average_load, "mem": "-"} + stats: dict[str, str | float] = {"gpu": average_load, "mem": "-%"} + + try: + with open("/sys/class/thermal/thermal_zone5/temp", "r") as f: + line = f.readline().strip() + stats["temp"] = round(int(line) / 1000, 1) + except (FileNotFoundError, OSError, ValueError): + pass + + return stats def get_rockchip_npu_stats() -> Optional[dict[str, float | str]]: @@ -463,13 +472,25 @@ def get_rockchip_npu_stats() -> Optional[dict[str, float | str]]: percentages = [int(load) for load in core_loads] mean = round(sum(percentages) / len(percentages), 2) - return {"npu": mean, "mem": "-"} + stats: dict[str, float | str] = {"npu": mean, "mem": "-%"} + + try: + with open("/sys/class/thermal/thermal_zone6/temp", "r") as f: + line = f.readline().strip() + stats["temp"] = round(int(line) / 1000, 1) + except (FileNotFoundError, OSError, ValueError): + pass + + return stats -def try_get_info(f, h, default="N/A"): +def try_get_info(f, h, 
default="N/A", sensor=None): try: if h: - v = f(h) + if sensor is not None: + v = f(h, sensor) + else: + v = f(h) else: v = f() except nvml.NVMLError_NotSupported: @@ -498,6 +519,9 @@ def get_nvidia_gpu_stats() -> dict[int, dict]: util = try_get_info(nvml.nvmlDeviceGetUtilizationRates, handle) enc = try_get_info(nvml.nvmlDeviceGetEncoderUtilization, handle) dec = try_get_info(nvml.nvmlDeviceGetDecoderUtilization, handle) + temp = try_get_info( + nvml.nvmlDeviceGetTemperature, handle, default=None, sensor=0 + ) pstate = try_get_info(nvml.nvmlDeviceGetPowerState, handle, default=None) if util != "N/A": @@ -510,6 +534,11 @@ def get_nvidia_gpu_stats() -> dict[int, dict]: else: gpu_mem_util = -1 + if temp != "N/A" and temp is not None: + temp = float(temp) + else: + temp = None + if enc != "N/A": enc_util = enc[0] else: @@ -527,6 +556,7 @@ def get_nvidia_gpu_stats() -> dict[int, dict]: "enc": enc_util, "dec": dec_util, "pstate": pstate or "unknown", + "temp": temp, } except Exception: pass diff --git a/web/public/locales/en/views/system.json b/web/public/locales/en/views/system.json index 202e0c70f..8ddbc03e1 100644 --- a/web/public/locales/en/views/system.json +++ b/web/public/locales/en/views/system.json @@ -51,6 +51,7 @@ "gpuMemory": "GPU Memory", "gpuEncoder": "GPU Encoder", "gpuDecoder": "GPU Decoder", + "gpuTemperature": "GPU Temperature", "gpuInfo": { "vainfoOutput": { "title": "Vainfo Output", @@ -77,6 +78,7 @@ }, "npuUsage": "NPU Usage", "npuMemory": "NPU Memory", + "npuTemperature": "NPU Temperature", "intelGpuWarning": { "title": "Intel GPU Stats Warning", "message": "GPU stats unavailable", diff --git a/web/src/types/stats.ts b/web/src/types/stats.ts index 1fd38a1c3..8b22849be 100644 --- a/web/src/types/stats.ts +++ b/web/src/types/stats.ts @@ -61,11 +61,13 @@ export type GpuStats = { enc?: string; dec?: string; pstate?: string; + temp?: number; }; export type NpuStats = { npu: number; mem: string; + temp?: number; }; export type GpuInfo = "vainfo" | 
"nvinfo"; diff --git a/web/src/views/system/GeneralMetrics.tsx b/web/src/views/system/GeneralMetrics.tsx index cd63594e8..cdf35c28b 100644 --- a/web/src/views/system/GeneralMetrics.tsx +++ b/web/src/views/system/GeneralMetrics.tsx @@ -368,6 +368,40 @@ export default function GeneralMetrics({ return Object.keys(series).length > 0 ? Object.values(series) : undefined; }, [statsHistory]); + const gpuTempSeries = useMemo(() => { + if (!statsHistory) { + return []; + } + + const series: { + [key: string]: { name: string; data: { x: number; y: number }[] }; + } = {}; + let hasValidGpu = false; + + statsHistory.forEach((stats, statsIdx) => { + if (!stats) { + return; + } + + Object.entries(stats.gpu_usages || {}).forEach(([key, stats]) => { + if (!(key in series)) { + series[key] = { name: key, data: [] }; + } + + if (stats.temp !== undefined) { + hasValidGpu = true; + series[key].data.push({ x: statsIdx + 1, y: stats.temp }); + } + }); + }); + + if (!hasValidGpu) { + return []; + } + + return Object.keys(series).length > 0 ? Object.values(series) : undefined; + }, [statsHistory]); + // Check if Intel GPU has all 0% usage values (known bug) const showIntelGpuWarning = useMemo(() => { if (!statsHistory || statsHistory.length < 3) { @@ -448,6 +482,40 @@ export default function GeneralMetrics({ return Object.keys(series).length > 0 ? 
Object.values(series) : []; }, [statsHistory]); + const npuTempSeries = useMemo(() => { + if (!statsHistory) { + return []; + } + + const series: { + [key: string]: { name: string; data: { x: number; y: number }[] }; + } = {}; + let hasValidNpu = false; + + statsHistory.forEach((stats, statsIdx) => { + if (!stats) { + return; + } + + Object.entries(stats.npu_usages || {}).forEach(([key, stats]) => { + if (!(key in series)) { + series[key] = { name: key, data: [] }; + } + + if (stats.temp !== undefined) { + hasValidNpu = true; + series[key].data.push({ x: statsIdx + 1, y: stats.temp }); + } + }); + }); + + if (!hasValidNpu) { + return []; + } + + return Object.keys(series).length > 0 ? Object.values(series) : undefined; + }, [statsHistory]); + // other processes stats const hardwareType = useMemo(() => { @@ -669,7 +737,11 @@ export default function GeneralMetrics({
{statsHistory[0]?.gpu_usages && ( @@ -804,6 +876,30 @@ export default function GeneralMetrics({ ) : ( )} + {statsHistory.length != 0 ? ( + <> + {gpuTempSeries && gpuTempSeries?.length != 0 && ( +
+
+ {t("general.hardwareInfo.gpuTemperature")} +
+ {gpuTempSeries.map((series) => ( + + ))} +
+ )} + + ) : ( + + )} {statsHistory[0]?.npu_usages && ( <> @@ -827,6 +923,30 @@ export default function GeneralMetrics({ ) : ( )} + {statsHistory.length != 0 ? ( + <> + {npuTempSeries && npuTempSeries?.length != 0 && ( +
+
+ {t("general.hardwareInfo.npuTemperature")} +
+ {npuTempSeries.map((series) => ( + + ))} +
+ )} + + ) : ( + + )} )} From 9ef8b70208b40097c9295049f91cfb77bfd5ac38 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Sat, 3 Jan 2026 08:03:33 -0700 Subject: [PATCH 13/56] Exports Improvements (#21521) * Add images to case folder view * Add ability to select case in export dialog * Add to mobile review too --- web/public/locales/en/components/dialog.json | 4 ++ web/src/components/card/ExportCard.tsx | 27 ++++++-- web/src/components/overlay/ExportDialog.tsx | 63 ++++++++++++++++++- .../overlay/MobileReviewSettingsDrawer.tsx | 10 ++- 4 files changed, 97 insertions(+), 7 deletions(-) diff --git a/web/public/locales/en/components/dialog.json b/web/public/locales/en/components/dialog.json index 91ff38d82..9a6f68daf 100644 --- a/web/public/locales/en/components/dialog.json +++ b/web/public/locales/en/components/dialog.json @@ -49,6 +49,10 @@ "name": { "placeholder": "Name the Export" }, + "case": { + "label": "Case", + "placeholder": "Select a case" + }, "select": "Select", "export": "Export", "selectOrExport": "Select or Export", diff --git a/web/src/components/card/ExportCard.tsx b/web/src/components/card/ExportCard.tsx index fc7964c18..c8d9c4c65 100644 --- a/web/src/components/card/ExportCard.tsx +++ b/web/src/components/card/ExportCard.tsx @@ -1,6 +1,6 @@ import ActivityIndicator from "../indicators/activity-indicator"; import { Button } from "../ui/button"; -import { useCallback, useState } from "react"; +import { useCallback, useMemo, useState } from "react"; import { isMobile } from "react-device-detect"; import { FiMoreVertical } from "react-icons/fi"; import { Skeleton } from "../ui/skeleton"; @@ -32,18 +32,37 @@ import { FaFolder } from "react-icons/fa"; type CaseCardProps = { className: string; exportCase: ExportCase; + exports: Export[]; onSelect: () => void; }; -export function CaseCard({ className, exportCase, onSelect }: CaseCardProps) { +export function CaseCard({ + className, + exportCase, + exports, + onSelect, +}: CaseCardProps) { + const 
firstExport = useMemo( + () => exports.find((exp) => exp.thumb_path && exp.thumb_path.length > 0), + [exports], + ); + return (
onSelect()} > -
+ {firstExport && ( + + )} +
+
{exportCase.name}
diff --git a/web/src/components/overlay/ExportDialog.tsx b/web/src/components/overlay/ExportDialog.tsx index b8b5b9911..738aa689e 100644 --- a/web/src/components/overlay/ExportDialog.tsx +++ b/web/src/components/overlay/ExportDialog.tsx @@ -22,7 +22,14 @@ import useSWR from "swr"; import { FrigateConfig } from "@/types/frigateConfig"; import { Popover, PopoverContent, PopoverTrigger } from "../ui/popover"; import { TimezoneAwareCalendar } from "./ReviewActivityCalendar"; -import { SelectSeparator } from "../ui/select"; +import { + Select, + SelectContent, + SelectItem, + SelectSeparator, + SelectTrigger, + SelectValue, +} from "../ui/select"; import { isDesktop, isIOS, isMobile } from "react-device-detect"; import { Drawer, DrawerContent, DrawerTrigger } from "../ui/drawer"; import SaveExportOverlay from "./SaveExportOverlay"; @@ -31,6 +38,7 @@ import { baseUrl } from "@/api/baseUrl"; import { cn } from "@/lib/utils"; import { GenericVideoPlayer } from "../player/GenericVideoPlayer"; import { useTranslation } from "react-i18next"; +import { ExportCase } from "@/types/export"; const EXPORT_OPTIONS = [ "1", @@ -67,6 +75,9 @@ export default function ExportDialog({ }: ExportDialogProps) { const { t } = useTranslation(["components/dialog"]); const [name, setName] = useState(""); + const [selectedCaseId, setSelectedCaseId] = useState( + undefined, + ); const onStartExport = useCallback(() => { if (!range) { @@ -89,6 +100,7 @@ export default function ExportDialog({ { playback: "realtime", name, + export_case_id: selectedCaseId || undefined, }, ) .then((response) => { @@ -102,6 +114,7 @@ export default function ExportDialog({ ), }); setName(""); + setSelectedCaseId(undefined); setRange(undefined); setMode("none"); } @@ -118,10 +131,11 @@ export default function ExportDialog({ { position: "top-center" }, ); }); - }, [camera, name, range, setRange, setName, setMode, t]); + }, [camera, name, range, selectedCaseId, setRange, setName, setMode, t]); const handleCancel = 
useCallback(() => { setName(""); + setSelectedCaseId(undefined); setMode("none"); setRange(undefined); }, [setMode, setRange]); @@ -190,8 +204,10 @@ export default function ExportDialog({ currentTime={currentTime} range={range} name={name} + selectedCaseId={selectedCaseId} onStartExport={onStartExport} setName={setName} + setSelectedCaseId={setSelectedCaseId} setRange={setRange} setMode={setMode} onCancel={handleCancel} @@ -207,8 +223,10 @@ type ExportContentProps = { currentTime: number; range?: TimeRange; name: string; + selectedCaseId?: string; onStartExport: () => void; setName: (name: string) => void; + setSelectedCaseId: (caseId: string | undefined) => void; setRange: (range: TimeRange | undefined) => void; setMode: (mode: ExportMode) => void; onCancel: () => void; @@ -218,14 +236,17 @@ export function ExportContent({ currentTime, range, name, + selectedCaseId, onStartExport, setName, + setSelectedCaseId, setRange, setMode, onCancel, }: ExportContentProps) { const { t } = useTranslation(["components/dialog"]); const [selectedOption, setSelectedOption] = useState("1"); + const { data: cases } = useSWR("cases"); const onSelectTime = useCallback( (option: ExportOption) => { @@ -320,6 +341,44 @@ export function ExportContent({ value={name} onChange={(e) => setName(e.target.value)} /> +
+ + +
{isDesktop && } ( + undefined, + ); const onStartExport = useCallback(() => { if (!range) { toast.error(t("toast.error.noValidTimeSelected"), { @@ -96,6 +99,7 @@ export default function MobileReviewSettingsDrawer({ { playback: "realtime", name, + export_case_id: selectedCaseId || undefined, }, ) .then((response) => { @@ -114,6 +118,7 @@ export default function MobileReviewSettingsDrawer({ }, ); setName(""); + setSelectedCaseId(undefined); setRange(undefined); setMode("none"); } @@ -133,7 +138,7 @@ export default function MobileReviewSettingsDrawer({ }, ); }); - }, [camera, name, range, setRange, setName, setMode, t]); + }, [camera, name, range, selectedCaseId, setRange, setName, setMode, t]); // filters @@ -200,8 +205,10 @@ export default function MobileReviewSettingsDrawer({ currentTime={currentTime} range={range} name={name} + selectedCaseId={selectedCaseId} onStartExport={onStartExport} setName={setName} + setSelectedCaseId={setSelectedCaseId} setRange={setRange} setMode={(mode) => { setMode(mode); @@ -213,6 +220,7 @@ export default function MobileReviewSettingsDrawer({ onCancel={() => { setMode("none"); setRange(undefined); + setSelectedCaseId(undefined); setDrawerMode("select"); }} /> From 39ad565f811f2272e7c57cce2dde1fa68cccd00d Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Sat, 3 Jan 2026 08:19:41 -0700 Subject: [PATCH 14/56] Add API to handle deleting recordings (#21520) * Add recording delete API * Re-organize recordings apis * Fix import * Consolidate query types --- .../api/defs/query/media_query_parameters.py | 15 +- .../defs/query/recordings_query_parameters.py | 21 + frigate/api/defs/tags.py | 11 +- frigate/api/fastapi_app.py | 2 + frigate/api/media.py | 337 +----------- frigate/api/record.py | 479 ++++++++++++++++++ 6 files changed, 511 insertions(+), 354 deletions(-) create mode 100644 frigate/api/defs/query/recordings_query_parameters.py create mode 100644 frigate/api/record.py diff --git a/frigate/api/defs/query/media_query_parameters.py 
b/frigate/api/defs/query/media_query_parameters.py index a16f0d53f..7438f2f2f 100644 --- a/frigate/api/defs/query/media_query_parameters.py +++ b/frigate/api/defs/query/media_query_parameters.py @@ -1,8 +1,7 @@ from enum import Enum -from typing import Optional, Union +from typing import Optional from pydantic import BaseModel -from pydantic.json_schema import SkipJsonSchema class Extension(str, Enum): @@ -48,15 +47,3 @@ class MediaMjpegFeedQueryParams(BaseModel): mask: Optional[int] = None motion: Optional[int] = None regions: Optional[int] = None - - -class MediaRecordingsSummaryQueryParams(BaseModel): - timezone: str = "utc" - cameras: Optional[str] = "all" - - -class MediaRecordingsAvailabilityQueryParams(BaseModel): - cameras: str = "all" - before: Union[float, SkipJsonSchema[None]] = None - after: Union[float, SkipJsonSchema[None]] = None - scale: int = 30 diff --git a/frigate/api/defs/query/recordings_query_parameters.py b/frigate/api/defs/query/recordings_query_parameters.py new file mode 100644 index 000000000..d4f1b0a7b --- /dev/null +++ b/frigate/api/defs/query/recordings_query_parameters.py @@ -0,0 +1,21 @@ +from typing import Optional, Union + +from pydantic import BaseModel +from pydantic.json_schema import SkipJsonSchema + + +class MediaRecordingsSummaryQueryParams(BaseModel): + timezone: str = "utc" + cameras: Optional[str] = "all" + + +class MediaRecordingsAvailabilityQueryParams(BaseModel): + cameras: str = "all" + before: Union[float, SkipJsonSchema[None]] = None + after: Union[float, SkipJsonSchema[None]] = None + scale: int = 30 + + +class RecordingsDeleteQueryParams(BaseModel): + keep: Optional[str] = None + cameras: Optional[str] = "all" diff --git a/frigate/api/defs/tags.py b/frigate/api/defs/tags.py index f804385d1..20e4ac31b 100644 --- a/frigate/api/defs/tags.py +++ b/frigate/api/defs/tags.py @@ -3,13 +3,14 @@ from enum import Enum class Tags(Enum): app = "App" + auth = "Auth" camera = "Camera" - preview = "Preview" + events = "Events" + 
export = "Export" + classification = "Classification" logs = "Logs" media = "Media" notifications = "Notifications" + preview = "Preview" + recordings = "Recordings" review = "Review" - export = "Export" - events = "Events" - classification = "Classification" - auth = "Auth" diff --git a/frigate/api/fastapi_app.py b/frigate/api/fastapi_app.py index 48c97dfaf..27d844b8a 100644 --- a/frigate/api/fastapi_app.py +++ b/frigate/api/fastapi_app.py @@ -22,6 +22,7 @@ from frigate.api import ( media, notification, preview, + record, review, ) from frigate.api.auth import get_jwt_secret, limiter, require_admin_by_default @@ -128,6 +129,7 @@ def create_fastapi_app( app.include_router(export.router) app.include_router(event.router) app.include_router(media.router) + app.include_router(record.router) # App Properties app.frigate_config = frigate_config app.embeddings = embeddings diff --git a/frigate/api/media.py b/frigate/api/media.py index 971bfef83..b488ba360 100644 --- a/frigate/api/media.py +++ b/frigate/api/media.py @@ -8,9 +8,8 @@ import os import subprocess as sp import time from datetime import datetime, timedelta, timezone -from functools import reduce from pathlib import Path as FilePath -from typing import Any, List +from typing import Any from urllib.parse import unquote import cv2 @@ -19,12 +18,11 @@ import pytz from fastapi import APIRouter, Depends, Path, Query, Request, Response from fastapi.responses import FileResponse, JSONResponse, StreamingResponse from pathvalidate import sanitize_filename -from peewee import DoesNotExist, fn, operator +from peewee import DoesNotExist, fn from tzlocal import get_localzone_name from frigate.api.auth import ( allow_any_authenticated, - get_allowed_cameras_for_filter, require_camera_access, ) from frigate.api.defs.query.media_query_parameters import ( @@ -32,8 +30,6 @@ from frigate.api.defs.query.media_query_parameters import ( MediaEventsSnapshotQueryParams, MediaLatestFrameQueryParams, MediaMjpegFeedQueryParams, - 
MediaRecordingsAvailabilityQueryParams, - MediaRecordingsSummaryQueryParams, ) from frigate.api.defs.tags import Tags from frigate.camera.state import CameraState @@ -44,13 +40,11 @@ from frigate.const import ( INSTALL_DIR, MAX_SEGMENT_DURATION, PREVIEW_FRAME_TYPE, - RECORD_DIR, ) from frigate.models import Event, Previews, Recordings, Regions, ReviewSegment from frigate.track.object_processing import TrackedObjectProcessor from frigate.util.file import get_event_thumbnail_bytes from frigate.util.image import get_image_from_recording -from frigate.util.time import get_dst_transitions logger = logging.getLogger(__name__) @@ -397,333 +391,6 @@ async def submit_recording_snapshot_to_plus( ) -@router.get("/recordings/storage", dependencies=[Depends(allow_any_authenticated())]) -def get_recordings_storage_usage(request: Request): - recording_stats = request.app.stats_emitter.get_latest_stats()["service"][ - "storage" - ][RECORD_DIR] - - if not recording_stats: - return JSONResponse({}) - - total_mb = recording_stats["total"] - - camera_usages: dict[str, dict] = ( - request.app.storage_maintainer.calculate_camera_usages() - ) - - for camera_name in camera_usages.keys(): - if camera_usages.get(camera_name, {}).get("usage"): - camera_usages[camera_name]["usage_percent"] = ( - camera_usages.get(camera_name, {}).get("usage", 0) / total_mb - ) * 100 - - return JSONResponse(content=camera_usages) - - -@router.get("/recordings/summary", dependencies=[Depends(allow_any_authenticated())]) -def all_recordings_summary( - request: Request, - params: MediaRecordingsSummaryQueryParams = Depends(), - allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter), -): - """Returns true/false by day indicating if recordings exist""" - - cameras = params.cameras - if cameras != "all": - requested = set(unquote(cameras).split(",")) - filtered = requested.intersection(allowed_cameras) - if not filtered: - return JSONResponse(content={}) - camera_list = list(filtered) - else: - 
camera_list = allowed_cameras - - time_range_query = ( - Recordings.select( - fn.MIN(Recordings.start_time).alias("min_time"), - fn.MAX(Recordings.start_time).alias("max_time"), - ) - .where(Recordings.camera << camera_list) - .dicts() - .get() - ) - - min_time = time_range_query.get("min_time") - max_time = time_range_query.get("max_time") - - if min_time is None or max_time is None: - return JSONResponse(content={}) - - dst_periods = get_dst_transitions(params.timezone, min_time, max_time) - - days: dict[str, bool] = {} - - for period_start, period_end, period_offset in dst_periods: - hours_offset = int(period_offset / 60 / 60) - minutes_offset = int(period_offset / 60 - hours_offset * 60) - period_hour_modifier = f"{hours_offset} hour" - period_minute_modifier = f"{minutes_offset} minute" - - period_query = ( - Recordings.select( - fn.strftime( - "%Y-%m-%d", - fn.datetime( - Recordings.start_time, - "unixepoch", - period_hour_modifier, - period_minute_modifier, - ), - ).alias("day") - ) - .where( - (Recordings.camera << camera_list) - & (Recordings.end_time >= period_start) - & (Recordings.start_time <= period_end) - ) - .group_by( - fn.strftime( - "%Y-%m-%d", - fn.datetime( - Recordings.start_time, - "unixepoch", - period_hour_modifier, - period_minute_modifier, - ), - ) - ) - .order_by(Recordings.start_time.desc()) - .namedtuples() - ) - - for g in period_query: - days[g.day] = True - - return JSONResponse(content=dict(sorted(days.items()))) - - -@router.get( - "/{camera_name}/recordings/summary", dependencies=[Depends(require_camera_access)] -) -async def recordings_summary(camera_name: str, timezone: str = "utc"): - """Returns hourly summary for recordings of given camera""" - - time_range_query = ( - Recordings.select( - fn.MIN(Recordings.start_time).alias("min_time"), - fn.MAX(Recordings.start_time).alias("max_time"), - ) - .where(Recordings.camera == camera_name) - .dicts() - .get() - ) - - min_time = time_range_query.get("min_time") - max_time = 
time_range_query.get("max_time") - - days: dict[str, dict] = {} - - if min_time is None or max_time is None: - return JSONResponse(content=list(days.values())) - - dst_periods = get_dst_transitions(timezone, min_time, max_time) - - for period_start, period_end, period_offset in dst_periods: - hours_offset = int(period_offset / 60 / 60) - minutes_offset = int(period_offset / 60 - hours_offset * 60) - period_hour_modifier = f"{hours_offset} hour" - period_minute_modifier = f"{minutes_offset} minute" - - recording_groups = ( - Recordings.select( - fn.strftime( - "%Y-%m-%d %H", - fn.datetime( - Recordings.start_time, - "unixepoch", - period_hour_modifier, - period_minute_modifier, - ), - ).alias("hour"), - fn.SUM(Recordings.duration).alias("duration"), - fn.SUM(Recordings.motion).alias("motion"), - fn.SUM(Recordings.objects).alias("objects"), - ) - .where( - (Recordings.camera == camera_name) - & (Recordings.end_time >= period_start) - & (Recordings.start_time <= period_end) - ) - .group_by((Recordings.start_time + period_offset).cast("int") / 3600) - .order_by(Recordings.start_time.desc()) - .namedtuples() - ) - - event_groups = ( - Event.select( - fn.strftime( - "%Y-%m-%d %H", - fn.datetime( - Event.start_time, - "unixepoch", - period_hour_modifier, - period_minute_modifier, - ), - ).alias("hour"), - fn.COUNT(Event.id).alias("count"), - ) - .where(Event.camera == camera_name, Event.has_clip) - .where( - (Event.start_time >= period_start) & (Event.start_time <= period_end) - ) - .group_by((Event.start_time + period_offset).cast("int") / 3600) - .namedtuples() - ) - - event_map = {g.hour: g.count for g in event_groups} - - for recording_group in recording_groups: - parts = recording_group.hour.split() - hour = parts[1] - day = parts[0] - events_count = event_map.get(recording_group.hour, 0) - hour_data = { - "hour": hour, - "events": events_count, - "motion": recording_group.motion, - "objects": recording_group.objects, - "duration": round(recording_group.duration), - 
} - if day in days: - # merge counts if already present (edge-case at DST boundary) - days[day]["events"] += events_count or 0 - days[day]["hours"].append(hour_data) - else: - days[day] = { - "events": events_count or 0, - "hours": [hour_data], - "day": day, - } - - return JSONResponse(content=list(days.values())) - - -@router.get("/{camera_name}/recordings", dependencies=[Depends(require_camera_access)]) -async def recordings( - camera_name: str, - after: float = (datetime.now() - timedelta(hours=1)).timestamp(), - before: float = datetime.now().timestamp(), -): - """Return specific camera recordings between the given 'after'/'end' times. If not provided the last hour will be used""" - recordings = ( - Recordings.select( - Recordings.id, - Recordings.start_time, - Recordings.end_time, - Recordings.segment_size, - Recordings.motion, - Recordings.objects, - Recordings.duration, - ) - .where( - Recordings.camera == camera_name, - Recordings.end_time >= after, - Recordings.start_time <= before, - ) - .order_by(Recordings.start_time) - .dicts() - .iterator() - ) - - return JSONResponse(content=list(recordings)) - - -@router.get( - "/recordings/unavailable", - response_model=list[dict], - dependencies=[Depends(allow_any_authenticated())], -) -async def no_recordings( - request: Request, - params: MediaRecordingsAvailabilityQueryParams = Depends(), - allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter), -): - """Get time ranges with no recordings.""" - cameras = params.cameras - if cameras != "all": - requested = set(unquote(cameras).split(",")) - filtered = requested.intersection(allowed_cameras) - if not filtered: - return JSONResponse(content=[]) - cameras = ",".join(filtered) - else: - cameras = allowed_cameras - - before = params.before or datetime.datetime.now().timestamp() - after = ( - params.after - or (datetime.datetime.now() - datetime.timedelta(hours=1)).timestamp() - ) - scale = params.scale - - clauses = [(Recordings.end_time >= after) & 
(Recordings.start_time <= before)] - if cameras != "all": - camera_list = cameras.split(",") - clauses.append((Recordings.camera << camera_list)) - else: - camera_list = allowed_cameras - - # Get recording start times - data: list[Recordings] = ( - Recordings.select(Recordings.start_time, Recordings.end_time) - .where(reduce(operator.and_, clauses)) - .order_by(Recordings.start_time.asc()) - .dicts() - .iterator() - ) - - # Convert recordings to list of (start, end) tuples - recordings = [(r["start_time"], r["end_time"]) for r in data] - - # Iterate through time segments and check if each has any recording - no_recording_segments = [] - current = after - current_gap_start = None - - while current < before: - segment_end = min(current + scale, before) - - # Check if this segment overlaps with any recording - has_recording = any( - rec_start < segment_end and rec_end > current - for rec_start, rec_end in recordings - ) - - if not has_recording: - # This segment has no recordings - if current_gap_start is None: - current_gap_start = current # Start a new gap - else: - # This segment has recordings - if current_gap_start is not None: - # End the current gap and append it - no_recording_segments.append( - {"start_time": int(current_gap_start), "end_time": int(current)} - ) - current_gap_start = None - - current = segment_end - - # Append the last gap if it exists - if current_gap_start is not None: - no_recording_segments.append( - {"start_time": int(current_gap_start), "end_time": int(before)} - ) - - return JSONResponse(content=no_recording_segments) - - @router.get( "/{camera_name}/start/{start_ts}/end/{end_ts}/clip.mp4", dependencies=[Depends(require_camera_access)], diff --git a/frigate/api/record.py b/frigate/api/record.py new file mode 100644 index 000000000..789aa4a80 --- /dev/null +++ b/frigate/api/record.py @@ -0,0 +1,479 @@ +"""Recording APIs.""" + +import logging +from datetime import datetime, timedelta +from functools import reduce +from pathlib import 
Path +from typing import List +from urllib.parse import unquote + +from fastapi import APIRouter, Depends, Request +from fastapi import Path as PathParam +from fastapi.responses import JSONResponse +from peewee import fn, operator + +from frigate.api.auth import ( + allow_any_authenticated, + get_allowed_cameras_for_filter, + require_camera_access, + require_role, +) +from frigate.api.defs.query.recordings_query_parameters import ( + MediaRecordingsAvailabilityQueryParams, + MediaRecordingsSummaryQueryParams, + RecordingsDeleteQueryParams, +) +from frigate.api.defs.response.generic_response import GenericResponse +from frigate.api.defs.tags import Tags +from frigate.const import RECORD_DIR +from frigate.models import Event, Recordings +from frigate.util.time import get_dst_transitions + +logger = logging.getLogger(__name__) + +router = APIRouter(tags=[Tags.recordings]) + + +@router.get("/recordings/storage", dependencies=[Depends(allow_any_authenticated())]) +def get_recordings_storage_usage(request: Request): + recording_stats = request.app.stats_emitter.get_latest_stats()["service"][ + "storage" + ][RECORD_DIR] + + if not recording_stats: + return JSONResponse({}) + + total_mb = recording_stats["total"] + + camera_usages: dict[str, dict] = ( + request.app.storage_maintainer.calculate_camera_usages() + ) + + for camera_name in camera_usages.keys(): + if camera_usages.get(camera_name, {}).get("usage"): + camera_usages[camera_name]["usage_percent"] = ( + camera_usages.get(camera_name, {}).get("usage", 0) / total_mb + ) * 100 + + return JSONResponse(content=camera_usages) + + +@router.get("/recordings/summary", dependencies=[Depends(allow_any_authenticated())]) +def all_recordings_summary( + request: Request, + params: MediaRecordingsSummaryQueryParams = Depends(), + allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter), +): + """Returns true/false by day indicating if recordings exist""" + + cameras = params.cameras + if cameras != "all": + requested 
= set(unquote(cameras).split(",")) + filtered = requested.intersection(allowed_cameras) + if not filtered: + return JSONResponse(content={}) + camera_list = list(filtered) + else: + camera_list = allowed_cameras + + time_range_query = ( + Recordings.select( + fn.MIN(Recordings.start_time).alias("min_time"), + fn.MAX(Recordings.start_time).alias("max_time"), + ) + .where(Recordings.camera << camera_list) + .dicts() + .get() + ) + + min_time = time_range_query.get("min_time") + max_time = time_range_query.get("max_time") + + if min_time is None or max_time is None: + return JSONResponse(content={}) + + dst_periods = get_dst_transitions(params.timezone, min_time, max_time) + + days: dict[str, bool] = {} + + for period_start, period_end, period_offset in dst_periods: + hours_offset = int(period_offset / 60 / 60) + minutes_offset = int(period_offset / 60 - hours_offset * 60) + period_hour_modifier = f"{hours_offset} hour" + period_minute_modifier = f"{minutes_offset} minute" + + period_query = ( + Recordings.select( + fn.strftime( + "%Y-%m-%d", + fn.datetime( + Recordings.start_time, + "unixepoch", + period_hour_modifier, + period_minute_modifier, + ), + ).alias("day") + ) + .where( + (Recordings.camera << camera_list) + & (Recordings.end_time >= period_start) + & (Recordings.start_time <= period_end) + ) + .group_by( + fn.strftime( + "%Y-%m-%d", + fn.datetime( + Recordings.start_time, + "unixepoch", + period_hour_modifier, + period_minute_modifier, + ), + ) + ) + .order_by(Recordings.start_time.desc()) + .namedtuples() + ) + + for g in period_query: + days[g.day] = True + + return JSONResponse(content=dict(sorted(days.items()))) + + +@router.get( + "/{camera_name}/recordings/summary", dependencies=[Depends(require_camera_access)] +) +async def recordings_summary(camera_name: str, timezone: str = "utc"): + """Returns hourly summary for recordings of given camera""" + + time_range_query = ( + Recordings.select( + fn.MIN(Recordings.start_time).alias("min_time"), + 
fn.MAX(Recordings.start_time).alias("max_time"), + ) + .where(Recordings.camera == camera_name) + .dicts() + .get() + ) + + min_time = time_range_query.get("min_time") + max_time = time_range_query.get("max_time") + + days: dict[str, dict] = {} + + if min_time is None or max_time is None: + return JSONResponse(content=list(days.values())) + + dst_periods = get_dst_transitions(timezone, min_time, max_time) + + for period_start, period_end, period_offset in dst_periods: + hours_offset = int(period_offset / 60 / 60) + minutes_offset = int(period_offset / 60 - hours_offset * 60) + period_hour_modifier = f"{hours_offset} hour" + period_minute_modifier = f"{minutes_offset} minute" + + recording_groups = ( + Recordings.select( + fn.strftime( + "%Y-%m-%d %H", + fn.datetime( + Recordings.start_time, + "unixepoch", + period_hour_modifier, + period_minute_modifier, + ), + ).alias("hour"), + fn.SUM(Recordings.duration).alias("duration"), + fn.SUM(Recordings.motion).alias("motion"), + fn.SUM(Recordings.objects).alias("objects"), + ) + .where( + (Recordings.camera == camera_name) + & (Recordings.end_time >= period_start) + & (Recordings.start_time <= period_end) + ) + .group_by((Recordings.start_time + period_offset).cast("int") / 3600) + .order_by(Recordings.start_time.desc()) + .namedtuples() + ) + + event_groups = ( + Event.select( + fn.strftime( + "%Y-%m-%d %H", + fn.datetime( + Event.start_time, + "unixepoch", + period_hour_modifier, + period_minute_modifier, + ), + ).alias("hour"), + fn.COUNT(Event.id).alias("count"), + ) + .where(Event.camera == camera_name, Event.has_clip) + .where( + (Event.start_time >= period_start) & (Event.start_time <= period_end) + ) + .group_by((Event.start_time + period_offset).cast("int") / 3600) + .namedtuples() + ) + + event_map = {g.hour: g.count for g in event_groups} + + for recording_group in recording_groups: + parts = recording_group.hour.split() + hour = parts[1] + day = parts[0] + events_count = event_map.get(recording_group.hour, 0) 
+ hour_data = { + "hour": hour, + "events": events_count, + "motion": recording_group.motion, + "objects": recording_group.objects, + "duration": round(recording_group.duration), + } + if day in days: + # merge counts if already present (edge-case at DST boundary) + days[day]["events"] += events_count or 0 + days[day]["hours"].append(hour_data) + else: + days[day] = { + "events": events_count or 0, + "hours": [hour_data], + "day": day, + } + + return JSONResponse(content=list(days.values())) + + +@router.get("/{camera_name}/recordings", dependencies=[Depends(require_camera_access)]) +async def recordings( + camera_name: str, + after: float = (datetime.now() - timedelta(hours=1)).timestamp(), + before: float = datetime.now().timestamp(), +): + """Return specific camera recordings between the given 'after'/'end' times. If not provided the last hour will be used""" + recordings = ( + Recordings.select( + Recordings.id, + Recordings.start_time, + Recordings.end_time, + Recordings.segment_size, + Recordings.motion, + Recordings.objects, + Recordings.duration, + ) + .where( + Recordings.camera == camera_name, + Recordings.end_time >= after, + Recordings.start_time <= before, + ) + .order_by(Recordings.start_time) + .dicts() + .iterator() + ) + + return JSONResponse(content=list(recordings)) + + +@router.get( + "/recordings/unavailable", + response_model=list[dict], + dependencies=[Depends(allow_any_authenticated())], +) +async def no_recordings( + request: Request, + params: MediaRecordingsAvailabilityQueryParams = Depends(), + allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter), +): + """Get time ranges with no recordings.""" + cameras = params.cameras + if cameras != "all": + requested = set(unquote(cameras).split(",")) + filtered = requested.intersection(allowed_cameras) + if not filtered: + return JSONResponse(content=[]) + cameras = ",".join(filtered) + else: + cameras = allowed_cameras + + before = params.before or 
datetime.datetime.now().timestamp() + after = ( + params.after + or (datetime.datetime.now() - datetime.timedelta(hours=1)).timestamp() + ) + scale = params.scale + + clauses = [(Recordings.end_time >= after) & (Recordings.start_time <= before)] + if cameras != "all": + camera_list = cameras.split(",") + clauses.append((Recordings.camera << camera_list)) + else: + camera_list = allowed_cameras + + # Get recording start times + data: list[Recordings] = ( + Recordings.select(Recordings.start_time, Recordings.end_time) + .where(reduce(operator.and_, clauses)) + .order_by(Recordings.start_time.asc()) + .dicts() + .iterator() + ) + + # Convert recordings to list of (start, end) tuples + recordings = [(r["start_time"], r["end_time"]) for r in data] + + # Iterate through time segments and check if each has any recording + no_recording_segments = [] + current = after + current_gap_start = None + + while current < before: + segment_end = min(current + scale, before) + + # Check if this segment overlaps with any recording + has_recording = any( + rec_start < segment_end and rec_end > current + for rec_start, rec_end in recordings + ) + + if not has_recording: + # This segment has no recordings + if current_gap_start is None: + current_gap_start = current # Start a new gap + else: + # This segment has recordings + if current_gap_start is not None: + # End the current gap and append it + no_recording_segments.append( + {"start_time": int(current_gap_start), "end_time": int(current)} + ) + current_gap_start = None + + current = segment_end + + # Append the last gap if it exists + if current_gap_start is not None: + no_recording_segments.append( + {"start_time": int(current_gap_start), "end_time": int(before)} + ) + + return JSONResponse(content=no_recording_segments) + + +@router.delete( + "/recordings/start/{start}/end/{end}", + response_model=GenericResponse, + dependencies=[Depends(require_role(["admin"]))], + summary="Delete recordings", + description="""Deletes recordings 
within the specified time range. + Recordings can be filtered by cameras and kept based on motion, objects, or audio attributes. + """, +) +async def delete_recordings( + start: float = PathParam(..., description="Start timestamp (unix)"), + end: float = PathParam(..., description="End timestamp (unix)"), + params: RecordingsDeleteQueryParams = Depends(), + allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter), +): + """Delete recordings in the specified time range.""" + if start >= end: + return JSONResponse( + content={ + "success": False, + "message": "Start time must be less than end time.", + }, + status_code=400, + ) + + cameras = params.cameras + + if cameras != "all": + requested = set(cameras.split(",")) + filtered = requested.intersection(allowed_cameras) + + if not filtered: + return JSONResponse( + content={ + "success": False, + "message": "No valid cameras found in the request.", + }, + status_code=400, + ) + + camera_list = list(filtered) + else: + camera_list = allowed_cameras + + # Parse keep parameter + keep_set = set() + + if params.keep: + keep_set = set(params.keep.split(",")) + + # Build query to find overlapping recordings + clauses = [ + ( + Recordings.start_time.between(start, end) + | Recordings.end_time.between(start, end) + | ((start > Recordings.start_time) & (end < Recordings.end_time)) + ), + (Recordings.camera << camera_list), + ] + + keep_clauses = [] + + if "motion" in keep_set: + keep_clauses.append(Recordings.motion.is_null(False) & (Recordings.motion > 0)) + + if "object" in keep_set: + keep_clauses.append( + Recordings.objects.is_null(False) & (Recordings.objects > 0) + ) + + if "audio" in keep_set: + keep_clauses.append(Recordings.dBFS.is_null(False)) + + if keep_clauses: + keep_condition = reduce(operator.or_, keep_clauses) + clauses.append(~keep_condition) + + recordings_to_delete = ( + Recordings.select(Recordings.id, Recordings.path) + .where(reduce(operator.and_, clauses)) + .dicts() + .iterator() + ) + + 
recording_ids = [] + deleted_count = 0 + error_count = 0 + + for recording in recordings_to_delete: + recording_ids.append(recording["id"]) + + try: + Path(recording["path"]).unlink(missing_ok=True) + deleted_count += 1 + except Exception as e: + logger.error(f"Failed to delete recording file {recording['path']}: {e}") + error_count += 1 + + if recording_ids: + max_deletes = 100000 + recording_ids_list = list(recording_ids) + + for i in range(0, len(recording_ids_list), max_deletes): + Recordings.delete().where( + Recordings.id << recording_ids_list[i : i + max_deletes] + ).execute() + + message = f"Successfully deleted {deleted_count} recording(s)." + + if error_count > 0: + message += f" {error_count} file deletion error(s) occurred." + + return JSONResponse( + content={"success": True, "message": message}, + status_code=200, + ) From 63e7bf8b28c3d889a7956cef33e62600f673b51a Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Sun, 4 Jan 2026 12:21:55 -0600 Subject: [PATCH 15/56] Add media sync API endpoint (#21526) * add media cleanup functions * add endpoint * remove scheduled sync recordings from cleanup * move to utils dir * tweak import * remove sync_recordings and add config migrator * remove sync_recordings * docs * remove key * clean up docs * docs fix * docs tweak --- docs/docs/configuration/record.md | 33 +- docs/docs/configuration/reference.md | 2 - frigate/api/app.py | 65 +- frigate/api/defs/request/app_body.py | 17 +- frigate/config/camera/record.py | 3 - frigate/record/cleanup.py | 16 +- frigate/record/util.py | 147 ---- frigate/util/config.py | 30 +- frigate/util/media.py | 785 ++++++++++++++++++++++ web/public/locales/en/config/cameras.json | 5 +- web/public/locales/en/config/record.json | 5 +- web/src/types/frigateConfig.ts | 2 - 12 files changed, 922 insertions(+), 188 deletions(-) delete mode 100644 frigate/record/util.py create mode 100644 frigate/util/media.py diff --git 
a/docs/docs/configuration/record.md b/docs/docs/configuration/record.md index ddbf0f612..f25eac863 100644 --- a/docs/docs/configuration/record.md +++ b/docs/docs/configuration/record.md @@ -141,6 +141,8 @@ record: When using `hwaccel_args`, hardware encoding is used for timelapse generation. This setting can be overridden for a specific camera (e.g., when camera resolution exceeds hardware encoder limits); set `cameras..record.export.hwaccel_args` with the appropriate settings. Using an unrecognized value or empty string will fall back to software encoding (libx264). +::: + :::tip The encoder determines its own behavior so the resulting file size may be undesirably large. @@ -152,19 +154,36 @@ To reduce the output file size the ffmpeg parameter `-qp n` can be utilized (whe Apple devices running the Safari browser may fail to playback h.265 recordings. The [apple compatibility option](../configuration/camera_specific.md#h265-cameras-via-safari) should be used to ensure seamless playback on Apple devices. -## Syncing Recordings With Disk +## Syncing Media Files With Disk -In some cases the recordings files may be deleted but Frigate will not know this has happened. Recordings sync can be enabled which will tell Frigate to check the file system and delete any db entries for files which don't exist. +Media files (event snapshots, event thumbnails, review thumbnails, previews, exports, and recordings) can become orphaned when database entries are deleted but the corresponding files remain on disk. -```yaml -record: - sync_recordings: True +This feature checks the file system for media files and removes any that are not referenced in the database. + +The API endpoint `POST /api/media/sync` can be used to trigger a media sync. The endpoint accepts a JSON request body to control the operation. 
+ +Request body schema (JSON): + +```json +{ + "dry_run": true, + "media_types": ["all"], + "force": false +} ``` -This feature is meant to fix variations in files, not completely delete entries in the database. If you delete all of your media, don't use `sync_recordings`, just stop Frigate, delete the `frigate.db` database, and restart. +- `dry_run` (boolean): If `true` (default) the service will only report orphaned files without deleting them. Set to `false` to allow deletions. +- `media_types` (array of strings): Which media types to sync. Use `"all"` to sync everything, or a list of one or more of: + - `event_snapshots` + - `event_thumbnails` + - `review_thumbnails` + - `previews` + - `exports` + - `recordings` +- `force` (boolean): If `true` the safety threshold is bypassed and deletions proceed even if the operation would remove a large proportion of files. Use with extreme caution. :::warning -The sync operation uses considerable CPU resources and in most cases is not needed, only enable when necessary. +This operation uses considerable CPU resources and includes a safety threshold that aborts if more than 50% of files would be deleted. Only run when necessary. If you set `force: true` the safety threshold will be bypassed; do not use `force` unless you are certain the deletions are intended. ::: diff --git a/docs/docs/configuration/reference.md b/docs/docs/configuration/reference.md index ad1695f22..b287a7e9b 100644 --- a/docs/docs/configuration/reference.md +++ b/docs/docs/configuration/reference.md @@ -510,8 +510,6 @@ record: # Optional: Number of minutes to wait between cleanup runs (default: shown below) # This can be used to reduce the frequency of deleting recording segments from disk if you want to minimize i/o expire_interval: 60 - # Optional: Two-way sync recordings database with disk on startup and once a day (default: shown below). 
- sync_recordings: False # Optional: Continuous retention settings continuous: # Optional: Number of days to retain recordings regardless of tracked objects or motion (default: shown below) diff --git a/frigate/api/app.py b/frigate/api/app.py index 3a91c8ebb..7e97a0570 100644 --- a/frigate/api/app.py +++ b/frigate/api/app.py @@ -30,7 +30,7 @@ from frigate.api.auth import ( require_role, ) from frigate.api.defs.query.app_query_parameters import AppTimelineHourlyQueryParameters -from frigate.api.defs.request.app_body import AppConfigSetBody +from frigate.api.defs.request.app_body import AppConfigSetBody, MediaSyncBody from frigate.api.defs.tags import Tags from frigate.config import FrigateConfig from frigate.config.camera.updater import ( @@ -47,6 +47,7 @@ from frigate.util.builtin import ( update_yaml_file_bulk, ) from frigate.util.config import find_config_file +from frigate.util.media import sync_all_media from frigate.util.services import ( get_nvidia_driver_info, process_logs, @@ -607,6 +608,68 @@ def restart(): ) +@router.post("/media/sync", dependencies=[Depends(require_role(["admin"]))]) +def sync_media(body: MediaSyncBody = Body(...)): + """Sync media files with database - remove orphaned files. + + Syncs specified media types: event snapshots, event thumbnails, review thumbnails, + previews, exports, and/or recordings. + + Args: + body: MediaSyncBody with dry_run flag and media_types list. + media_types can include: 'all', 'event_snapshots', 'event_thumbnails', + 'review_thumbnails', 'previews', 'exports', 'recordings' + + Returns: + JSON response with sync results for each requested media type. 
+ """ + try: + results = sync_all_media( + dry_run=body.dry_run, media_types=body.media_types, force=body.force + ) + + # Check if any operations were aborted or had errors + has_errors = False + for result_name in [ + "event_snapshots", + "event_thumbnails", + "review_thumbnails", + "previews", + "exports", + "recordings", + ]: + result = getattr(results, result_name, None) + if result and (result.aborted or result.error): + has_errors = True + break + + content = { + "success": not has_errors, + "dry_run": body.dry_run, + "media_types": body.media_types, + "results": results.to_dict(), + } + + if has_errors: + content["message"] = ( + "Some sync operations were aborted or had errors; check logs for details." + ) + + return JSONResponse( + content=content, + status_code=200, + ) + except Exception as e: + logger.error(f"Error syncing media files: {e}") + return JSONResponse( + content={ + "success": False, + "message": f"Error syncing media files: {str(e)}", + }, + status_code=500, + ) + + @router.get("/labels", dependencies=[Depends(allow_any_authenticated())]) def get_labels(camera: str = ""): try: diff --git a/frigate/api/defs/request/app_body.py b/frigate/api/defs/request/app_body.py index c4129d8da..6059daf6e 100644 --- a/frigate/api/defs/request/app_body.py +++ b/frigate/api/defs/request/app_body.py @@ -1,6 +1,6 @@ -from typing import Any, Dict, Optional +from typing import Any, Dict, List, Optional -from pydantic import BaseModel +from pydantic import BaseModel, Field class AppConfigSetBody(BaseModel): @@ -27,3 +27,16 @@ class AppPostLoginBody(BaseModel): class AppPutRoleBody(BaseModel): role: str + + +class MediaSyncBody(BaseModel): + dry_run: bool = Field( + default=True, description="If True, only report orphans without deleting them" + ) + media_types: List[str] = Field( + default=["all"], + description="Types of media to sync: 'all', 'event_snapshots', 'event_thumbnails', 'review_thumbnails', 'previews', 'exports', 'recordings'", + ) + force: bool = 
Field( + default=False, description="If True, bypass safety threshold checks" + ) diff --git a/frigate/config/camera/record.py b/frigate/config/camera/record.py index 90881f448..21816523a 100644 --- a/frigate/config/camera/record.py +++ b/frigate/config/camera/record.py @@ -77,9 +77,6 @@ class RecordExportConfig(FrigateBaseModel): class RecordConfig(FrigateBaseModel): enabled: bool = Field(default=False, title="Enable record on all cameras.") - sync_recordings: bool = Field( - default=False, title="Sync recordings with disk on startup and once a day." - ) expire_interval: int = Field( default=60, title="Number of minutes to wait between cleanup runs.", diff --git a/frigate/record/cleanup.py b/frigate/record/cleanup.py index 94dd43eba..29c68a53c 100644 --- a/frigate/record/cleanup.py +++ b/frigate/record/cleanup.py @@ -13,9 +13,8 @@ from playhouse.sqlite_ext import SqliteExtDatabase from frigate.config import CameraConfig, FrigateConfig, RetainModeEnum from frigate.const import CACHE_DIR, CLIPS_DIR, MAX_WAL_SIZE, RECORD_DIR from frigate.models import Previews, Recordings, ReviewSegment, UserReviewStatus -from frigate.record.util import remove_empty_directories, sync_recordings from frigate.util.builtin import clear_and_unlink -from frigate.util.time import get_tomorrow_at_time +from frigate.util.media import remove_empty_directories logger = logging.getLogger(__name__) @@ -350,11 +349,6 @@ class RecordingCleanup(threading.Thread): logger.debug("End expire recordings.") def run(self) -> None: - # on startup sync recordings with disk if enabled - if self.config.record.sync_recordings: - sync_recordings(limited=False) - next_sync = get_tomorrow_at_time(3) - # Expire tmp clips every minute, recordings and clean directories every hour. 
for counter in itertools.cycle(range(self.config.record.expire_interval)): if self.stop_event.wait(60): @@ -363,14 +357,6 @@ class RecordingCleanup(threading.Thread): self.clean_tmp_previews() - if ( - self.config.record.sync_recordings - and datetime.datetime.now().astimezone(datetime.timezone.utc) - > next_sync - ): - sync_recordings(limited=True) - next_sync = get_tomorrow_at_time(3) - if counter == 0: self.clean_tmp_clips() self.expire_recordings() diff --git a/frigate/record/util.py b/frigate/record/util.py deleted file mode 100644 index 6a91c1aaf..000000000 --- a/frigate/record/util.py +++ /dev/null @@ -1,147 +0,0 @@ -"""Recordings Utilities.""" - -import datetime -import logging -import os - -from peewee import DatabaseError, chunked - -from frigate.const import RECORD_DIR -from frigate.models import Recordings, RecordingsToDelete - -logger = logging.getLogger(__name__) - - -def remove_empty_directories(directory: str) -> None: - # list all directories recursively and sort them by path, - # longest first - paths = sorted( - [x[0] for x in os.walk(directory)], - key=lambda p: len(str(p)), - reverse=True, - ) - for path in paths: - # don't delete the parent - if path == directory: - continue - if len(os.listdir(path)) == 0: - os.rmdir(path) - - -def sync_recordings(limited: bool) -> None: - """Check the db for stale recordings entries that don't exist in the filesystem.""" - - def delete_db_entries_without_file(check_timestamp: float) -> bool: - """Delete db entries where file was deleted outside of frigate.""" - - if limited: - recordings = Recordings.select(Recordings.id, Recordings.path).where( - Recordings.start_time >= check_timestamp - ) - else: - # get all recordings in the db - recordings = Recordings.select(Recordings.id, Recordings.path) - - # Use pagination to process records in chunks - page_size = 1000 - num_pages = (recordings.count() + page_size - 1) // page_size - recordings_to_delete = set() - - for page in range(num_pages): - for recording in 
recordings.paginate(page, page_size): - if not os.path.exists(recording.path): - recordings_to_delete.add(recording.id) - - if len(recordings_to_delete) == 0: - return True - - logger.info( - f"Deleting {len(recordings_to_delete)} recording DB entries with missing files" - ) - - # convert back to list of dictionaries for insertion - recordings_to_delete = [ - {"id": recording_id} for recording_id in recordings_to_delete - ] - - if float(len(recordings_to_delete)) / max(1, recordings.count()) > 0.5: - logger.warning( - f"Deleting {(len(recordings_to_delete) / max(1, recordings.count()) * 100):.2f}% of recordings DB entries, could be due to configuration error. Aborting..." - ) - return False - - # create a temporary table for deletion - RecordingsToDelete.create_table(temporary=True) - - # insert ids to the temporary table - max_inserts = 1000 - for batch in chunked(recordings_to_delete, max_inserts): - RecordingsToDelete.insert_many(batch).execute() - - try: - # delete records in the main table that exist in the temporary table - query = Recordings.delete().where( - Recordings.id.in_(RecordingsToDelete.select(RecordingsToDelete.id)) - ) - query.execute() - except DatabaseError as e: - logger.error(f"Database error during recordings db cleanup: {e}") - - return True - - def delete_files_without_db_entry(files_on_disk: list[str]): - """Delete files where file is not inside frigate db.""" - files_to_delete = [] - - for file in files_on_disk: - if not Recordings.select().where(Recordings.path == file).exists(): - files_to_delete.append(file) - - if len(files_to_delete) == 0: - return True - - logger.info( - f"Deleting {len(files_to_delete)} recordings files with missing DB entries" - ) - - if float(len(files_to_delete)) / max(1, len(files_on_disk)) > 0.5: - logger.debug( - f"Deleting {(len(files_to_delete) / max(1, len(files_on_disk)) * 100):.2f}% of recordings DB entries, could be due to configuration error. Aborting..." 
- ) - return False - - for file in files_to_delete: - os.unlink(file) - - return True - - logger.debug("Start sync recordings.") - - # start checking on the hour 36 hours ago - check_point = datetime.datetime.now().replace( - minute=0, second=0, microsecond=0 - ).astimezone(datetime.timezone.utc) - datetime.timedelta(hours=36) - db_success = delete_db_entries_without_file(check_point.timestamp()) - - # only try to cleanup files if db cleanup was successful - if db_success: - if limited: - # get recording files from last 36 hours - hour_check = f"{RECORD_DIR}/{check_point.strftime('%Y-%m-%d/%H')}" - files_on_disk = { - os.path.join(root, file) - for root, _, files in os.walk(RECORD_DIR) - for file in files - if root > hour_check - } - else: - # get all recordings files on disk and put them in a set - files_on_disk = { - os.path.join(root, file) - for root, _, files in os.walk(RECORD_DIR) - for file in files - } - - delete_files_without_db_entry(files_on_disk) - - logger.debug("End sync recordings.") diff --git a/frigate/util/config.py b/frigate/util/config.py index c3d796397..b9e3fccb8 100644 --- a/frigate/util/config.py +++ b/frigate/util/config.py @@ -13,7 +13,7 @@ from frigate.util.services import get_video_properties logger = logging.getLogger(__name__) -CURRENT_CONFIG_VERSION = "0.17-0" +CURRENT_CONFIG_VERSION = "0.18-0" DEFAULT_CONFIG_FILE = os.path.join(CONFIG_DIR, "config.yml") @@ -98,6 +98,13 @@ def migrate_frigate_config(config_file: str): yaml.dump(new_config, f) previous_version = "0.17-0" + if previous_version < "0.18-0": + logger.info(f"Migrating frigate config from {previous_version} to 0.18-0...") + new_config = migrate_018_0(config) + with open(config_file, "w") as f: + yaml.dump(new_config, f) + previous_version = "0.18-0" + logger.info("Finished frigate config migration...") @@ -427,6 +434,27 @@ def migrate_017_0(config: dict[str, dict[str, Any]]) -> dict[str, dict[str, Any] return new_config +def migrate_018_0(config: dict[str, dict[str, Any]]) 
-> dict[str, dict[str, Any]]: + """Handle migrating frigate config to 0.18-0""" + new_config = config.copy() + + # Remove deprecated sync_recordings from global record config + if new_config.get("record", {}).get("sync_recordings") is not None: + del new_config["record"]["sync_recordings"] + + # Remove deprecated sync_recordings from camera-specific record configs + for name, camera in config.get("cameras", {}).items(): + camera_config: dict[str, dict[str, Any]] = camera.copy() + + if camera_config.get("record", {}).get("sync_recordings") is not None: + del camera_config["record"]["sync_recordings"] + + new_config["cameras"][name] = camera_config + + new_config["version"] = "0.18-0" + return new_config + + def get_relative_coordinates( mask: Optional[Union[str, list]], frame_shape: tuple[int, int] ) -> Union[str, list]: diff --git a/frigate/util/media.py b/frigate/util/media.py new file mode 100644 index 000000000..a31b93e91 --- /dev/null +++ b/frigate/util/media.py @@ -0,0 +1,785 @@ +"""Recordings Utilities.""" + +import datetime +import logging +import os +from dataclasses import dataclass, field + +from peewee import DatabaseError, chunked + +from frigate.const import CLIPS_DIR, EXPORT_DIR, RECORD_DIR, THUMB_DIR +from frigate.models import ( + Event, + Export, + Previews, + Recordings, + RecordingsToDelete, + ReviewSegment, +) + +logger = logging.getLogger(__name__) + + +# Safety threshold - abort if more than 50% of files would be deleted +SAFETY_THRESHOLD = 0.5 + + +@dataclass +class SyncResult: + """Result of a sync operation.""" + + media_type: str + files_checked: int = 0 + orphans_found: int = 0 + orphans_deleted: int = 0 + orphan_paths: list[str] = field(default_factory=list) + aborted: bool = False + error: str | None = None + + def to_dict(self) -> dict: + return { + "media_type": self.media_type, + "files_checked": self.files_checked, + "orphans_found": self.orphans_found, + "orphans_deleted": self.orphans_deleted, + "aborted": self.aborted, + "error": 
self.error, + } + + +def remove_empty_directories(directory: str) -> None: + # list all directories recursively and sort them by path, + # longest first + paths = sorted( + [x[0] for x in os.walk(directory)], + key=lambda p: len(str(p)), + reverse=True, + ) + for path in paths: + # don't delete the parent + if path == directory: + continue + if len(os.listdir(path)) == 0: + os.rmdir(path) + + +def sync_recordings( + limited: bool = False, dry_run: bool = False, force: bool = False +) -> SyncResult: + """Sync recordings between the database and disk using the SyncResult format.""" + + result = SyncResult(media_type="recordings") + + try: + logger.debug("Start sync recordings.") + + # start checking on the hour 36 hours ago + check_point = datetime.datetime.now().replace( + minute=0, second=0, microsecond=0 + ).astimezone(datetime.timezone.utc) - datetime.timedelta(hours=36) + + # Gather DB recordings to inspect + if limited: + recordings_query = Recordings.select(Recordings.id, Recordings.path).where( + Recordings.start_time >= check_point.timestamp() + ) + else: + recordings_query = Recordings.select(Recordings.id, Recordings.path) + + recordings_count = recordings_query.count() + page_size = 1000 + num_pages = (recordings_count + page_size - 1) // page_size + recordings_to_delete: list[dict] = [] + + for page in range(num_pages): + for recording in recordings_query.paginate(page, page_size): + if not os.path.exists(recording.path): + recordings_to_delete.append( + {"id": recording.id, "path": recording.path} + ) + + result.files_checked += recordings_count + result.orphans_found += len(recordings_to_delete) + result.orphan_paths.extend( + [ + recording["path"] + for recording in recordings_to_delete + if recording.get("path") + ] + ) + + if ( + recordings_count + and len(recordings_to_delete) / recordings_count > SAFETY_THRESHOLD + ): + if force: + logger.warning( + f"Deleting {(len(recordings_to_delete) / max(1, recordings_count) * 100):.2f}% of recordings DB 
entries (force=True, bypassing safety threshold)" + ) + else: + logger.warning( + f"Deleting {(len(recordings_to_delete) / max(1, recordings_count) * 100):.2f}% of recordings DB entries, could be due to configuration error. Aborting..." + ) + result.aborted = True + return result + + if recordings_to_delete and not dry_run: + logger.info( + f"Deleting {len(recordings_to_delete)} recording DB entries with missing files" + ) + + RecordingsToDelete.create_table(temporary=True) + + max_inserts = 1000 + for batch in chunked(recordings_to_delete, max_inserts): + RecordingsToDelete.insert_many(batch).execute() + + try: + deleted = ( + Recordings.delete() + .where( + Recordings.id.in_( + RecordingsToDelete.select(RecordingsToDelete.id) + ) + ) + .execute() + ) + result.orphans_deleted += int(deleted) + except DatabaseError as e: + logger.error(f"Database error during recordings db cleanup: {e}") + result.error = str(e) + result.aborted = True + return result + + if result.aborted: + logger.warning("Recording DB sync aborted; skipping file cleanup.") + return result + + # Only try to cleanup files if db cleanup was successful or dry_run + if limited: + # get recording files from last 36 hours + hour_check = f"{RECORD_DIR}/{check_point.strftime('%Y-%m-%d/%H')}" + files_on_disk = { + os.path.join(root, file) + for root, _, files in os.walk(RECORD_DIR) + for file in files + if root > hour_check + } + else: + # get all recordings files on disk and put them in a set + files_on_disk = { + os.path.join(root, file) + for root, _, files in os.walk(RECORD_DIR) + for file in files + } + + result.files_checked += len(files_on_disk) + + files_to_delete: list[str] = [] + for file in files_on_disk: + if not Recordings.select().where(Recordings.path == file).exists(): + files_to_delete.append(file) + + result.orphans_found += len(files_to_delete) + result.orphan_paths.extend(files_to_delete) + + if ( + files_on_disk + and len(files_to_delete) / len(files_on_disk) > SAFETY_THRESHOLD + ): + 
if force: + logger.warning( + f"Deleting {(len(files_to_delete) / max(1, len(files_on_disk)) * 100):.2f}% of recordings files (force=True, bypassing safety threshold)" + ) + else: + logger.warning( + f"Deleting {(len(files_to_delete) / max(1, len(files_on_disk)) * 100):.2f}% of recordings files, could be due to configuration error. Aborting..." + ) + result.aborted = True + return result + + if files_to_delete and not dry_run: + logger.info( + f"Deleting {len(files_to_delete)} recordings files with missing DB entries" + ) + for file in files_to_delete: + try: + os.unlink(file) + result.orphans_deleted += 1 + except OSError as e: + logger.error(f"Failed to delete {file}: {e}") + + logger.debug("End sync recordings.") + + except Exception as e: + logger.error(f"Error syncing recordings: {e}") + result.error = str(e) + + return result + + +def sync_event_snapshots(dry_run: bool = False, force: bool = False) -> SyncResult: + """Sync event snapshots - delete files not referenced by any event. 
+ + Event snapshots are stored at: CLIPS_DIR/{camera}-{event_id}.jpg + Also checks for clean variants: {camera}-{event_id}-clean.webp and -clean.png + """ + result = SyncResult(media_type="event_snapshots") + + try: + # Get all event IDs with snapshots from DB + events_with_snapshots = set( + f"{e.camera}-{e.id}" + for e in Event.select(Event.id, Event.camera).where( + Event.has_snapshot == True + ) + ) + + # Find snapshot files on disk (directly in CLIPS_DIR, not subdirectories) + snapshot_files: list[tuple[str, str]] = [] # (full_path, base_name) + if os.path.isdir(CLIPS_DIR): + for file in os.listdir(CLIPS_DIR): + file_path = os.path.join(CLIPS_DIR, file) + if os.path.isfile(file_path) and file.endswith( + (".jpg", "-clean.webp", "-clean.png") + ): + # Extract base name (camera-event_id) from filename + base_name = file + for suffix in ["-clean.webp", "-clean.png", ".jpg"]: + if file.endswith(suffix): + base_name = file[: -len(suffix)] + break + snapshot_files.append((file_path, base_name)) + + result.files_checked = len(snapshot_files) + + # Find orphans + orphans: list[str] = [] + for file_path, base_name in snapshot_files: + if base_name not in events_with_snapshots: + orphans.append(file_path) + + result.orphans_found = len(orphans) + result.orphan_paths = orphans + + if len(orphans) == 0: + return result + + # Safety check + if ( + result.files_checked > 0 + and len(orphans) / result.files_checked > SAFETY_THRESHOLD + ): + if force: + logger.warning( + f"Event snapshots sync: Would delete {len(orphans)}/{result.files_checked} " + f"({len(orphans) / result.files_checked * 100:.2f}%) files (force=True, bypassing safety threshold)." + ) + else: + logger.warning( + f"Event snapshots sync: Would delete {len(orphans)}/{result.files_checked} " + f"({len(orphans) / result.files_checked * 100:.2f}%) files. " + "Aborting due to safety threshold." 
+ ) + result.aborted = True + return result + + if dry_run: + logger.info( + f"Event snapshots sync (dry run): Found {len(orphans)} orphaned files" + ) + return result + + # Delete orphans + logger.info(f"Deleting {len(orphans)} orphaned event snapshot files") + for file_path in orphans: + try: + os.unlink(file_path) + result.orphans_deleted += 1 + except OSError as e: + logger.error(f"Failed to delete {file_path}: {e}") + + except Exception as e: + logger.error(f"Error syncing event snapshots: {e}") + result.error = str(e) + + return result + + +def sync_event_thumbnails(dry_run: bool = False, force: bool = False) -> SyncResult: + """Sync event thumbnails - delete files not referenced by any event. + + Event thumbnails are stored at: THUMB_DIR/{camera}/{event_id}.webp + Only events without inline thumbnail (thumbnail field is None/empty) use files. + """ + result = SyncResult(media_type="event_thumbnails") + + try: + # Get all events that use file-based thumbnails + # Events with thumbnail field populated don't need files + events_with_file_thumbs = set( + (e.camera, e.id) + for e in Event.select(Event.id, Event.camera, Event.thumbnail).where( + (Event.thumbnail.is_null(True)) | (Event.thumbnail == "") + ) + ) + + # Find thumbnail files on disk + thumbnail_files: list[ + tuple[str, str, str] + ] = [] # (full_path, camera, event_id) + if os.path.isdir(THUMB_DIR): + for camera_dir in os.listdir(THUMB_DIR): + camera_path = os.path.join(THUMB_DIR, camera_dir) + if not os.path.isdir(camera_path): + continue + for file in os.listdir(camera_path): + if file.endswith(".webp"): + event_id = file[:-5] # Remove .webp + file_path = os.path.join(camera_path, file) + thumbnail_files.append((file_path, camera_dir, event_id)) + + result.files_checked = len(thumbnail_files) + + # Find orphans - files where event doesn't exist or event has inline thumbnail + orphans: list[str] = [] + for file_path, camera, event_id in thumbnail_files: + if (camera, event_id) not in 
events_with_file_thumbs: + # Check if event exists with inline thumbnail + event_exists = Event.select().where(Event.id == event_id).exists() + if not event_exists: + orphans.append(file_path) + # If event exists with inline thumbnail, the file is also orphaned + elif event_exists: + event = Event.get_or_none(Event.id == event_id) + if event and event.thumbnail: + orphans.append(file_path) + + result.orphans_found = len(orphans) + result.orphan_paths = orphans + + if len(orphans) == 0: + return result + + # Safety check + if ( + result.files_checked > 0 + and len(orphans) / result.files_checked > SAFETY_THRESHOLD + ): + if force: + logger.warning( + f"Event thumbnails sync: Would delete {len(orphans)}/{result.files_checked} " + f"({len(orphans) / result.files_checked * 100:.2f}%) files (force=True, bypassing safety threshold)." + ) + else: + logger.warning( + f"Event thumbnails sync: Would delete {len(orphans)}/{result.files_checked} " + f"({len(orphans) / result.files_checked * 100:.2f}%) files. " + "Aborting due to safety threshold." + ) + result.aborted = True + return result + + if dry_run: + logger.info( + f"Event thumbnails sync (dry run): Found {len(orphans)} orphaned files" + ) + return result + + # Delete orphans + logger.info(f"Deleting {len(orphans)} orphaned event thumbnail files") + for file_path in orphans: + try: + os.unlink(file_path) + result.orphans_deleted += 1 + except OSError as e: + logger.error(f"Failed to delete {file_path}: {e}") + + except Exception as e: + logger.error(f"Error syncing event thumbnails: {e}") + result.error = str(e) + + return result + + +def sync_review_thumbnails(dry_run: bool = False, force: bool = False) -> SyncResult: + """Sync review segment thumbnails - delete files not referenced by any review segment. 
+ + Review thumbnails are stored at: CLIPS_DIR/review/thumb-{camera}-{review_id}.webp + The full path is stored in ReviewSegment.thumb_path + """ + result = SyncResult(media_type="review_thumbnails") + + try: + # Get all thumb paths from DB + review_thumb_paths = set( + r.thumb_path + for r in ReviewSegment.select(ReviewSegment.thumb_path) + if r.thumb_path + ) + + # Find review thumbnail files on disk + review_dir = os.path.join(CLIPS_DIR, "review") + thumbnail_files: list[str] = [] + if os.path.isdir(review_dir): + for file in os.listdir(review_dir): + if file.startswith("thumb-") and file.endswith(".webp"): + file_path = os.path.join(review_dir, file) + thumbnail_files.append(file_path) + + result.files_checked = len(thumbnail_files) + + # Find orphans + orphans: list[str] = [] + for file_path in thumbnail_files: + if file_path not in review_thumb_paths: + orphans.append(file_path) + + result.orphans_found = len(orphans) + result.orphan_paths = orphans + + if len(orphans) == 0: + return result + + # Safety check + if ( + result.files_checked > 0 + and len(orphans) / result.files_checked > SAFETY_THRESHOLD + ): + if force: + logger.warning( + f"Review thumbnails sync: Would delete {len(orphans)}/{result.files_checked} " + f"({len(orphans) / result.files_checked * 100:.2f}%) files (force=True, bypassing safety threshold)." + ) + else: + logger.warning( + f"Review thumbnails sync: Would delete {len(orphans)}/{result.files_checked} " + f"({len(orphans) / result.files_checked * 100:.2f}%) files. " + "Aborting due to safety threshold." 
+ ) + result.aborted = True + return result + + if dry_run: + logger.info( + f"Review thumbnails sync (dry run): Found {len(orphans)} orphaned files" + ) + return result + + # Delete orphans + logger.info(f"Deleting {len(orphans)} orphaned review thumbnail files") + for file_path in orphans: + try: + os.unlink(file_path) + result.orphans_deleted += 1 + except OSError as e: + logger.error(f"Failed to delete {file_path}: {e}") + + except Exception as e: + logger.error(f"Error syncing review thumbnails: {e}") + result.error = str(e) + + return result + + +def sync_previews(dry_run: bool = False, force: bool = False) -> SyncResult: + """Sync preview files - delete files not referenced by any preview record. + + Previews are stored at: CLIPS_DIR/previews/{camera}/*.mp4 + The full path is stored in Previews.path + """ + result = SyncResult(media_type="previews") + + try: + # Get all preview paths from DB + preview_paths = set(p.path for p in Previews.select(Previews.path) if p.path) + + # Find preview files on disk + previews_dir = os.path.join(CLIPS_DIR, "previews") + preview_files: list[str] = [] + if os.path.isdir(previews_dir): + for camera_dir in os.listdir(previews_dir): + camera_path = os.path.join(previews_dir, camera_dir) + if not os.path.isdir(camera_path): + continue + for file in os.listdir(camera_path): + if file.endswith(".mp4"): + file_path = os.path.join(camera_path, file) + preview_files.append(file_path) + + result.files_checked = len(preview_files) + + # Find orphans + orphans: list[str] = [] + for file_path in preview_files: + if file_path not in preview_paths: + orphans.append(file_path) + + result.orphans_found = len(orphans) + result.orphan_paths = orphans + + if len(orphans) == 0: + return result + + # Safety check + if ( + result.files_checked > 0 + and len(orphans) / result.files_checked > SAFETY_THRESHOLD + ): + if force: + logger.warning( + f"Previews sync: Would delete {len(orphans)}/{result.files_checked} " + f"({len(orphans) / 
result.files_checked * 100:.2f}%) files (force=True, bypassing safety threshold)." + ) + else: + logger.warning( + f"Previews sync: Would delete {len(orphans)}/{result.files_checked} " + f"({len(orphans) / result.files_checked * 100:.2f}%) files. " + "Aborting due to safety threshold." + ) + result.aborted = True + return result + + if dry_run: + logger.info(f"Previews sync (dry run): Found {len(orphans)} orphaned files") + return result + + # Delete orphans + logger.info(f"Deleting {len(orphans)} orphaned preview files") + for file_path in orphans: + try: + os.unlink(file_path) + result.orphans_deleted += 1 + except OSError as e: + logger.error(f"Failed to delete {file_path}: {e}") + + except Exception as e: + logger.error(f"Error syncing previews: {e}") + result.error = str(e) + + return result + + +def sync_exports(dry_run: bool = False, force: bool = False) -> SyncResult: + """Sync export files - delete files not referenced by any export record. + + Export videos are stored at: EXPORT_DIR/*.mp4 + Export thumbnails are stored at: CLIPS_DIR/export/*.jpg + The paths are stored in Export.video_path and Export.thumb_path + """ + result = SyncResult(media_type="exports") + + try: + # Get all export paths from DB + export_video_paths = set() + export_thumb_paths = set() + for e in Export.select(Export.video_path, Export.thumb_path): + if e.video_path: + export_video_paths.add(e.video_path) + if e.thumb_path: + export_thumb_paths.add(e.thumb_path) + + # Find export video files on disk + export_files: list[str] = [] + if os.path.isdir(EXPORT_DIR): + for file in os.listdir(EXPORT_DIR): + if file.endswith(".mp4"): + file_path = os.path.join(EXPORT_DIR, file) + export_files.append(file_path) + + # Find export thumbnail files on disk + export_thumb_dir = os.path.join(CLIPS_DIR, "export") + thumb_files: list[str] = [] + if os.path.isdir(export_thumb_dir): + for file in os.listdir(export_thumb_dir): + if file.endswith(".jpg"): + file_path = os.path.join(export_thumb_dir, 
file) + thumb_files.append(file_path) + + result.files_checked = len(export_files) + len(thumb_files) + + # Find orphans + orphans: list[str] = [] + for file_path in export_files: + if file_path not in export_video_paths: + orphans.append(file_path) + for file_path in thumb_files: + if file_path not in export_thumb_paths: + orphans.append(file_path) + + result.orphans_found = len(orphans) + result.orphan_paths = orphans + + if len(orphans) == 0: + return result + + # Safety check + if ( + result.files_checked > 0 + and len(orphans) / result.files_checked > SAFETY_THRESHOLD + ): + if force: + logger.warning( + f"Exports sync: Would delete {len(orphans)}/{result.files_checked} " + f"({len(orphans) / result.files_checked * 100:.2f}%) files (force=True, bypassing safety threshold)." + ) + else: + logger.warning( + f"Exports sync: Would delete {len(orphans)}/{result.files_checked} " + f"({len(orphans) / result.files_checked * 100:.2f}%) files. " + "Aborting due to safety threshold." + ) + result.aborted = True + return result + + if dry_run: + logger.info(f"Exports sync (dry run): Found {len(orphans)} orphaned files") + return result + + # Delete orphans + logger.info(f"Deleting {len(orphans)} orphaned export files") + for file_path in orphans: + try: + os.unlink(file_path) + result.orphans_deleted += 1 + except OSError as e: + logger.error(f"Failed to delete {file_path}: {e}") + + except Exception as e: + logger.error(f"Error syncing exports: {e}") + result.error = str(e) + + return result + + +@dataclass +class MediaSyncResults: + """Combined results from all media sync operations.""" + + event_snapshots: SyncResult | None = None + event_thumbnails: SyncResult | None = None + review_thumbnails: SyncResult | None = None + previews: SyncResult | None = None + exports: SyncResult | None = None + recordings: SyncResult | None = None + + @property + def total_files_checked(self) -> int: + total = 0 + for result in [ + self.event_snapshots, + self.event_thumbnails, + 
self.review_thumbnails, + self.previews, + self.exports, + self.recordings, + ]: + if result: + total += result.files_checked + return total + + @property + def total_orphans_found(self) -> int: + total = 0 + for result in [ + self.event_snapshots, + self.event_thumbnails, + self.review_thumbnails, + self.previews, + self.exports, + self.recordings, + ]: + if result: + total += result.orphans_found + return total + + @property + def total_orphans_deleted(self) -> int: + total = 0 + for result in [ + self.event_snapshots, + self.event_thumbnails, + self.review_thumbnails, + self.previews, + self.exports, + self.recordings, + ]: + if result: + total += result.orphans_deleted + return total + + def to_dict(self) -> dict: + """Convert results to dictionary for API response.""" + results = {} + for name, result in [ + ("event_snapshots", self.event_snapshots), + ("event_thumbnails", self.event_thumbnails), + ("review_thumbnails", self.review_thumbnails), + ("previews", self.previews), + ("exports", self.exports), + ("recordings", self.recordings), + ]: + if result: + results[name] = { + "files_checked": result.files_checked, + "orphans_found": result.orphans_found, + "orphans_deleted": result.orphans_deleted, + "aborted": result.aborted, + "error": result.error, + } + results["totals"] = { + "files_checked": self.total_files_checked, + "orphans_found": self.total_orphans_found, + "orphans_deleted": self.total_orphans_deleted, + } + return results + + +def sync_all_media( + dry_run: bool = False, media_types: list[str] = ["all"], force: bool = False +) -> MediaSyncResults: + """Sync specified media types with the database. + + Args: + dry_run: If True, only report orphans without deleting them. + media_types: List of media types to sync. Can include: 'all', 'event_snapshots', + 'event_thumbnails', 'review_thumbnails', 'previews', 'exports', 'recordings' + force: If True, bypass safety threshold checks. + + Returns: + MediaSyncResults with details of each sync operation. 
+ """ + logger.debug( + f"Starting media sync (dry_run={dry_run}, media_types={media_types}, force={force})" + ) + + results = MediaSyncResults() + + # Determine which media types to sync + sync_all = "all" in media_types + + if sync_all or "event_snapshots" in media_types: + results.event_snapshots = sync_event_snapshots(dry_run=dry_run, force=force) + + if sync_all or "event_thumbnails" in media_types: + results.event_thumbnails = sync_event_thumbnails(dry_run=dry_run, force=force) + + if sync_all or "review_thumbnails" in media_types: + results.review_thumbnails = sync_review_thumbnails(dry_run=dry_run, force=force) + + if sync_all or "previews" in media_types: + results.previews = sync_previews(dry_run=dry_run, force=force) + + if sync_all or "exports" in media_types: + results.exports = sync_exports(dry_run=dry_run, force=force) + + if sync_all or "recordings" in media_types: + results.recordings = sync_recordings(dry_run=dry_run, force=force) + + logger.info( + f"Media sync complete: checked {results.total_files_checked} files, " + f"found {results.total_orphans_found} orphans, " + f"deleted {results.total_orphans_deleted}" + ) + + return results diff --git a/web/public/locales/en/config/cameras.json b/web/public/locales/en/config/cameras.json index 67015bde5..d2c74dc54 100644 --- a/web/public/locales/en/config/cameras.json +++ b/web/public/locales/en/config/cameras.json @@ -324,9 +324,6 @@ "enabled": { "label": "Enable record on all cameras." }, - "sync_recordings": { - "label": "Sync recordings with disk on startup and once a day." - }, "expire_interval": { "label": "Number of minutes to wait between cleanup runs." }, @@ -758,4 +755,4 @@ "label": "Keep track of original state of camera." 
} } -} \ No newline at end of file +} diff --git a/web/public/locales/en/config/record.json b/web/public/locales/en/config/record.json index 81139084e..0c4a5fc42 100644 --- a/web/public/locales/en/config/record.json +++ b/web/public/locales/en/config/record.json @@ -4,9 +4,6 @@ "enabled": { "label": "Enable record on all cameras." }, - "sync_recordings": { - "label": "Sync recordings with disk on startup and once a day." - }, "expire_interval": { "label": "Number of minutes to wait between cleanup runs." }, @@ -90,4 +87,4 @@ "label": "Keep track of original state of recording." } } -} \ No newline at end of file +} diff --git a/web/src/types/frigateConfig.ts b/web/src/types/frigateConfig.ts index 94c9ba6e9..7c69ef808 100644 --- a/web/src/types/frigateConfig.ts +++ b/web/src/types/frigateConfig.ts @@ -197,7 +197,6 @@ export interface CameraConfig { days: number; mode: string; }; - sync_recordings: boolean; }; review: { alerts: { @@ -542,7 +541,6 @@ export interface FrigateConfig { days: number; mode: string; }; - sync_recordings: boolean; }; rtmp: { From b66e69efc9036104724e723fa748a4d2e4350a78 Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Tue, 6 Jan 2026 09:20:19 -0600 Subject: [PATCH 16/56] Media sync API refactor and UI (#21542) * generic job infrastructure * types and dispatcher changes for jobs * save data in memory only for completed jobs * implement media sync job and endpoints * change logs to debug * websocket hook and types * frontend * i18n * docs tweaks * endpoint descriptions * tweak docs --- docs/docs/configuration/record.md | 24 +- docs/static/frigate-api.yaml | 53 +++ frigate/api/app.py | 125 +++-- frigate/comms/dispatcher.py | 17 + frigate/const.py | 1 + frigate/jobs/__init__.py | 0 frigate/jobs/job.py | 21 + frigate/jobs/manager.py | 70 +++ frigate/jobs/media_sync.py | 135 ++++++ frigate/types.py | 9 + web/public/locales/en/views/settings.json | 48 ++ web/src/api/ws.tsx | 38 ++ 
web/src/pages/Settings.tsx | 6 + web/src/types/ws.ts | 29 ++ .../settings/MaintenanceSettingsView.tsx | 442 ++++++++++++++++++ 15 files changed, 951 insertions(+), 67 deletions(-) create mode 100644 frigate/jobs/__init__.py create mode 100644 frigate/jobs/job.py create mode 100644 frigate/jobs/manager.py create mode 100644 frigate/jobs/media_sync.py create mode 100644 web/src/views/settings/MaintenanceSettingsView.tsx diff --git a/docs/docs/configuration/record.md b/docs/docs/configuration/record.md index f25eac863..eb5d736e4 100644 --- a/docs/docs/configuration/record.md +++ b/docs/docs/configuration/record.md @@ -158,29 +158,9 @@ Apple devices running the Safari browser may fail to playback h.265 recordings. Media files (event snapshots, event thumbnails, review thumbnails, previews, exports, and recordings) can become orphaned when database entries are deleted but the corresponding files remain on disk. -This feature checks the file system for media files and removes any that are not referenced in the database. +Normal operation may leave small numbers of orphaned files until Frigate's scheduled cleanup, but crashes, configuration changes, or upgrades may cause more orphaned files that Frigate does not clean up. This feature checks the file system for media files and removes any that are not referenced in the database. -The API endpoint `POST /api/media/sync` can be used to trigger a media sync. The endpoint accepts a JSON request body to control the operation. - -Request body schema (JSON): - -```json -{ - "dry_run": true, - "media_types": ["all"], - "force": false -} -``` - -- `dry_run` (boolean): If `true` (default) the service will only report orphaned files without deleting them. Set to `false` to allow deletions. -- `media_types` (array of strings): Which media types to sync. 
Use `"all"` to sync everything, or a list of one or more of: - - `event_snapshots` - - `event_thumbnails` - - `review_thumbnails` - - `previews` - - `exports` - - `recordings` -- `force` (boolean): If `true` the safety threshold is bypassed and deletions proceed even if the operation would remove a large proportion of files. Use with extreme caution. +The Maintenance pane in the Frigate UI or an API endpoint `POST /api/media/sync` can be used to trigger a media sync. When using the API, a job ID is returned and the operation continues on the server. Status can be checked with the `/api/media/sync/status/{job_id}` endpoint. :::warning diff --git a/docs/static/frigate-api.yaml b/docs/static/frigate-api.yaml index f1a00fe61..36b346422 100644 --- a/docs/static/frigate-api.yaml +++ b/docs/static/frigate-api.yaml @@ -331,6 +331,59 @@ paths: application/json: schema: $ref: "#/components/schemas/HTTPValidationError" + /media/sync: + post: + tags: + - App + summary: Start media sync job + description: |- + Start an asynchronous media sync job to find and (optionally) remove orphaned media files. + Returns 202 with job details when queued, or 409 if a job is already running. + operationId: sync_media_media_sync_post + requestBody: + required: true + content: + application/json: + responses: + "202": + description: Accepted - Job queued + "409": + description: Conflict - Job already running + "422": + description: Validation Error + + /media/sync/current: + get: + tags: + - App + summary: Get current media sync job + description: |- + Retrieve the current running media sync job, if any. Returns the job details or null when no job is active. + operationId: get_media_sync_current_media_sync_current_get + responses: + "200": + description: Successful Response + "422": + description: Validation Error + + /media/sync/status/{job_id}: + get: + tags: + - App + summary: Get media sync job status + description: |- + Get status and results for the specified media sync job id. 
Returns 200 with job details including results, or 404 if the job is not found. + operationId: get_media_sync_status_media_sync_status__job_id__get + parameters: + - name: job_id + in: path + responses: + "200": + description: Successful Response + "404": + description: Not Found - Job not found + "422": + description: Validation Error /faces/train/{name}/classify: post: tags: diff --git a/frigate/api/app.py b/frigate/api/app.py index 7e97a0570..126c613a7 100644 --- a/frigate/api/app.py +++ b/frigate/api/app.py @@ -38,8 +38,14 @@ from frigate.config.camera.updater import ( CameraConfigUpdateTopic, ) from frigate.ffmpeg_presets import FFMPEG_HWACCEL_VAAPI, _gpu_selector +from frigate.jobs.media_sync import ( + get_current_media_sync_job, + get_media_sync_job_by_id, + start_media_sync_job, +) from frigate.models import Event, Timeline from frigate.stats.prometheus import get_metrics, update_metrics +from frigate.types import JobStatusTypesEnum from frigate.util.builtin import ( clean_camera_user_pass, flatten_config_data, @@ -47,7 +53,6 @@ from frigate.util.builtin import ( update_yaml_file_bulk, ) from frigate.util.config import find_config_file -from frigate.util.media import sync_all_media from frigate.util.services import ( get_nvidia_driver_info, process_logs, @@ -608,12 +613,19 @@ def restart(): ) -@router.post("/media/sync", dependencies=[Depends(require_role(["admin"]))]) +@router.post( + "/media/sync", + dependencies=[Depends(require_role(["admin"]))], + summary="Start media sync job", + description="""Start an asynchronous media sync job to find and (optionally) remove orphaned media files. + Returns 202 with job details when queued, or 409 if a job is already running.""", +) def sync_media(body: MediaSyncBody = Body(...)): - """Sync media files with database - remove orphaned files. + """Start async media sync job - remove orphaned files. 
Syncs specified media types: event snapshots, event thumbnails, review thumbnails, - previews, exports, and/or recordings. + previews, exports, and/or recordings. Job runs in background; use /media/sync/current + or /media/sync/status/{job_id} to check status. Args: body: MediaSyncBody with dry_run flag and media_types list. @@ -621,54 +633,77 @@ def sync_media(body: MediaSyncBody = Body(...)): 'review_thumbnails', 'previews', 'exports', 'recordings' Returns: - JSON response with sync results for each requested media type. + 202 Accepted with job_id, or 409 Conflict if job already running. """ - try: - results = sync_all_media( - dry_run=body.dry_run, media_types=body.media_types, force=body.force - ) + job_id = start_media_sync_job( + dry_run=body.dry_run, media_types=body.media_types, force=body.force + ) - # Check if any operations were aborted or had errors - has_errors = False - for result_name in [ - "event_snapshots", - "event_thumbnails", - "review_thumbnails", - "previews", - "exports", - "recordings", - ]: - result = getattr(results, result_name, None) - if result and (result.aborted or result.error): - has_errors = True - break - - content = { - "success": not has_errors, - "dry_run": body.dry_run, - "media_types": body.media_types, - "results": results.to_dict(), - } - - if has_errors: - content["message"] = ( - "Some sync operations were aborted or had errors; check logs for details." 
- ) - - return JSONResponse( - content=content, - status_code=200, - ) - except Exception as e: - logger.error(f"Error syncing media files: {e}") + if job_id is None: + # A job is already running + current = get_current_media_sync_job() return JSONResponse( content={ - "success": False, - "message": f"Error syncing media files: {str(e)}", + "error": "A media sync job is already running", + "current_job_id": current.id if current else None, }, - status_code=500, + status_code=409, ) + return JSONResponse( + content={ + "job": { + "job_type": "media_sync", + "status": JobStatusTypesEnum.queued, + "id": job_id, + } + }, + status_code=202, + ) + + +@router.get( + "/media/sync/current", + dependencies=[Depends(require_role(["admin"]))], + summary="Get current media sync job", + description="""Retrieve the current running media sync job, if any. Returns the job details + or null when no job is active.""", +) +def get_media_sync_current(): + """Get the current running media sync job, if any.""" + job = get_current_media_sync_job() + + if job is None: + return JSONResponse(content={"job": None}, status_code=200) + + return JSONResponse( + content={"job": job.to_dict()}, + status_code=200, + ) + + +@router.get( + "/media/sync/status/{job_id}", + dependencies=[Depends(require_role(["admin"]))], + summary="Get media sync job status", + description="""Get status and results for the specified media sync job id. 
Returns 200 with + job details including results, or 404 if the job is not found.""", +) +def get_media_sync_status(job_id: str): + """Get the status of a specific media sync job.""" + job = get_media_sync_job_by_id(job_id) + + if job is None: + return JSONResponse( + content={"error": "Job not found"}, + status_code=404, + ) + + return JSONResponse( + content={"job": job.to_dict()}, + status_code=200, + ) + @router.get("/labels", dependencies=[Depends(allow_any_authenticated())]) def get_labels(camera: str = ""): diff --git a/frigate/comms/dispatcher.py b/frigate/comms/dispatcher.py index 6e45ac175..68749b102 100644 --- a/frigate/comms/dispatcher.py +++ b/frigate/comms/dispatcher.py @@ -28,6 +28,7 @@ from frigate.const import ( UPDATE_CAMERA_ACTIVITY, UPDATE_EMBEDDINGS_REINDEX_PROGRESS, UPDATE_EVENT_DESCRIPTION, + UPDATE_JOB_STATE, UPDATE_MODEL_STATE, UPDATE_REVIEW_DESCRIPTION, UPSERT_REVIEW_SEGMENT, @@ -60,6 +61,7 @@ class Dispatcher: self.camera_activity = CameraActivityManager(config, self.publish) self.audio_activity = AudioActivityManager(config, self.publish) self.model_state: dict[str, ModelStatusTypesEnum] = {} + self.job_state: dict[str, dict[str, Any]] = {} # {job_type: job_data} self.embeddings_reindex: dict[str, Any] = {} self.birdseye_layout: dict[str, Any] = {} self.audio_transcription_state: str = "idle" @@ -180,6 +182,19 @@ class Dispatcher: def handle_model_state() -> None: self.publish("model_state", json.dumps(self.model_state.copy())) + def handle_update_job_state() -> None: + if payload and isinstance(payload, dict): + job_type = payload.get("job_type") + if job_type: + self.job_state[job_type] = payload + self.publish( + "job_state", + json.dumps(self.job_state), + ) + + def handle_job_state() -> None: + self.publish("job_state", json.dumps(self.job_state.copy())) + def handle_update_audio_transcription_state() -> None: if payload: self.audio_transcription_state = payload @@ -277,6 +292,7 @@ class Dispatcher: UPDATE_EVENT_DESCRIPTION: 
handle_update_event_description, UPDATE_REVIEW_DESCRIPTION: handle_update_review_description, UPDATE_MODEL_STATE: handle_update_model_state, + UPDATE_JOB_STATE: handle_update_job_state, UPDATE_EMBEDDINGS_REINDEX_PROGRESS: handle_update_embeddings_reindex_progress, UPDATE_BIRDSEYE_LAYOUT: handle_update_birdseye_layout, UPDATE_AUDIO_TRANSCRIPTION_STATE: handle_update_audio_transcription_state, @@ -284,6 +300,7 @@ class Dispatcher: "restart": handle_restart, "embeddingsReindexProgress": handle_embeddings_reindex_progress, "modelState": handle_model_state, + "jobState": handle_job_state, "audioTranscriptionState": handle_audio_transcription_state, "birdseyeLayout": handle_birdseye_layout, "onConnect": handle_on_connect, diff --git a/frigate/const.py b/frigate/const.py index 41c24f087..7229785a7 100644 --- a/frigate/const.py +++ b/frigate/const.py @@ -122,6 +122,7 @@ UPDATE_REVIEW_DESCRIPTION = "update_review_description" UPDATE_MODEL_STATE = "update_model_state" UPDATE_EMBEDDINGS_REINDEX_PROGRESS = "handle_embeddings_reindex_progress" UPDATE_BIRDSEYE_LAYOUT = "update_birdseye_layout" +UPDATE_JOB_STATE = "update_job_state" NOTIFICATION_TEST = "notification_test" # IO Nice Values diff --git a/frigate/jobs/__init__.py b/frigate/jobs/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/frigate/jobs/job.py b/frigate/jobs/job.py new file mode 100644 index 000000000..a445eebf5 --- /dev/null +++ b/frigate/jobs/job.py @@ -0,0 +1,21 @@ +"""Generic base class for long-running background jobs.""" + +from dataclasses import asdict, dataclass, field +from typing import Any, Optional + + +@dataclass +class Job: + """Base class for long-running background jobs.""" + + id: str = field(default_factory=lambda: __import__("uuid").uuid4().__str__()[:12]) + job_type: str = "" # Must be set by subclasses + status: str = "queued" # queued, running, success, failed, cancelled + results: Optional[dict[str, Any]] = None + start_time: Optional[float] = None + end_time: 
Optional[float] = None + error_message: Optional[str] = None + + def to_dict(self) -> dict[str, Any]: + """Convert to dictionary for WebSocket transmission.""" + return asdict(self) diff --git a/frigate/jobs/manager.py b/frigate/jobs/manager.py new file mode 100644 index 000000000..8aa77b3c7 --- /dev/null +++ b/frigate/jobs/manager.py @@ -0,0 +1,70 @@ +"""Generic job management for long-running background tasks.""" + +import threading +from typing import Optional + +from frigate.jobs.job import Job +from frigate.types import JobStatusTypesEnum + +# Global state and locks for enforcing single concurrent job per job type +_job_locks: dict[str, threading.Lock] = {} +_current_jobs: dict[str, Optional[Job]] = {} +# Keep completed jobs for retrieval, keyed by (job_type, job_id) +_completed_jobs: dict[tuple[str, str], Job] = {} + + +def _get_lock(job_type: str) -> threading.Lock: + """Get or create a lock for the specified job type.""" + if job_type not in _job_locks: + _job_locks[job_type] = threading.Lock() + return _job_locks[job_type] + + +def set_current_job(job: Job) -> None: + """Set the current job for a given job type.""" + lock = _get_lock(job.job_type) + with lock: + # Store the previous job if it was completed + old_job = _current_jobs.get(job.job_type) + if old_job and old_job.status in ( + JobStatusTypesEnum.success, + JobStatusTypesEnum.failed, + JobStatusTypesEnum.cancelled, + ): + _completed_jobs[(job.job_type, old_job.id)] = old_job + _current_jobs[job.job_type] = job + + +def clear_current_job(job_type: str, job_id: Optional[str] = None) -> None: + """Clear the current job for a given job type, optionally checking the ID.""" + lock = _get_lock(job_type) + with lock: + if job_type in _current_jobs: + current = _current_jobs[job_type] + if current is None or (job_id is None or current.id == job_id): + _current_jobs[job_type] = None + + +def get_current_job(job_type: str) -> Optional[Job]: + """Get the current running/queued job for a given job type, if 
any.""" + lock = _get_lock(job_type) + with lock: + return _current_jobs.get(job_type) + + +def get_job_by_id(job_type: str, job_id: str) -> Optional[Job]: + """Get job by ID. Checks current job first, then completed jobs.""" + lock = _get_lock(job_type) + with lock: + # Check if it's the current job + current = _current_jobs.get(job_type) + if current and current.id == job_id: + return current + # Check if it's a completed job + return _completed_jobs.get((job_type, job_id)) + + +def job_is_running(job_type: str) -> bool: + """Check if a job of the given type is currently running or queued.""" + job = get_current_job(job_type) + return job is not None and job.status in ("queued", "running") diff --git a/frigate/jobs/media_sync.py b/frigate/jobs/media_sync.py new file mode 100644 index 000000000..7c15435fd --- /dev/null +++ b/frigate/jobs/media_sync.py @@ -0,0 +1,135 @@ +"""Media sync job management with background execution.""" + +import logging +import threading +from dataclasses import dataclass, field +from datetime import datetime +from typing import Optional + +from frigate.comms.inter_process import InterProcessRequestor +from frigate.const import UPDATE_JOB_STATE +from frigate.jobs.job import Job +from frigate.jobs.manager import ( + get_current_job, + get_job_by_id, + job_is_running, + set_current_job, +) +from frigate.types import JobStatusTypesEnum +from frigate.util.media import sync_all_media + +logger = logging.getLogger(__name__) + + +@dataclass +class MediaSyncJob(Job): + """In-memory job state for media sync operations.""" + + job_type: str = "media_sync" + dry_run: bool = False + media_types: list[str] = field(default_factory=lambda: ["all"]) + force: bool = False + + +class MediaSyncRunner(threading.Thread): + """Thread-based runner for media sync jobs.""" + + def __init__(self, job: MediaSyncJob) -> None: + super().__init__(daemon=True, name="media_sync") + self.job = job + self.requestor = InterProcessRequestor() + + def run(self) -> None: + 
"""Execute the media sync job and broadcast status updates.""" + try: + # Update job status to running + self.job.status = JobStatusTypesEnum.running + self.job.start_time = datetime.now().timestamp() + self._broadcast_status() + + # Execute sync with provided parameters + logger.debug( + f"Starting media sync job {self.job.id}: " + f"media_types={self.job.media_types}, " + f"dry_run={self.job.dry_run}, " + f"force={self.job.force}" + ) + + results = sync_all_media( + dry_run=self.job.dry_run, + media_types=self.job.media_types, + force=self.job.force, + ) + + # Store results and mark as complete + self.job.results = results.to_dict() + self.job.status = JobStatusTypesEnum.success + self.job.end_time = datetime.now().timestamp() + + logger.debug(f"Media sync job {self.job.id} completed successfully") + self._broadcast_status() + + except Exception as e: + logger.error(f"Media sync job {self.job.id} failed: {e}", exc_info=True) + self.job.status = JobStatusTypesEnum.failed + self.job.error_message = str(e) + self.job.end_time = datetime.now().timestamp() + self._broadcast_status() + + finally: + if self.requestor: + self.requestor.stop() + + def _broadcast_status(self) -> None: + """Broadcast job status update via IPC to all WebSocket subscribers.""" + try: + self.requestor.send_data( + UPDATE_JOB_STATE, + self.job.to_dict(), + ) + except Exception as e: + logger.warning(f"Failed to broadcast media sync status: {e}") + + +def start_media_sync_job( + dry_run: bool = False, + media_types: Optional[list[str]] = None, + force: bool = False, +) -> Optional[str]: + """Start a new media sync job if none is currently running. + + Returns job ID on success, None if job already running. + """ + # Check if a job is already running + if job_is_running("media_sync"): + current = get_current_job("media_sync") + logger.warning( + f"Media sync job {current.id} is already running. Rejecting new request." 
+ ) + return None + + # Create and start new job + job = MediaSyncJob( + dry_run=dry_run, + media_types=media_types or ["all"], + force=force, + ) + + logger.debug(f"Creating new media sync job: {job.id}") + set_current_job(job) + + # Start the background runner + runner = MediaSyncRunner(job) + runner.start() + + return job.id + + +def get_current_media_sync_job() -> Optional[MediaSyncJob]: + """Get the current running/queued media sync job, if any.""" + return get_current_job("media_sync") + + +def get_media_sync_job_by_id(job_id: str) -> Optional[MediaSyncJob]: + """Get media sync job by ID. Currently only tracks the current job.""" + return get_job_by_id("media_sync", job_id) diff --git a/frigate/types.py b/frigate/types.py index 6c5135616..77bb50845 100644 --- a/frigate/types.py +++ b/frigate/types.py @@ -26,6 +26,15 @@ class ModelStatusTypesEnum(str, Enum): failed = "failed" +class JobStatusTypesEnum(str, Enum): + pending = "pending" + queued = "queued" + running = "running" + success = "success" + failed = "failed" + cancelled = "cancelled" + + class TrackedObjectUpdateTypesEnum(str, Enum): description = "description" face = "face" diff --git a/web/public/locales/en/views/settings.json b/web/public/locales/en/views/settings.json index ea2869986..a84c15619 100644 --- a/web/public/locales/en/views/settings.json +++ b/web/public/locales/en/views/settings.json @@ -1067,5 +1067,53 @@ "deleteTriggerFailed": "Failed to delete trigger: {{errorMessage}}" } } + }, + "maintenance": { + "title": "Maintenance", + "sync": { + "title": "Media Sync", + "desc": "Frigate will periodically clean up media on a regular schedule according to your retention configuration. It is normal to see a few orphaned files as Frigate runs. 
Use this feature to remove orphaned media files from disk that are no longer referenced in the database.", + "started": "Media sync started.", + "alreadyRunning": "A sync job is already running", + "error": "Failed to start sync", + "currentStatus": "Status", + "jobId": "Job ID", + "startTime": "Start Time", + "endTime": "End Time", + "statusLabel": "Status", + "results": "Results", + "errorLabel": "Error", + "mediaTypes": "Media Types", + "allMedia": "All Media", + "dryRun": "Dry Run", + "dryRunEnabled": "No files will be deleted", + "dryRunDisabled": "Files will be deleted", + "force": "Force", + "forceDesc": "Bypass safety threshold and complete sync even if more than 50% of the files would be deleted.", + "running": "Sync Running...", + "start": "Start Sync", + "inProgress": "Sync is in progress. This page is disabled.", + "status": { + "queued": "Queued", + "running": "Running", + "completed": "Completed", + "failed": "Failed", + "notRunning": "Not Running" + }, + "resultsFields": { + "filesChecked": "Files Checked", + "orphansFound": "Orphans Found", + "orphansDeleted": "Orphans Deleted", + "aborted": "Aborted. 
Deletion would exceed safety threshold.", + "error": "Error", + "totals": "Totals" + }, + "event_snapshots": "Tracked Object Snapshots", + "event_thumbnails": "Tracked Object Thumbnails", + "review_thumbnails": "Review Thumbnails", + "previews": "Previews", + "exports": "Exports", + "recordings": "Recordings" + } } } diff --git a/web/src/api/ws.tsx b/web/src/api/ws.tsx index 44d45ea2f..6bb2fdc32 100644 --- a/web/src/api/ws.tsx +++ b/web/src/api/ws.tsx @@ -11,6 +11,7 @@ import { TrackedObjectUpdateReturnType, TriggerStatus, FrigateAudioDetections, + Job, } from "@/types/ws"; import { FrigateStats } from "@/types/stats"; import { createContainer } from "react-tracked"; @@ -651,3 +652,40 @@ export function useTriggers(): { payload: TriggerStatus } { : { name: "", camera: "", event_id: "", type: "", score: 0 }; return { payload: useDeepMemo(parsed) }; } + +export function useJobStatus( + jobType: string, + revalidateOnFocus: boolean = true, +): { payload: Job | null } { + const { + value: { payload }, + send: sendCommand, + } = useWs("job_state", "jobState"); + + const jobData = useDeepMemo( + payload && typeof payload === "string" ? 
JSON.parse(payload) : {}, + ); + const currentJob = jobData[jobType] || null; + + useEffect(() => { + let listener: (() => void) | undefined; + if (revalidateOnFocus) { + sendCommand("jobState"); + listener = () => { + if (document.visibilityState === "visible") { + sendCommand("jobState"); + } + }; + addEventListener("visibilitychange", listener); + } + + return () => { + if (listener) { + removeEventListener("visibilitychange", listener); + } + }; + // eslint-disable-next-line react-hooks/exhaustive-deps + }, [revalidateOnFocus]); + + return { payload: currentJob as Job | null }; +} diff --git a/web/src/pages/Settings.tsx b/web/src/pages/Settings.tsx index 1d44125cb..50b72ab80 100644 --- a/web/src/pages/Settings.tsx +++ b/web/src/pages/Settings.tsx @@ -36,6 +36,7 @@ import NotificationView from "@/views/settings/NotificationsSettingsView"; import EnrichmentsSettingsView from "@/views/settings/EnrichmentsSettingsView"; import UiSettingsView from "@/views/settings/UiSettingsView"; import FrigatePlusSettingsView from "@/views/settings/FrigatePlusSettingsView"; +import MaintenanceSettingsView from "@/views/settings/MaintenanceSettingsView"; import { useSearchEffect } from "@/hooks/use-overlay-state"; import { useNavigate, useSearchParams } from "react-router-dom"; import { useInitialCameraState } from "@/api/ws"; @@ -81,6 +82,7 @@ const allSettingsViews = [ "roles", "notifications", "frigateplus", + "maintenance", ] as const; type SettingsType = (typeof allSettingsViews)[number]; @@ -120,6 +122,10 @@ const settingsGroups = [ label: "frigateplus", items: [{ key: "frigateplus", component: FrigatePlusSettingsView }], }, + { + label: "maintenance", + items: [{ key: "maintenance", component: MaintenanceSettingsView }], + }, ]; const CAMERA_SELECT_BUTTON_PAGES = [ diff --git a/web/src/types/ws.ts b/web/src/types/ws.ts index 1d98b7b01..6e22345eb 100644 --- a/web/src/types/ws.ts +++ b/web/src/types/ws.ts @@ -126,3 +126,32 @@ export type TriggerStatus = { type: string; score: 
number; }; + +export type MediaSyncStats = { + files_checked: number; + orphans_found: number; + orphans_deleted: number; + aborted: boolean; + error: string | null; +}; + +export type MediaSyncTotals = { + files_checked: number; + orphans_found: number; + orphans_deleted: number; +}; + +export type MediaSyncResults = { + [mediaType: string]: MediaSyncStats | MediaSyncTotals; + totals: MediaSyncTotals; +}; + +export type Job = { + id: string; + job_type: string; + status: string; + results?: MediaSyncResults; + start_time?: number; + end_time?: number; + error_message?: string; +}; diff --git a/web/src/views/settings/MaintenanceSettingsView.tsx b/web/src/views/settings/MaintenanceSettingsView.tsx new file mode 100644 index 000000000..f2d1bad30 --- /dev/null +++ b/web/src/views/settings/MaintenanceSettingsView.tsx @@ -0,0 +1,442 @@ +import Heading from "@/components/ui/heading"; +import { Button } from "@/components/ui/button"; +import { Label } from "@/components/ui/label"; +import { Separator } from "@/components/ui/separator"; +import { Toaster } from "@/components/ui/sonner"; +import ActivityIndicator from "@/components/indicators/activity-indicator"; +import { useCallback, useState } from "react"; +import { useTranslation } from "react-i18next"; +import axios from "axios"; +import { toast } from "sonner"; +import { useJobStatus } from "@/api/ws"; +import { Switch } from "@/components/ui/switch"; +import { LuCheck, LuX } from "react-icons/lu"; +import { cn } from "@/lib/utils"; +import { formatUnixTimestampToDateTime } from "@/utils/dateUtil"; +import { MediaSyncStats } from "@/types/ws"; + +export default function MaintenanceSettingsView() { + const { t } = useTranslation("views/settings"); + const [selectedMediaTypes, setSelectedMediaTypes] = useState([ + "all", + ]); + const [dryRun, setDryRun] = useState(true); + const [force, setForce] = useState(false); + const [isSubmitting, setIsSubmitting] = useState(false); + + const MEDIA_TYPES = [ + { id: 
"event_snapshots", label: t("maintenance.sync.event_snapshots") }, + { id: "event_thumbnails", label: t("maintenance.sync.event_thumbnails") }, + { id: "review_thumbnails", label: t("maintenance.sync.review_thumbnails") }, + { id: "previews", label: t("maintenance.sync.previews") }, + { id: "exports", label: t("maintenance.sync.exports") }, + { id: "recordings", label: t("maintenance.sync.recordings") }, + ]; + + // Subscribe to media sync status via WebSocket + const { payload: currentJob } = useJobStatus("media_sync"); + + const isJobRunning = Boolean( + currentJob && + (currentJob.status === "queued" || currentJob.status === "running"), + ); + + const handleMediaTypeChange = useCallback((id: string, checked: boolean) => { + setSelectedMediaTypes((prev) => { + if (id === "all") { + return checked ? ["all"] : []; + } + + let next = prev.filter((t) => t !== "all"); + if (checked) { + next.push(id); + } else { + next = next.filter((t) => t !== id); + } + return next.length === 0 ? ["all"] : next; + }); + }, []); + + const handleStartSync = useCallback(async () => { + setIsSubmitting(true); + + try { + const response = await axios.post( + "/media/sync", + { + dry_run: dryRun, + media_types: selectedMediaTypes, + force: force, + }, + { + headers: { + "Content-Type": "application/json", + }, + }, + ); + + if (response.status === 202) { + toast.success(t("maintenance.sync.started"), { + position: "top-center", + closeButton: true, + }); + } else if (response.status === 409) { + toast.error(t("maintenance.sync.alreadyRunning"), { + position: "top-center", + closeButton: true, + }); + } + } catch { + toast.error(t("maintenance.sync.error"), { + position: "top-center", + closeButton: true, + }); + } finally { + setIsSubmitting(false); + } + }, [selectedMediaTypes, dryRun, force, t]); + + return ( + <> +
+ +
+
+
+ + {t("maintenance.sync.title")} + + +
+
+

{t("maintenance.sync.desc")}

+
+
+ +
+ {/* Media Types Selection */} +
+ +
+
+ + + handleMediaTypeChange("all", checked) + } + disabled={isJobRunning} + /> +
+
+ {MEDIA_TYPES.map((type) => ( +
+ + + handleMediaTypeChange(type.id, checked) + } + disabled={ + isJobRunning || selectedMediaTypes.includes("all") + } + /> +
+ ))} +
+
+
+ + {/* Options */} +
+
+
+ +
+ +

+ {dryRun + ? t("maintenance.sync.dryRunEnabled") + : t("maintenance.sync.dryRunDisabled")} +

+
+
+
+ +
+
+ +
+ +

+ {t("maintenance.sync.forceDesc")} +

+
+
+
+
+ + {/* Action Buttons */} +
+ +
+
+
+ +
+
+ +
+ + {t("maintenance.sync.currentStatus")} + +
+ {currentJob?.status === "success" && ( + + )} + {currentJob?.status === "failed" && ( + + )} + {(currentJob?.status === "running" || + currentJob?.status === "queued") && ( + + )} + {t( + `maintenance.sync.status.${currentJob?.status ?? "notRunning"}`, + )} +
+
+ + {/* Current Job Status */} +
+ {currentJob?.start_time && ( +
+ + {t("maintenance.sync.startTime")}: + + + {formatUnixTimestampToDateTime( + currentJob?.start_time ?? "-", + )} + +
+ )} + {currentJob?.end_time && ( +
+ + {t("maintenance.sync.endTime")}: + + + {formatUnixTimestampToDateTime(currentJob?.end_time)} + +
+ )} + {currentJob?.results && ( +
+

+ {t("maintenance.sync.results")} +

+
+ {/* Individual media type results */} +
+ {Object.entries(currentJob.results) + .filter(([key]) => key !== "totals") + .map(([mediaType, stats]) => { + const mediaStats = stats as MediaSyncStats; + return ( +
+

+ {t(`maintenance.sync.${mediaType}`)} +

+
+
+ + {t( + "maintenance.sync.resultsFields.filesChecked", + )} + + {mediaStats.files_checked} +
+
+ + {t( + "maintenance.sync.resultsFields.orphansFound", + )} + + 0 + ? "text-yellow-500" + : "" + } + > + {mediaStats.orphans_found} + +
+
+ + {t( + "maintenance.sync.resultsFields.orphansDeleted", + )} + + 0 && + "text-success", + mediaStats.orphans_deleted === 0 && + mediaStats.aborted && + "text-destructive", + )} + > + {mediaStats.orphans_deleted} + +
+ {mediaStats.aborted && ( +
+ + + {t( + "maintenance.sync.resultsFields.aborted", + )} +
+ )} + {mediaStats.error && ( +
+ {t( + "maintenance.sync.resultsFields.error", + )} + {": "} + {mediaStats.error} +
+ )} +
+
+ ); + })} +
+ {/* Totals */} + {currentJob.results.totals && ( +
+

+ {t("maintenance.sync.resultsFields.totals")} +

+
+
+ + {t( + "maintenance.sync.resultsFields.filesChecked", + )} + + + {currentJob.results.totals.files_checked} + +
+
+ + {t( + "maintenance.sync.resultsFields.orphansFound", + )} + + 0 + ? "font-medium text-yellow-500" + : "font-medium" + } + > + {currentJob.results.totals.orphans_found} + +
+
+ + {t( + "maintenance.sync.resultsFields.orphansDeleted", + )} + + + 0 + ? "text-success" + : "text-muted-foreground", + )} + > + {currentJob.results.totals.orphans_deleted} + +
+
+
+ )} +
+
+ )} + {currentJob?.error_message && ( +
+

+ {t("maintenance.sync.errorLabel")} +

+

{currentJob?.error_message}

+
+ )} +
+
+
+
+
+
+ + ); +} From 88348bf53521b849cdef6208eba664651c1c835f Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Mon, 12 Jan 2026 18:20:27 -0600 Subject: [PATCH 17/56] use same logging pattern in sync_recordings as the other sync functions (#21625) --- frigate/util/media.py | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/frigate/util/media.py b/frigate/util/media.py index a31b93e91..130fec79d 100644 --- a/frigate/util/media.py +++ b/frigate/util/media.py @@ -198,16 +198,20 @@ def sync_recordings( result.aborted = True return result - if files_to_delete and not dry_run: + if dry_run: logger.info( - f"Deleting {len(files_to_delete)} recordings files with missing DB entries" + f"Recordings sync (dry run): Found {len(files_to_delete)} orphaned files" ) - for file in files_to_delete: - try: - os.unlink(file) - result.orphans_deleted += 1 - except OSError as e: - logger.error(f"Failed to delete {file}: {e}") + return result + + # Delete orphans + logger.info(f"Deleting {len(files_to_delete)} orphaned recordings files") + for file in files_to_delete: + try: + os.unlink(file) + result.orphans_deleted += 1 + except OSError as e: + logger.error(f"Failed to delete {file}: {e}") logger.debug("End sync recordings.") From 56c7a13fbeb755f482dfd426765562423fb78d31 Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Mon, 12 Jan 2026 19:25:07 -0600 Subject: [PATCH 18/56] Fix incorrect counting in sync_recordings (#21626) --- frigate/util/media.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/frigate/util/media.py b/frigate/util/media.py index 130fec79d..24b690e31 100644 --- a/frigate/util/media.py +++ b/frigate/util/media.py @@ -98,7 +98,6 @@ def sync_recordings( {"id": recording.id, "path": recording.path} ) - result.files_checked += recordings_count result.orphans_found += len(recordings_to_delete) result.orphan_paths.extend( [ @@ 
-173,7 +172,7 @@ def sync_recordings( for file in files } - result.files_checked += len(files_on_disk) + result.files_checked = len(files_on_disk) files_to_delete: list[str] = [] for file in files_on_disk: From 542295dcb3f56f78481bafef4cdf3dde55eebf24 Mon Sep 17 00:00:00 2001 From: Eugeny Tulupov Date: Wed, 14 Jan 2026 22:15:45 +0700 Subject: [PATCH 19/56] Update go2rtc to v1.9.13 (#21648) Co-authored-by: Eugeny Tulupov --- docker/main/Dockerfile | 2 +- docs/docs/configuration/advanced.md | 2 +- docs/docs/configuration/camera_specific.md | 2 +- docs/docs/configuration/reference.md | 2 +- docs/docs/configuration/restream.md | 4 ++-- docs/docs/guides/configuring_go2rtc.md | 6 +++--- docs/sidebars.ts | 2 +- 7 files changed, 10 insertions(+), 10 deletions(-) diff --git a/docker/main/Dockerfile b/docker/main/Dockerfile index 055a1458f..b14320033 100644 --- a/docker/main/Dockerfile +++ b/docker/main/Dockerfile @@ -55,7 +55,7 @@ RUN --mount=type=tmpfs,target=/tmp --mount=type=tmpfs,target=/var/cache/apt \ FROM scratch AS go2rtc ARG TARGETARCH WORKDIR /rootfs/usr/local/go2rtc/bin -ADD --link --chmod=755 "https://github.com/AlexxIT/go2rtc/releases/download/v1.9.10/go2rtc_linux_${TARGETARCH}" go2rtc +ADD --link --chmod=755 "https://github.com/AlexxIT/go2rtc/releases/download/v1.9.13/go2rtc_linux_${TARGETARCH}" go2rtc FROM wget AS tempio ARG TARGETARCH diff --git a/docs/docs/configuration/advanced.md b/docs/docs/configuration/advanced.md index 17eb2053d..8cd368144 100644 --- a/docs/docs/configuration/advanced.md +++ b/docs/docs/configuration/advanced.md @@ -234,7 +234,7 @@ To do this: ### Custom go2rtc version -Frigate currently includes go2rtc v1.9.10, there may be certain cases where you want to run a different version of go2rtc. +Frigate currently includes go2rtc v1.9.13, there may be certain cases where you want to run a different version of go2rtc. 
To do this: diff --git a/docs/docs/configuration/camera_specific.md b/docs/docs/configuration/camera_specific.md index 50d5c52aa..aae8c57b4 100644 --- a/docs/docs/configuration/camera_specific.md +++ b/docs/docs/configuration/camera_specific.md @@ -244,7 +244,7 @@ go2rtc: - rtspx://192.168.1.1:7441/abcdefghijk ``` -[See the go2rtc docs for more information](https://github.com/AlexxIT/go2rtc/tree/v1.9.10#source-rtsp) +[See the go2rtc docs for more information](https://github.com/AlexxIT/go2rtc/tree/v1.9.13#source-rtsp) In the Unifi 2.0 update Unifi Protect Cameras had a change in audio sample rate which causes issues for ffmpeg. The input rate needs to be set for record if used directly with unifi protect. diff --git a/docs/docs/configuration/reference.md b/docs/docs/configuration/reference.md index b287a7e9b..1577d7b01 100644 --- a/docs/docs/configuration/reference.md +++ b/docs/docs/configuration/reference.md @@ -752,7 +752,7 @@ classification: interval: None # Optional: Restream configuration -# Uses https://github.com/AlexxIT/go2rtc (v1.9.10) +# Uses https://github.com/AlexxIT/go2rtc (v1.9.13) # NOTE: The default go2rtc API port (1984) must be used, # changing this port for the integrated go2rtc instance is not supported. go2rtc: diff --git a/docs/docs/configuration/restream.md b/docs/docs/configuration/restream.md index ebd506294..a3c11f2d0 100644 --- a/docs/docs/configuration/restream.md +++ b/docs/docs/configuration/restream.md @@ -7,7 +7,7 @@ title: Restream Frigate can restream your video feed as an RTSP feed for other applications such as Home Assistant to utilize it at `rtsp://:8554/`. Port 8554 must be open. [This allows you to use a video feed for detection in Frigate and Home Assistant live view at the same time without having to make two separate connections to the camera](#reduce-connections-to-camera). The video feed is copied from the original video feed directly to avoid re-encoding. This feed does not include any annotation by Frigate. 
-Frigate uses [go2rtc](https://github.com/AlexxIT/go2rtc/tree/v1.9.10) to provide its restream and MSE/WebRTC capabilities. The go2rtc config is hosted at the `go2rtc` in the config, see [go2rtc docs](https://github.com/AlexxIT/go2rtc/tree/v1.9.10#configuration) for more advanced configurations and features. +Frigate uses [go2rtc](https://github.com/AlexxIT/go2rtc/tree/v1.9.13) to provide its restream and MSE/WebRTC capabilities. The go2rtc config is hosted at the `go2rtc` in the config, see [go2rtc docs](https://github.com/AlexxIT/go2rtc/tree/v1.9.13#configuration) for more advanced configurations and features. :::note @@ -206,7 +206,7 @@ Enabling arbitrary exec sources allows execution of arbitrary commands through g ## Advanced Restream Configurations -The [exec](https://github.com/AlexxIT/go2rtc/tree/v1.9.10#source-exec) source in go2rtc can be used for custom ffmpeg commands. An example is below: +The [exec](https://github.com/AlexxIT/go2rtc/tree/v1.9.13#source-exec) source in go2rtc can be used for custom ffmpeg commands. An example is below: :::warning diff --git a/docs/docs/guides/configuring_go2rtc.md b/docs/docs/guides/configuring_go2rtc.md index ca50a90d3..8b01de3e7 100644 --- a/docs/docs/guides/configuring_go2rtc.md +++ b/docs/docs/guides/configuring_go2rtc.md @@ -11,7 +11,7 @@ Use of the bundled go2rtc is optional. You can still configure FFmpeg to connect ## Setup a go2rtc stream -First, you will want to configure go2rtc to connect to your camera stream by adding the stream you want to use for live view in your Frigate config file. Avoid changing any other parts of your config at this step. Note that go2rtc supports [many different stream types](https://github.com/AlexxIT/go2rtc/tree/v1.9.10#module-streams), not just rtsp. +First, you will want to configure go2rtc to connect to your camera stream by adding the stream you want to use for live view in your Frigate config file. Avoid changing any other parts of your config at this step. 
Note that go2rtc supports [many different stream types](https://github.com/AlexxIT/go2rtc/tree/v1.9.13#module-streams), not just rtsp. :::tip @@ -47,8 +47,8 @@ After adding this to the config, restart Frigate and try to watch the live strea - Check Video Codec: - If the camera stream works in go2rtc but not in your browser, the video codec might be unsupported. - - If using H265, switch to H264. Refer to [video codec compatibility](https://github.com/AlexxIT/go2rtc/tree/v1.9.10#codecs-madness) in go2rtc documentation. - - If unable to switch from H265 to H264, or if the stream format is different (e.g., MJPEG), re-encode the video using [FFmpeg parameters](https://github.com/AlexxIT/go2rtc/tree/v1.9.10#source-ffmpeg). It supports rotating and resizing video feeds and hardware acceleration. Keep in mind that transcoding video from one format to another is a resource intensive task and you may be better off using the built-in jsmpeg view. + - If using H265, switch to H264. Refer to [video codec compatibility](https://github.com/AlexxIT/go2rtc/tree/v1.9.13#codecs-madness) in go2rtc documentation. + - If unable to switch from H265 to H264, or if the stream format is different (e.g., MJPEG), re-encode the video using [FFmpeg parameters](https://github.com/AlexxIT/go2rtc/tree/v1.9.13#source-ffmpeg). It supports rotating and resizing video feeds and hardware acceleration. Keep in mind that transcoding video from one format to another is a resource intensive task and you may be better off using the built-in jsmpeg view. 
```yaml go2rtc: streams: diff --git a/docs/sidebars.ts b/docs/sidebars.ts index ea0d2f5c8..a4c1bca9d 100644 --- a/docs/sidebars.ts +++ b/docs/sidebars.ts @@ -28,7 +28,7 @@ const sidebars: SidebarsConfig = { { type: "link", label: "Go2RTC Configuration Reference", - href: "https://github.com/AlexxIT/go2rtc/tree/v1.9.10#configuration", + href: "https://github.com/AlexxIT/go2rtc/tree/v1.9.13#configuration", } as PropSidebarItemLink, ], Detectors: [ From ee2c96c793afc3acb51f7867f09830120219d46a Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Thu, 15 Jan 2026 10:30:55 -0700 Subject: [PATCH 20/56] Refactor Time-Lapse Export (#21668) * refactor time lapse creation to be a separate API call with ability to pass arbitrary ffmpeg args * Add CPU fallback --- .../defs/request/export_recordings_body.py | 37 ++++- frigate/api/export.py | 145 +++++++++++++++++- frigate/config/camera/record.py | 5 - frigate/record/export.py | 92 ++++++++--- frigate/util/config.py | 24 ++- 5 files changed, 257 insertions(+), 46 deletions(-) diff --git a/frigate/api/defs/request/export_recordings_body.py b/frigate/api/defs/request/export_recordings_body.py index 1a6f609bf..96ecccaa4 100644 --- a/frigate/api/defs/request/export_recordings_body.py +++ b/frigate/api/defs/request/export_recordings_body.py @@ -3,16 +3,10 @@ from typing import Optional, Union from pydantic import BaseModel, Field from pydantic.json_schema import SkipJsonSchema -from frigate.record.export import ( - PlaybackFactorEnum, - PlaybackSourceEnum, -) +from frigate.record.export import PlaybackSourceEnum class ExportRecordingsBody(BaseModel): - playback: PlaybackFactorEnum = Field( - default=PlaybackFactorEnum.realtime, title="Playback factor" - ) source: PlaybackSourceEnum = Field( default=PlaybackSourceEnum.recordings, title="Playback source" ) @@ -24,3 +18,32 @@ class ExportRecordingsBody(BaseModel): max_length=30, description="ID of the export case to assign this export to", ) + + +class 
ExportRecordingsCustomBody(BaseModel): + source: PlaybackSourceEnum = Field( + default=PlaybackSourceEnum.recordings, title="Playback source" + ) + name: str = Field(title="Friendly name", default=None, max_length=256) + image_path: Union[str, SkipJsonSchema[None]] = None + export_case_id: Optional[str] = Field( + default=None, + title="Export case ID", + max_length=30, + description="ID of the export case to assign this export to", + ) + ffmpeg_input_args: Optional[str] = Field( + default=None, + title="FFmpeg input arguments", + description="Custom FFmpeg input arguments. If not provided, defaults to timelapse input args.", + ) + ffmpeg_output_args: Optional[str] = Field( + default=None, + title="FFmpeg output arguments", + description="Custom FFmpeg output arguments. If not provided, defaults to timelapse output args.", + ) + cpu_fallback: bool = Field( + default=False, + title="CPU Fallback", + description="If true, retry export without hardware acceleration if the initial export fails.", + ) diff --git a/frigate/api/export.py b/frigate/api/export.py index c2cf66a34..23f975618 100644 --- a/frigate/api/export.py +++ b/frigate/api/export.py @@ -24,7 +24,10 @@ from frigate.api.defs.request.export_case_body import ( ExportCaseCreateBody, ExportCaseUpdateBody, ) -from frigate.api.defs.request.export_recordings_body import ExportRecordingsBody +from frigate.api.defs.request.export_recordings_body import ( + ExportRecordingsBody, + ExportRecordingsCustomBody, +) from frigate.api.defs.request.export_rename_body import ExportRenameBody from frigate.api.defs.response.export_case_response import ( ExportCaseModel, @@ -40,7 +43,7 @@ from frigate.api.defs.tags import Tags from frigate.const import CLIPS_DIR, EXPORT_DIR from frigate.models import Export, ExportCase, Previews, Recordings from frigate.record.export import ( - PlaybackFactorEnum, + DEFAULT_TIME_LAPSE_FFMPEG_ARGS, PlaybackSourceEnum, RecordingExporter, ) @@ -262,7 +265,6 @@ def export_recording( status_code=404, 
) - playback_factor = body.playback playback_source = body.source friendly_name = body.name existing_image = sanitize_filepath(body.image_path) if body.image_path else None @@ -335,11 +337,6 @@ def export_recording( existing_image, int(start_time), int(end_time), - ( - PlaybackFactorEnum[playback_factor] - if playback_factor in PlaybackFactorEnum.__members__.values() - else PlaybackFactorEnum.realtime - ), ( PlaybackSourceEnum[playback_source] if playback_source in PlaybackSourceEnum.__members__.values() @@ -456,6 +453,138 @@ async def export_delete(event_id: str, request: Request): ) +@router.post( + "/export/custom/{camera_name}/start/{start_time}/end/{end_time}", + response_model=StartExportResponse, + dependencies=[Depends(require_camera_access)], + summary="Start custom recording export", + description="""Starts an export of a recording for the specified time range using custom FFmpeg arguments. + The export can be from recordings or preview footage. Returns the export ID if + successful, or an error message if the camera is invalid or no recordings/previews + are found for the time range. 
If ffmpeg_input_args and ffmpeg_output_args are not provided, + defaults to timelapse export settings.""", +) +def export_recording_custom( + request: Request, + camera_name: str, + start_time: float, + end_time: float, + body: ExportRecordingsCustomBody, +): + if not camera_name or not request.app.frigate_config.cameras.get(camera_name): + return JSONResponse( + content=( + {"success": False, "message": f"{camera_name} is not a valid camera."} + ), + status_code=404, + ) + + playback_source = body.source + friendly_name = body.name + existing_image = sanitize_filepath(body.image_path) if body.image_path else None + ffmpeg_input_args = body.ffmpeg_input_args + ffmpeg_output_args = body.ffmpeg_output_args + cpu_fallback = body.cpu_fallback + + export_case_id = body.export_case_id + if export_case_id is not None: + try: + ExportCase.get(ExportCase.id == export_case_id) + except DoesNotExist: + return JSONResponse( + content={"success": False, "message": "Export case not found"}, + status_code=404, + ) + + # Ensure that existing_image is a valid path + if existing_image and not existing_image.startswith(CLIPS_DIR): + return JSONResponse( + content=({"success": False, "message": "Invalid image path"}), + status_code=400, + ) + + if playback_source == "recordings": + recordings_count = ( + Recordings.select() + .where( + Recordings.start_time.between(start_time, end_time) + | Recordings.end_time.between(start_time, end_time) + | ( + (start_time > Recordings.start_time) + & (end_time < Recordings.end_time) + ) + ) + .where(Recordings.camera == camera_name) + .count() + ) + + if recordings_count <= 0: + return JSONResponse( + content=( + {"success": False, "message": "No recordings found for time range"} + ), + status_code=400, + ) + else: + previews_count = ( + Previews.select() + .where( + Previews.start_time.between(start_time, end_time) + | Previews.end_time.between(start_time, end_time) + | ((start_time > Previews.start_time) & (end_time < Previews.end_time)) + ) + 
.where(Previews.camera == camera_name) + .count() + ) + + if not is_current_hour(start_time) and previews_count <= 0: + return JSONResponse( + content=( + {"success": False, "message": "No previews found for time range"} + ), + status_code=400, + ) + + export_id = f"{camera_name}_{''.join(random.choices(string.ascii_lowercase + string.digits, k=6))}" + + # Set default values if not provided (timelapse defaults) + if ffmpeg_input_args is None: + ffmpeg_input_args = "" + + if ffmpeg_output_args is None: + ffmpeg_output_args = DEFAULT_TIME_LAPSE_FFMPEG_ARGS + + exporter = RecordingExporter( + request.app.frigate_config, + export_id, + camera_name, + friendly_name, + existing_image, + int(start_time), + int(end_time), + ( + PlaybackSourceEnum[playback_source] + if playback_source in PlaybackSourceEnum.__members__.values() + else PlaybackSourceEnum.recordings + ), + export_case_id, + ffmpeg_input_args, + ffmpeg_output_args, + cpu_fallback, + ) + exporter.start() + return JSONResponse( + content=( + { + "success": True, + "message": "Starting export of recording.", + "export_id": export_id, + } + ), + status_code=200, + ) + + @router.get( "/exports/{export_id}", response_model=ExportModel, diff --git a/frigate/config/camera/record.py b/frigate/config/camera/record.py index 21816523a..fe24cf522 100644 --- a/frigate/config/camera/record.py +++ b/frigate/config/camera/record.py @@ -19,8 +19,6 @@ __all__ = [ "RetainModeEnum", ] -DEFAULT_TIME_LAPSE_FFMPEG_ARGS = "-vf setpts=0.04*PTS -r 30" - class RecordRetainConfig(FrigateBaseModel): days: float = Field(default=0, ge=0, title="Default retention period.") @@ -67,9 +65,6 @@ class RecordPreviewConfig(FrigateBaseModel): class RecordExportConfig(FrigateBaseModel): - timelapse_args: str = Field( - default=DEFAULT_TIME_LAPSE_FFMPEG_ARGS, title="Timelapse Args" - ) hwaccel_args: Union[str, list[str]] = Field( default="auto", title="Export-specific FFmpeg hardware acceleration arguments." 
) diff --git a/frigate/record/export.py b/frigate/record/export.py index 9a8b5dbdb..afaed1a2a 100644 --- a/frigate/record/export.py +++ b/frigate/record/export.py @@ -33,6 +33,7 @@ from frigate.util.time import is_current_hour logger = logging.getLogger(__name__) +DEFAULT_TIME_LAPSE_FFMPEG_ARGS = "-vf setpts=0.04*PTS -r 30" TIMELAPSE_DATA_INPUT_ARGS = "-an -skip_frame nokey" @@ -40,11 +41,6 @@ def lower_priority(): os.nice(PROCESS_PRIORITY_LOW) -class PlaybackFactorEnum(str, Enum): - realtime = "realtime" - timelapse_25x = "timelapse_25x" - - class PlaybackSourceEnum(str, Enum): recordings = "recordings" preview = "preview" @@ -62,9 +58,11 @@ class RecordingExporter(threading.Thread): image: Optional[str], start_time: int, end_time: int, - playback_factor: PlaybackFactorEnum, playback_source: PlaybackSourceEnum, export_case_id: Optional[str] = None, + ffmpeg_input_args: Optional[str] = None, + ffmpeg_output_args: Optional[str] = None, + cpu_fallback: bool = False, ) -> None: super().__init__() self.config = config @@ -74,9 +72,11 @@ class RecordingExporter(threading.Thread): self.user_provided_image = image self.start_time = start_time self.end_time = end_time - self.playback_factor = playback_factor self.playback_source = playback_source self.export_case_id = export_case_id + self.ffmpeg_input_args = ffmpeg_input_args + self.ffmpeg_output_args = ffmpeg_output_args + self.cpu_fallback = cpu_fallback # ensure export thumb dir Path(os.path.join(CLIPS_DIR, "export")).mkdir(exist_ok=True) @@ -181,7 +181,9 @@ class RecordingExporter(threading.Thread): return thumb_path - def get_record_export_command(self, video_path: str) -> list[str]: + def get_record_export_command( + self, video_path: str, use_hwaccel: bool = True + ) -> list[str]: if (self.end_time - self.start_time) <= MAX_PLAYLIST_SECONDS: playlist_lines = f"http://127.0.0.1:5000/vod/{self.camera}/start/{self.start_time}/end/{self.end_time}/index.m3u8" ffmpeg_input = ( @@ -220,20 +222,25 @@ class 
RecordingExporter(threading.Thread): ffmpeg_input = "-y -protocol_whitelist pipe,file,http,tcp -f concat -safe 0 -i /dev/stdin" - if self.playback_factor == PlaybackFactorEnum.realtime: - ffmpeg_cmd = ( - f"{self.config.ffmpeg.ffmpeg_path} -hide_banner {ffmpeg_input} -c copy -movflags +faststart" - ).split(" ") - elif self.playback_factor == PlaybackFactorEnum.timelapse_25x: + if self.ffmpeg_input_args is not None and self.ffmpeg_output_args is not None: + hwaccel_args = ( + self.config.cameras[self.camera].record.export.hwaccel_args + if use_hwaccel + else None + ) ffmpeg_cmd = ( parse_preset_hardware_acceleration_encode( self.config.ffmpeg.ffmpeg_path, - self.config.cameras[self.camera].record.export.hwaccel_args, - f"-an {ffmpeg_input}", - f"{self.config.cameras[self.camera].record.export.timelapse_args} -movflags +faststart", + hwaccel_args, + f"{self.ffmpeg_input_args} -an {ffmpeg_input}".strip(), + f"{self.ffmpeg_output_args} -movflags +faststart".strip(), EncodeTypeEnum.timelapse, ) ).split(" ") + else: + ffmpeg_cmd = ( + f"{self.config.ffmpeg.ffmpeg_path} -hide_banner {ffmpeg_input} -c copy -movflags +faststart" + ).split(" ") # add metadata title = f"Frigate Recording for {self.camera}, {self.get_datetime_from_timestamp(self.start_time)} - {self.get_datetime_from_timestamp(self.end_time)}" @@ -243,7 +250,9 @@ class RecordingExporter(threading.Thread): return ffmpeg_cmd, playlist_lines - def get_preview_export_command(self, video_path: str) -> list[str]: + def get_preview_export_command( + self, video_path: str, use_hwaccel: bool = True + ) -> list[str]: playlist_lines = [] codec = "-c copy" @@ -311,20 +320,25 @@ class RecordingExporter(threading.Thread): "-y -protocol_whitelist pipe,file,tcp -f concat -safe 0 -i /dev/stdin" ) - if self.playback_factor == PlaybackFactorEnum.realtime: - ffmpeg_cmd = ( - f"{self.config.ffmpeg.ffmpeg_path} -hide_banner {ffmpeg_input} {codec} -movflags +faststart {video_path}" - ).split(" ") - elif self.playback_factor == 
PlaybackFactorEnum.timelapse_25x: + if self.ffmpeg_input_args is not None and self.ffmpeg_output_args is not None: + hwaccel_args = ( + self.config.cameras[self.camera].record.export.hwaccel_args + if use_hwaccel + else None + ) ffmpeg_cmd = ( parse_preset_hardware_acceleration_encode( self.config.ffmpeg.ffmpeg_path, - self.config.cameras[self.camera].record.export.hwaccel_args, - f"{TIMELAPSE_DATA_INPUT_ARGS} {ffmpeg_input}", - f"{self.config.cameras[self.camera].record.export.timelapse_args} -movflags +faststart {video_path}", + hwaccel_args, + f"{self.ffmpeg_input_args} {TIMELAPSE_DATA_INPUT_ARGS} {ffmpeg_input}".strip(), + f"{self.ffmpeg_output_args} -movflags +faststart {video_path}".strip(), EncodeTypeEnum.timelapse, ) ).split(" ") + else: + ffmpeg_cmd = ( + f"{self.config.ffmpeg.ffmpeg_path} -hide_banner {ffmpeg_input} {codec} -movflags +faststart {video_path}" + ).split(" ") # add metadata title = f"Frigate Preview for {self.camera}, {self.get_datetime_from_timestamp(self.start_time)} - {self.get_datetime_from_timestamp(self.end_time)}" @@ -381,6 +395,34 @@ class RecordingExporter(threading.Thread): capture_output=True, ) + # If export failed and cpu_fallback is enabled, retry without hwaccel + if ( + p.returncode != 0 + and self.cpu_fallback + and self.ffmpeg_input_args is not None + and self.ffmpeg_output_args is not None + ): + logger.warning( + f"Export with hardware acceleration failed, retrying without hwaccel for {self.export_id}" + ) + + if self.playback_source == PlaybackSourceEnum.recordings: + ffmpeg_cmd, playlist_lines = self.get_record_export_command( + video_path, use_hwaccel=False + ) + else: + ffmpeg_cmd, playlist_lines = self.get_preview_export_command( + video_path, use_hwaccel=False + ) + + p = sp.run( + ffmpeg_cmd, + input="\n".join(playlist_lines), + encoding="ascii", + preexec_fn=lower_priority, + capture_output=True, + ) + if p.returncode != 0: logger.error( f"Failed to export {self.playback_source.value} for command {' 
'.join(ffmpeg_cmd)}" diff --git a/frigate/util/config.py b/frigate/util/config.py index b9e3fccb8..1af5c8e4e 100644 --- a/frigate/util/config.py +++ b/frigate/util/config.py @@ -442,13 +442,35 @@ def migrate_018_0(config: dict[str, dict[str, Any]]) -> dict[str, dict[str, Any] if new_config.get("record", {}).get("sync_recordings") is not None: del new_config["record"]["sync_recordings"] - # Remove deprecated sync_recordings from camera-specific record configs + # Remove deprecated timelapse_args from global record export config + if new_config.get("record", {}).get("export", {}).get("timelapse_args") is not None: + del new_config["record"]["export"]["timelapse_args"] + # Remove export section if empty + if not new_config.get("record", {}).get("export"): + del new_config["record"]["export"] + # Remove record section if empty + if not new_config.get("record"): + del new_config["record"] + + # Remove deprecated sync_recordings and timelapse_args from camera-specific record configs for name, camera in config.get("cameras", {}).items(): camera_config: dict[str, dict[str, Any]] = camera.copy() if camera_config.get("record", {}).get("sync_recordings") is not None: del camera_config["record"]["sync_recordings"] + if ( + camera_config.get("record", {}).get("export", {}).get("timelapse_args") + is not None + ): + del camera_config["record"]["export"]["timelapse_args"] + # Remove export section if empty + if not camera_config.get("record", {}).get("export"): + del camera_config["record"]["export"] + # Remove record section if empty + if not camera_config.get("record"): + del camera_config["record"] + new_config["cameras"][name] = camera_config new_config["version"] = "0.18-0" From d3260e34b69d7a34b432f2a0c26dac8709aca608 Mon Sep 17 00:00:00 2001 From: John Shaw <1753078+johnshaw@users.noreply.github.com> Date: Sat, 17 Jan 2026 16:47:21 -0600 Subject: [PATCH 21/56] Optimize empty directory cleanup for recordings (#21695) The previous empty directory cleanup did a full recursive 
directory walk, which can be extremely slow. This new implementation only removes directories which have a chance of being empty due to a recent file deletion. --- frigate/record/cleanup.py | 43 ++++++++++++++++++++++++++++----------- frigate/util/media.py | 30 ++++++++++++++++----------- 2 files changed, 49 insertions(+), 24 deletions(-) diff --git a/frigate/record/cleanup.py b/frigate/record/cleanup.py index 29c68a53c..32887684f 100644 --- a/frigate/record/cleanup.py +++ b/frigate/record/cleanup.py @@ -11,7 +11,7 @@ from pathlib import Path from playhouse.sqlite_ext import SqliteExtDatabase from frigate.config import CameraConfig, FrigateConfig, RetainModeEnum -from frigate.const import CACHE_DIR, CLIPS_DIR, MAX_WAL_SIZE, RECORD_DIR +from frigate.const import CACHE_DIR, CLIPS_DIR, MAX_WAL_SIZE from frigate.models import Previews, Recordings, ReviewSegment, UserReviewStatus from frigate.util.builtin import clear_and_unlink from frigate.util.media import remove_empty_directories @@ -60,7 +60,7 @@ class RecordingCleanup(threading.Thread): db.execute_sql("PRAGMA wal_checkpoint(TRUNCATE);") db.close() - def expire_review_segments(self, config: CameraConfig, now: datetime) -> None: + def expire_review_segments(self, config: CameraConfig, now: datetime) -> set[Path]: """Delete review segments that are expired""" alert_expire_date = ( now - datetime.timedelta(days=config.record.alerts.retain.days) @@ -84,9 +84,12 @@ class RecordingCleanup(threading.Thread): .namedtuples() ) + maybe_empty_dirs = set() thumbs_to_delete = list(map(lambda x: x[1], expired_reviews)) for thumb_path in thumbs_to_delete: - Path(thumb_path).unlink(missing_ok=True) + thumb_path = Path(thumb_path) + thumb_path.unlink(missing_ok=True) + maybe_empty_dirs.add(thumb_path.parent) max_deletes = 100000 deleted_reviews_list = list(map(lambda x: x[0], expired_reviews)) @@ -99,13 +102,15 @@ class RecordingCleanup(threading.Thread): << deleted_reviews_list[i : i + max_deletes] ).execute() + return 
maybe_empty_dirs + def expire_existing_camera_recordings( self, continuous_expire_date: float, motion_expire_date: float, config: CameraConfig, reviews: ReviewSegment, - ) -> None: + ) -> set[Path]: """Delete recordings for existing camera based on retention config.""" # Get the timestamp for cutoff of retained days @@ -136,6 +141,8 @@ class RecordingCleanup(threading.Thread): .iterator() ) + maybe_empty_dirs = set() + # loop over recordings and see if they overlap with any non-expired reviews # TODO: expire segments based on segment stats according to config review_start = 0 @@ -190,8 +197,10 @@ class RecordingCleanup(threading.Thread): ) or (mode == RetainModeEnum.active_objects and recording.objects == 0) ): - Path(recording.path).unlink(missing_ok=True) + recording_path = Path(recording.path) + recording_path.unlink(missing_ok=True) deleted_recordings.add(recording.id) + maybe_empty_dirs.add(recording_path.parent) else: kept_recordings.append((recording.start_time, recording.end_time)) @@ -252,8 +261,10 @@ class RecordingCleanup(threading.Thread): # Delete previews without any relevant recordings if not keep: - Path(preview.path).unlink(missing_ok=True) + preview_path = Path(preview.path) + preview_path.unlink(missing_ok=True) deleted_previews.add(preview.id) + maybe_empty_dirs.add(preview_path.parent) # expire previews logger.debug(f"Expiring {len(deleted_previews)} previews") @@ -265,7 +276,9 @@ class RecordingCleanup(threading.Thread): Previews.id << deleted_previews_list[i : i + max_deletes] ).execute() - def expire_recordings(self) -> None: + return maybe_empty_dirs + + def expire_recordings(self) -> set[Path]: """Delete recordings based on retention config.""" logger.debug("Start expire recordings.") logger.debug("Start deleted cameras.") @@ -290,10 +303,14 @@ class RecordingCleanup(threading.Thread): .iterator() ) + maybe_empty_dirs = set() + deleted_recordings = set() for recording in no_camera_recordings: - Path(recording.path).unlink(missing_ok=True) 
+ recording_path = Path(recording.path) + recording_path.unlink(missing_ok=True) deleted_recordings.add(recording.id) + maybe_empty_dirs.add(recording_path.parent) logger.debug(f"Expiring {len(deleted_recordings)} recordings") # delete up to 100,000 at a time @@ -310,7 +327,7 @@ class RecordingCleanup(threading.Thread): logger.debug(f"Start camera: {camera}.") now = datetime.datetime.now() - self.expire_review_segments(config, now) + maybe_empty_dirs |= self.expire_review_segments(config, now) continuous_expire_date = ( now - datetime.timedelta(days=config.record.continuous.days) ).timestamp() @@ -340,7 +357,7 @@ class RecordingCleanup(threading.Thread): .namedtuples() ) - self.expire_existing_camera_recordings( + maybe_empty_dirs |= self.expire_existing_camera_recordings( continuous_expire_date, motion_expire_date, config, reviews ) logger.debug(f"End camera: {camera}.") @@ -348,6 +365,8 @@ class RecordingCleanup(threading.Thread): logger.debug("End all cameras.") logger.debug("End expire recordings.") + return maybe_empty_dirs + def run(self) -> None: # Expire tmp clips every minute, recordings and clean directories every hour. 
for counter in itertools.cycle(range(self.config.record.expire_interval)): @@ -359,6 +378,6 @@ class RecordingCleanup(threading.Thread): if counter == 0: self.clean_tmp_clips() - self.expire_recordings() - remove_empty_directories(RECORD_DIR) + maybe_empty_dirs = self.expire_recordings() + remove_empty_directories(maybe_empty_dirs) self.truncate_wal() diff --git a/frigate/util/media.py b/frigate/util/media.py index 24b690e31..26276f6a6 100644 --- a/frigate/util/media.py +++ b/frigate/util/media.py @@ -1,9 +1,12 @@ """Recordings Utilities.""" import datetime +import errno import logging import os from dataclasses import dataclass, field +from pathlib import Path +from typing import Iterable from peewee import DatabaseError, chunked @@ -47,20 +50,23 @@ class SyncResult: } -def remove_empty_directories(directory: str) -> None: - # list all directories recursively and sort them by path, - # longest first - paths = sorted( - [x[0] for x in os.walk(directory)], - key=lambda p: len(str(p)), - reverse=True, - ) +def remove_empty_directories(paths: Iterable[Path]) -> None: + """ + Remove directories if they exist and are empty. + Silently ignores non-existent and non-empty directories. 
+ """ + count = 0 for path in paths: - # don't delete the parent - if path == directory: + try: + path.rmdir() + except FileNotFoundError: continue - if len(os.listdir(path)) == 0: - os.rmdir(path) + except OSError as e: + if e.errno == errno.ENOTEMPTY: + continue + raise + count += 1 + logger.debug("Removed {count} empty directories") def sync_recordings( From 9b7cee18dbf073fc7caad6df2b93d1399cbb214d Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Sun, 18 Jan 2026 06:34:30 -0700 Subject: [PATCH 22/56] Implement llama.cpp GenAI Provider (#21690) * Implement llama.cpp GenAI Provider * Add docs * Update links * Fix broken mqtt links * Fix more broken anchors --- docs/docs/configuration/genai/config.md | 42 +++++++- docs/docs/configuration/genai/objects.md | 4 +- .../configuration/genai/review_summaries.md | 2 +- frigate/config/camera/genai.py | 1 + frigate/genai/llama_cpp.py | 101 ++++++++++++++++++ 5 files changed, 145 insertions(+), 5 deletions(-) create mode 100644 frigate/genai/llama_cpp.py diff --git a/docs/docs/configuration/genai/config.md b/docs/docs/configuration/genai/config.md index e1f79b744..6a004e353 100644 --- a/docs/docs/configuration/genai/config.md +++ b/docs/docs/configuration/genai/config.md @@ -5,7 +5,7 @@ title: Configuring Generative AI ## Configuration -A Generative AI provider can be configured in the global config, which will make the Generative AI features available for use. There are currently 3 native providers available to integrate with Frigate. Other providers that support the OpenAI standard API can also be used. See the OpenAI section below. +A Generative AI provider can be configured in the global config, which will make the Generative AI features available for use. There are currently 4 native providers available to integrate with Frigate. Other providers that support the OpenAI standard API can also be used. See the OpenAI section below. 
To use Generative AI, you must define a single provider at the global level of your Frigate configuration. If the provider you choose requires an API key, you may either directly paste it in your configuration, or store it in an environment variable prefixed with `FRIGATE_`. @@ -77,8 +77,46 @@ genai: provider: ollama base_url: http://localhost:11434 model: qwen3-vl:4b + provider_options: # other Ollama client options can be defined + keep_alive: -1 + options: + num_ctx: 8192 # make sure the context matches other services that are using ollama ``` +## llama.cpp + +[llama.cpp](https://github.com/ggml-org/llama.cpp) is a C++ implementation of LLaMA that provides a high-performance inference server. Using llama.cpp directly gives you access to all native llama.cpp options and parameters. + +:::warning + +Using llama.cpp on CPU is not recommended, high inference times make using Generative AI impractical. + +::: + +It is highly recommended to host the llama.cpp server on a machine with a discrete graphics card, or on an Apple silicon Mac for best performance. + +### Supported Models + +You must use a vision capable model with Frigate. The llama.cpp server supports various vision models in GGUF format. + +### Configuration + +```yaml +genai: + provider: llamacpp + base_url: http://localhost:8080 + model: your-model-name + provider_options: + temperature: 0.7 + repeat_penalty: 1.05 + top_p: 0.8 + top_k: 40 + min_p: 0.05 + seed: -1 +``` + +All llama.cpp native options can be passed through `provider_options`, including `temperature`, `top_k`, `top_p`, `min_p`, `repeat_penalty`, `repeat_last_n`, `seed`, `grammar`, and more. See the [llama.cpp server documentation](https://github.com/ggml-org/llama.cpp/blob/master/tools/server/README.md) for a complete list of available parameters. + ## Google Gemini Google Gemini has a [free tier](https://ai.google.dev/pricing) for the API, however the limits may not be sufficient for standard Frigate usage. 
Choose a plan appropriate for your installation. @@ -185,4 +223,4 @@ genai: base_url: https://instance.cognitiveservices.azure.com/openai/responses?api-version=2025-04-01-preview model: gpt-5-mini api_key: "{FRIGATE_OPENAI_API_KEY}" -``` +``` \ No newline at end of file diff --git a/docs/docs/configuration/genai/objects.md b/docs/docs/configuration/genai/objects.md index e3ae31393..c878f5ec8 100644 --- a/docs/docs/configuration/genai/objects.md +++ b/docs/docs/configuration/genai/objects.md @@ -11,7 +11,7 @@ By default, descriptions will be generated for all tracked objects and all zones Optionally, you can generate the description using a snapshot (if enabled) by setting `use_snapshot` to `True`. By default, this is set to `False`, which sends the uncompressed images from the `detect` stream collected over the object's lifetime to the model. Once the object lifecycle ends, only a single compressed and cropped thumbnail is saved with the tracked object. Using a snapshot might be useful when you want to _regenerate_ a tracked object's description as it will provide the AI with a higher-quality image (typically downscaled by the AI itself) than the cropped/compressed thumbnail. Using a snapshot otherwise has a trade-off in that only a single image is sent to your provider, which will limit the model's ability to determine object movement or direction. -Generative AI object descriptions can also be toggled dynamically for a camera via MQTT with the topic `frigate//object_descriptions/set`. See the [MQTT documentation](/integrations/mqtt/#frigatecamera_nameobjectdescriptionsset). +Generative AI object descriptions can also be toggled dynamically for a camera via MQTT with the topic `frigate//object_descriptions/set`. See the [MQTT documentation](/integrations/mqtt#frigatecamera_nameobject_descriptionsset). ## Usage and Best Practices @@ -75,4 +75,4 @@ Many providers also have a public facing chat interface for their models. 
Downlo - OpenAI - [ChatGPT](https://chatgpt.com) - Gemini - [Google AI Studio](https://aistudio.google.com) -- Ollama - [Open WebUI](https://docs.openwebui.com/) +- Ollama - [Open WebUI](https://docs.openwebui.com/) \ No newline at end of file diff --git a/docs/docs/configuration/genai/review_summaries.md b/docs/docs/configuration/genai/review_summaries.md index df287446c..c6f5e53ec 100644 --- a/docs/docs/configuration/genai/review_summaries.md +++ b/docs/docs/configuration/genai/review_summaries.md @@ -7,7 +7,7 @@ Generative AI can be used to automatically generate structured summaries of revi Requests for a summary are requested automatically to your AI provider for alert review items when the activity has ended, they can also be optionally enabled for detections as well. -Generative AI review summaries can also be toggled dynamically for a [camera via MQTT](/integrations/mqtt/#frigatecamera_namereviewdescriptionsset). +Generative AI review summaries can also be toggled dynamically for a [camera via MQTT](/integrations/mqtt#frigatecamera_namereview_descriptionsset). 
## Review Summary Usage and Best Practices diff --git a/frigate/config/camera/genai.py b/frigate/config/camera/genai.py index a4d9199af..3dd596c3b 100644 --- a/frigate/config/camera/genai.py +++ b/frigate/config/camera/genai.py @@ -14,6 +14,7 @@ class GenAIProviderEnum(str, Enum): azure_openai = "azure_openai" gemini = "gemini" ollama = "ollama" + llamacpp = "llamacpp" class GenAIConfig(FrigateBaseModel): diff --git a/frigate/genai/llama_cpp.py b/frigate/genai/llama_cpp.py new file mode 100644 index 000000000..45e364bc0 --- /dev/null +++ b/frigate/genai/llama_cpp.py @@ -0,0 +1,101 @@ +"""llama.cpp Provider for Frigate AI.""" + +import base64 +import logging +from typing import Any, Optional + +import requests + +from frigate.config import GenAIProviderEnum +from frigate.genai import GenAIClient, register_genai_provider + +logger = logging.getLogger(__name__) + + +@register_genai_provider(GenAIProviderEnum.llamacpp) +class LlamaCppClient(GenAIClient): + """Generative AI client for Frigate using llama.cpp server.""" + + LOCAL_OPTIMIZED_OPTIONS = { + "temperature": 0.7, + "repeat_penalty": 1.05, + "top_p": 0.8, + } + + provider: str # base_url + provider_options: dict[str, Any] + + def _init_provider(self): + """Initialize the client.""" + self.provider_options = { + **self.LOCAL_OPTIMIZED_OPTIONS, + **self.genai_config.provider_options, + } + return ( + self.genai_config.base_url.rstrip("/") + if self.genai_config.base_url + else None + ) + + def _send(self, prompt: str, images: list[bytes]) -> Optional[str]: + """Submit a request to llama.cpp server.""" + if self.provider is None: + logger.warning( + "llama.cpp provider has not been initialized, a description will not be generated. Check your llama.cpp configuration." 
+ ) + return None + + try: + content = [] + for image in images: + encoded_image = base64.b64encode(image).decode("utf-8") + content.append( + { + "type": "image_url", + "image_url": { + "url": f"data:image/jpeg;base64,{encoded_image}", + }, + } + ) + content.append( + { + "type": "text", + "text": prompt, + } + ) + + # Build request payload with llama.cpp native options + payload = { + "messages": [ + { + "role": "user", + "content": content, + }, + ], + **self.provider_options, + } + + response = requests.post( + f"{self.provider}/v1/chat/completions", + json=payload, + timeout=self.timeout, + ) + response.raise_for_status() + result = response.json() + + if ( + result is not None + and "choices" in result + and len(result["choices"]) > 0 + ): + choice = result["choices"][0] + if "message" in choice and "content" in choice["message"]: + return choice["message"]["content"].strip() + return None + except Exception as e: + logger.warning("llama.cpp returned an error: %s", str(e)) + return None + + def get_context_size(self) -> int: + """Get the context window size for llama.cpp.""" + return self.genai_config.provider_options.get("context_size", 4096) From af2339b35ce857938d2803e3ba979b4bb96dbe73 Mon Sep 17 00:00:00 2001 From: John Shaw <1753078+johnshaw@users.noreply.github.com> Date: Mon, 19 Jan 2026 22:24:27 -0600 Subject: [PATCH 23/56] Remove parents in remove_empty_directories (#21726) The original implementation did a full directory tree walk to find and remove empty directories, so this implementation should remove the parents as well, like the original did. 
--- frigate/record/cleanup.py | 4 ++-- frigate/util/media.py | 34 ++++++++++++++++++++++++---------- 2 files changed, 26 insertions(+), 12 deletions(-) diff --git a/frigate/record/cleanup.py b/frigate/record/cleanup.py index 32887684f..15a0ba7e8 100644 --- a/frigate/record/cleanup.py +++ b/frigate/record/cleanup.py @@ -11,7 +11,7 @@ from pathlib import Path from playhouse.sqlite_ext import SqliteExtDatabase from frigate.config import CameraConfig, FrigateConfig, RetainModeEnum -from frigate.const import CACHE_DIR, CLIPS_DIR, MAX_WAL_SIZE +from frigate.const import CACHE_DIR, CLIPS_DIR, MAX_WAL_SIZE, RECORD_DIR from frigate.models import Previews, Recordings, ReviewSegment, UserReviewStatus from frigate.util.builtin import clear_and_unlink from frigate.util.media import remove_empty_directories @@ -379,5 +379,5 @@ class RecordingCleanup(threading.Thread): if counter == 0: self.clean_tmp_clips() maybe_empty_dirs = self.expire_recordings() - remove_empty_directories(maybe_empty_dirs) + remove_empty_directories(Path(RECORD_DIR), maybe_empty_dirs) self.truncate_wal() diff --git a/frigate/util/media.py b/frigate/util/media.py index 26276f6a6..c7de85c9f 100644 --- a/frigate/util/media.py +++ b/frigate/util/media.py @@ -50,22 +50,36 @@ class SyncResult: } -def remove_empty_directories(paths: Iterable[Path]) -> None: +def remove_empty_directories(root: Path, paths: Iterable[Path]) -> None: """ Remove directories if they exist and are empty. Silently ignores non-existent and non-empty directories. + Attempts to remove parent directories as well, stopping at the given root. 
""" count = 0 - for path in paths: - try: - path.rmdir() - except FileNotFoundError: - continue - except OSError as e: - if e.errno == errno.ENOTEMPTY: + while True: + parents = set() + for path in paths: + if path == root: continue - raise - count += 1 + + try: + path.rmdir() + count += 1 + except FileNotFoundError: + pass + except OSError as e: + if e.errno == errno.ENOTEMPTY: + continue + raise + + parents.add(path.parent) + + if not parents: + break + + paths = parents + logger.debug("Removed {count} empty directories") From e76b48f98bdab7a404db578e31c9d1008123a1b0 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Tue, 20 Jan 2026 08:13:12 -0700 Subject: [PATCH 24/56] Implement LLM Chat API with tool calling support (#21731) * Implement initial tools definiton APIs * Add initial chat completion API with tool support * Implement other providers * Cleanup --- frigate/api/chat.py | 476 +++++++++++++++++++++ frigate/api/defs/request/chat_body.py | 34 ++ frigate/api/defs/response/chat_response.py | 37 ++ frigate/api/defs/tags.py | 1 + frigate/api/fastapi_app.py | 2 + frigate/genai/__init__.py | 58 +++ frigate/genai/azure-openai.py | 93 +++- frigate/genai/gemini.py | 188 +++++++- frigate/genai/llama_cpp.py | 130 ++++++ frigate/genai/ollama.py | 118 +++++ frigate/genai/openai.py | 113 ++++- 11 files changed, 1247 insertions(+), 3 deletions(-) create mode 100644 frigate/api/chat.py create mode 100644 frigate/api/defs/request/chat_body.py create mode 100644 frigate/api/defs/response/chat_response.py diff --git a/frigate/api/chat.py b/frigate/api/chat.py new file mode 100644 index 000000000..eeff3ab6d --- /dev/null +++ b/frigate/api/chat.py @@ -0,0 +1,476 @@ +"""Chat and LLM tool calling APIs.""" + +import json +import logging +from datetime import datetime, timezone +from typing import Any, Dict, List + +from fastapi import APIRouter, Body, Depends, Request +from fastapi.responses import JSONResponse +from pydantic import BaseModel + +from frigate.api.auth import ( + 
allow_any_authenticated, + get_allowed_cameras_for_filter, +) +from frigate.api.defs.query.events_query_parameters import EventsQueryParams +from frigate.api.defs.request.chat_body import ChatCompletionRequest +from frigate.api.defs.response.chat_response import ( + ChatCompletionResponse, + ChatMessageResponse, +) +from frigate.api.defs.tags import Tags +from frigate.api.event import events +from frigate.genai import get_genai_client + +logger = logging.getLogger(__name__) + +router = APIRouter(tags=[Tags.chat]) + + +class ToolExecuteRequest(BaseModel): + """Request model for tool execution.""" + + tool_name: str + arguments: Dict[str, Any] + + +def get_tool_definitions() -> List[Dict[str, Any]]: + """ + Get OpenAI-compatible tool definitions for Frigate. + + Returns a list of tool definitions that can be used with OpenAI-compatible + function calling APIs. + """ + return [ + { + "type": "function", + "function": { + "name": "search_objects", + "description": ( + "Search for detected objects in Frigate by camera, object label, time range, " + "zones, and other filters. Use this to answer questions about when " + "objects were detected, what objects appeared, or to find specific object detections. " + "An 'object' in Frigate represents a tracked detection (e.g., a person, package, car)." + ), + "parameters": { + "type": "object", + "properties": { + "camera": { + "type": "string", + "description": "Camera name to filter by (optional). 
Use 'all' for all cameras.", + }, + "label": { + "type": "string", + "description": "Object label to filter by (e.g., 'person', 'package', 'car').", + }, + "after": { + "type": "string", + "description": "Start time in ISO 8601 format (e.g., '2024-01-01T00:00:00Z').", + }, + "before": { + "type": "string", + "description": "End time in ISO 8601 format (e.g., '2024-01-01T23:59:59Z').", + }, + "zones": { + "type": "array", + "items": {"type": "string"}, + "description": "List of zone names to filter by.", + }, + "limit": { + "type": "integer", + "description": "Maximum number of objects to return (default: 10).", + "default": 10, + }, + }, + }, + "required": [], + }, + }, + ] + + +@router.get( + "/chat/tools", + dependencies=[Depends(allow_any_authenticated())], + summary="Get available tools", + description="Returns OpenAI-compatible tool definitions for function calling.", +) +def get_tools(request: Request) -> JSONResponse: + """Get list of available tools for LLM function calling.""" + tools = get_tool_definitions() + return JSONResponse(content={"tools": tools}) + + +async def _execute_search_objects( + request: Request, + arguments: Dict[str, Any], + allowed_cameras: List[str], +) -> JSONResponse: + """ + Execute the search_objects tool. + + This searches for detected objects (events) in Frigate using the same + logic as the events API endpoint. 
+ """ + # Parse ISO 8601 timestamps to Unix timestamps if provided + after = arguments.get("after") + before = arguments.get("before") + + if after: + try: + after_dt = datetime.fromisoformat(after.replace("Z", "+00:00")) + after = after_dt.timestamp() + except (ValueError, AttributeError): + logger.warning(f"Invalid 'after' timestamp format: {after}") + after = None + + if before: + try: + before_dt = datetime.fromisoformat(before.replace("Z", "+00:00")) + before = before_dt.timestamp() + except (ValueError, AttributeError): + logger.warning(f"Invalid 'before' timestamp format: {before}") + before = None + + # Convert zones array to comma-separated string if provided + zones = arguments.get("zones") + if isinstance(zones, list): + zones = ",".join(zones) + elif zones is None: + zones = "all" + + # Build query parameters compatible with EventsQueryParams + query_params = EventsQueryParams( + camera=arguments.get("camera", "all"), + cameras=arguments.get("camera", "all"), + label=arguments.get("label", "all"), + labels=arguments.get("label", "all"), + zones=zones, + zone=zones, + after=after, + before=before, + limit=arguments.get("limit", 10), + ) + + try: + # Call the events endpoint function directly + # The events function is synchronous and takes params and allowed_cameras + response = events(query_params, allowed_cameras) + + # The response is already a JSONResponse with event data + # Return it as-is for the LLM + return response + except Exception as e: + logger.error(f"Error executing search_objects: {e}", exc_info=True) + return JSONResponse( + content={ + "success": False, + "message": f"Error searching objects: {str(e)}", + }, + status_code=500, + ) + + +@router.post( + "/chat/execute", + dependencies=[Depends(allow_any_authenticated())], + summary="Execute a tool", + description="Execute a tool function call from an LLM.", +) +async def execute_tool( + request: Request, + body: ToolExecuteRequest = Body(...), + allowed_cameras: List[str] = 
Depends(get_allowed_cameras_for_filter), +) -> JSONResponse: + """ + Execute a tool function call. + + This endpoint receives tool calls from LLMs and executes the corresponding + Frigate operations, returning results in a format the LLM can understand. + """ + tool_name = body.tool_name + arguments = body.arguments + + logger.debug(f"Executing tool: {tool_name} with arguments: {arguments}") + + if tool_name == "search_objects": + return await _execute_search_objects(request, arguments, allowed_cameras) + + return JSONResponse( + content={ + "success": False, + "message": f"Unknown tool: {tool_name}", + "tool": tool_name, + }, + status_code=400, + ) + + +async def _execute_tool_internal( + tool_name: str, + arguments: Dict[str, Any], + request: Request, + allowed_cameras: List[str], +) -> Dict[str, Any]: + """ + Internal helper to execute a tool and return the result as a dict. + + This is used by the chat completion endpoint to execute tools. + """ + if tool_name == "search_objects": + response = await _execute_search_objects(request, arguments, allowed_cameras) + try: + if hasattr(response, "body"): + body_str = response.body.decode("utf-8") + return json.loads(body_str) + elif hasattr(response, "content"): + return response.content + else: + return {} + except (json.JSONDecodeError, AttributeError) as e: + logger.warning(f"Failed to extract tool result: {e}") + return {"error": "Failed to parse tool result"} + else: + return {"error": f"Unknown tool: {tool_name}"} + + +@router.post( + "/chat/completion", + response_model=ChatCompletionResponse, + dependencies=[Depends(allow_any_authenticated())], + summary="Chat completion with tool calling", + description=( + "Send a chat message to the configured GenAI provider with tool calling support. " + "The LLM can call Frigate tools to answer questions about your cameras and events." 
+ ), +) +async def chat_completion( + request: Request, + body: ChatCompletionRequest = Body(...), + allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter), +) -> JSONResponse: + """ + Chat completion endpoint with tool calling support. + + This endpoint: + 1. Gets the configured GenAI client + 2. Gets tool definitions + 3. Sends messages + tools to LLM + 4. Handles tool_calls if present + 5. Executes tools and sends results back to LLM + 6. Repeats until final answer + 7. Returns response to user + """ + genai_client = get_genai_client(request.app.frigate_config) + if not genai_client: + return JSONResponse( + content={ + "error": "GenAI is not configured. Please configure a GenAI provider in your Frigate config.", + }, + status_code=400, + ) + + tools = get_tool_definitions() + conversation = [] + + current_datetime = datetime.now(timezone.utc) + current_date_str = current_datetime.strftime("%Y-%m-%d") + current_time_str = current_datetime.strftime("%H:%M:%S %Z") + system_prompt = f"""You are a helpful assistant for Frigate, a security camera NVR system. You help users answer questions about their cameras, detected objects, and events. + +Current date and time: {current_date_str} at {current_time_str} (UTC) + +When users ask questions about "today", "yesterday", "this week", etc., use the current date above as reference. +When searching for objects or events, use ISO 8601 format for dates (e.g., {current_date_str}T00:00:00Z for the start of today). 
+Always be accurate with time calculations based on the current date provided.""" + + conversation.append( + { + "role": "system", + "content": system_prompt, + } + ) + + for msg in body.messages: + msg_dict = { + "role": msg.role, + "content": msg.content, + } + if msg.tool_call_id: + msg_dict["tool_call_id"] = msg.tool_call_id + if msg.name: + msg_dict["name"] = msg.name + conversation.append(msg_dict) + + tool_iterations = 0 + max_iterations = body.max_tool_iterations + + logger.debug( + f"Starting chat completion with {len(conversation)} message(s), " + f"{len(tools)} tool(s) available, max_iterations={max_iterations}" + ) + + try: + while tool_iterations < max_iterations: + logger.debug( + f"Calling LLM (iteration {tool_iterations + 1}/{max_iterations}) " + f"with {len(conversation)} message(s) in conversation" + ) + response = genai_client.chat_with_tools( + messages=conversation, + tools=tools if tools else None, + tool_choice="auto", + ) + + if response.get("finish_reason") == "error": + logger.error("GenAI client returned an error") + return JSONResponse( + content={ + "error": "An error occurred while processing your request.", + }, + status_code=500, + ) + + assistant_message = { + "role": "assistant", + "content": response.get("content"), + } + if response.get("tool_calls"): + assistant_message["tool_calls"] = [ + { + "id": tc["id"], + "type": "function", + "function": { + "name": tc["name"], + "arguments": json.dumps(tc["arguments"]), + }, + } + for tc in response["tool_calls"] + ] + conversation.append(assistant_message) + + tool_calls = response.get("tool_calls") + if not tool_calls: + logger.debug( + f"Chat completion finished with final answer (iterations: {tool_iterations})" + ) + return JSONResponse( + content=ChatCompletionResponse( + message=ChatMessageResponse( + role="assistant", + content=response.get("content"), + tool_calls=None, + ), + finish_reason=response.get("finish_reason", "stop"), + tool_iterations=tool_iterations, + 
).model_dump(), + ) + + # Execute tools + tool_iterations += 1 + logger.debug( + f"Tool calls detected (iteration {tool_iterations}/{max_iterations}): " + f"{len(tool_calls)} tool(s) to execute" + ) + tool_results = [] + + for tool_call in tool_calls: + tool_name = tool_call["name"] + tool_args = tool_call["arguments"] + tool_call_id = tool_call["id"] + + logger.debug( + f"Executing tool: {tool_name} (id: {tool_call_id}) with arguments: {json.dumps(tool_args, indent=2)}" + ) + + try: + tool_result = await _execute_tool_internal( + tool_name, tool_args, request, allowed_cameras + ) + + if isinstance(tool_result, dict): + result_content = json.dumps(tool_result) + result_summary = tool_result + if isinstance(tool_result, dict) and isinstance( + tool_result.get("content"), list + ): + result_count = len(tool_result.get("content", [])) + result_summary = { + "count": result_count, + "sample": tool_result.get("content", [])[:2] + if result_count > 0 + else [], + } + logger.debug( + f"Tool {tool_name} (id: {tool_call_id}) completed successfully. " + f"Result: {json.dumps(result_summary, indent=2)}" + ) + elif isinstance(tool_result, str): + result_content = tool_result + logger.debug( + f"Tool {tool_name} (id: {tool_call_id}) completed successfully. " + f"Result length: {len(result_content)} characters" + ) + else: + result_content = str(tool_result) + logger.debug( + f"Tool {tool_name} (id: {tool_call_id}) completed successfully. 
" + f"Result type: {type(tool_result).__name__}" + ) + + tool_results.append( + { + "role": "tool", + "tool_call_id": tool_call_id, + "content": result_content, + } + ) + except Exception as e: + logger.error( + f"Error executing tool {tool_name} (id: {tool_call_id}): {e}", + exc_info=True, + ) + error_content = json.dumps( + {"error": f"Tool execution failed: {str(e)}"} + ) + tool_results.append( + { + "role": "tool", + "tool_call_id": tool_call_id, + "content": error_content, + } + ) + logger.debug( + f"Tool {tool_name} (id: {tool_call_id}) failed. Error result added to conversation." + ) + + conversation.extend(tool_results) + logger.debug( + f"Added {len(tool_results)} tool result(s) to conversation. " + f"Continuing with next LLM call..." + ) + + logger.warning( + f"Max tool iterations ({max_iterations}) reached. Returning partial response." + ) + return JSONResponse( + content=ChatCompletionResponse( + message=ChatMessageResponse( + role="assistant", + content="I reached the maximum number of tool call iterations. 
Please try rephrasing your question.", + tool_calls=None, + ), + finish_reason="length", + tool_iterations=tool_iterations, + ).model_dump(), + ) + + except Exception as e: + logger.error(f"Error in chat completion: {e}", exc_info=True) + return JSONResponse( + content={ + "error": "An error occurred while processing your request.", + }, + status_code=500, + ) diff --git a/frigate/api/defs/request/chat_body.py b/frigate/api/defs/request/chat_body.py new file mode 100644 index 000000000..7b327bf5a --- /dev/null +++ b/frigate/api/defs/request/chat_body.py @@ -0,0 +1,34 @@ +"""Chat API request models.""" + +from typing import Optional + +from pydantic import BaseModel, Field + + +class ChatMessage(BaseModel): + """A single message in a chat conversation.""" + + role: str = Field( + description="Message role: 'user', 'assistant', 'system', or 'tool'" + ) + content: str = Field(description="Message content") + tool_call_id: Optional[str] = Field( + default=None, description="For tool messages, the ID of the tool call" + ) + name: Optional[str] = Field( + default=None, description="For tool messages, the tool name" + ) + + +class ChatCompletionRequest(BaseModel): + """Request for chat completion with tool calling.""" + + messages: list[ChatMessage] = Field( + description="List of messages in the conversation" + ) + max_tool_iterations: int = Field( + default=5, + ge=1, + le=10, + description="Maximum number of tool call iterations (default: 5)", + ) diff --git a/frigate/api/defs/response/chat_response.py b/frigate/api/defs/response/chat_response.py new file mode 100644 index 000000000..f1cc9194b --- /dev/null +++ b/frigate/api/defs/response/chat_response.py @@ -0,0 +1,37 @@ +"""Chat API response models.""" + +from typing import Any, Optional + +from pydantic import BaseModel, Field + + +class ToolCall(BaseModel): + """A tool call from the LLM.""" + + id: str = Field(description="Unique identifier for this tool call") + name: str = Field(description="Tool name to call") + 
arguments: dict[str, Any] = Field(description="Arguments for the tool call") + + +class ChatMessageResponse(BaseModel): + """A message in the chat response.""" + + role: str = Field(description="Message role") + content: Optional[str] = Field( + default=None, description="Message content (None if tool calls present)" + ) + tool_calls: Optional[list[ToolCall]] = Field( + default=None, description="Tool calls if LLM wants to call tools" + ) + + +class ChatCompletionResponse(BaseModel): + """Response from chat completion.""" + + message: ChatMessageResponse = Field(description="The assistant's message") + finish_reason: str = Field( + description="Reason generation stopped: 'stop', 'tool_calls', 'length', 'error'" + ) + tool_iterations: int = Field( + default=0, description="Number of tool call iterations performed" + ) diff --git a/frigate/api/defs/tags.py b/frigate/api/defs/tags.py index 20e4ac31b..3aaaa59ef 100644 --- a/frigate/api/defs/tags.py +++ b/frigate/api/defs/tags.py @@ -5,6 +5,7 @@ class Tags(Enum): app = "App" auth = "Auth" camera = "Camera" + chat = "Chat" events = "Events" export = "Export" classification = "Classification" diff --git a/frigate/api/fastapi_app.py b/frigate/api/fastapi_app.py index 27d844b8a..496c8fada 100644 --- a/frigate/api/fastapi_app.py +++ b/frigate/api/fastapi_app.py @@ -16,6 +16,7 @@ from frigate.api import app as main_app from frigate.api import ( auth, camera, + chat, classification, event, export, @@ -121,6 +122,7 @@ def create_fastapi_app( # Order of include_router matters: https://fastapi.tiangolo.com/tutorial/path-params/#order-matters app.include_router(auth.router) app.include_router(camera.router) + app.include_router(chat.router) app.include_router(classification.router) app.include_router(review.router) app.include_router(main_app.router) diff --git a/frigate/genai/__init__.py b/frigate/genai/__init__.py index be1f6d1e7..07b7b2a2b 100644 --- a/frigate/genai/__init__.py +++ b/frigate/genai/__init__.py @@ -292,6 +292,64 
@@ Guidelines: """Get the context window size for this provider in tokens.""" return 4096 + def chat_with_tools( + self, + messages: list[dict[str, Any]], + tools: Optional[list[dict[str, Any]]] = None, + tool_choice: Optional[str] = "auto", + ) -> dict[str, Any]: + """ + Send chat messages to LLM with optional tool definitions. + + This method handles conversation-style interactions with the LLM, + including function calling/tool usage capabilities. + + Args: + messages: List of message dictionaries. Each message should have: + - 'role': str - One of 'user', 'assistant', 'system', or 'tool' + - 'content': str - The message content + - 'tool_call_id': Optional[str] - For tool responses, the ID of the tool call + - 'name': Optional[str] - For tool messages, the tool name + tools: Optional list of tool definitions in OpenAI-compatible format. + Each tool should have 'type': 'function' and 'function' with: + - 'name': str - Tool name + - 'description': str - Tool description + - 'parameters': dict - JSON schema for parameters + tool_choice: How the model should handle tools: + - 'auto': Model decides whether to call tools + - 'none': Model must not call tools + - 'required': Model must call at least one tool + - Or a dict specifying a specific tool to call + **kwargs: Additional provider-specific parameters. + + Returns: + Dictionary with: + - 'content': Optional[str] - The text response from the LLM, None if tool calls + - 'tool_calls': Optional[List[Dict]] - List of tool calls if LLM wants to call tools. + Each tool call dict has: + - 'id': str - Unique identifier for this tool call + - 'name': str - Tool name to call + - 'arguments': dict - Arguments for the tool call (parsed JSON) + - 'finish_reason': str - Reason generation stopped: + - 'stop': Normal completion + - 'tool_calls': LLM wants to call tools + - 'length': Hit token limit + - 'error': An error occurred + + Raises: + NotImplementedError: If the provider doesn't implement this method. 
+ """ + # Base implementation - each provider should override this + logger.warning( + f"{self.__class__.__name__} does not support chat_with_tools. " + "This method should be overridden by the provider implementation." + ) + return { + "content": None, + "tool_calls": None, + "finish_reason": "error", + } + def get_genai_client(config: FrigateConfig) -> Optional[GenAIClient]: """Get the GenAI client.""" diff --git a/frigate/genai/azure-openai.py b/frigate/genai/azure-openai.py index eb08f7786..21ed5d856 100644 --- a/frigate/genai/azure-openai.py +++ b/frigate/genai/azure-openai.py @@ -1,8 +1,9 @@ """Azure OpenAI Provider for Frigate AI.""" import base64 +import json import logging -from typing import Optional +from typing import Any, Optional from urllib.parse import parse_qs, urlparse from openai import AzureOpenAI @@ -76,3 +77,93 @@ class OpenAIClient(GenAIClient): def get_context_size(self) -> int: """Get the context window size for Azure OpenAI.""" return 128000 + + def chat_with_tools( + self, + messages: list[dict[str, Any]], + tools: Optional[list[dict[str, Any]]] = None, + tool_choice: Optional[str] = "auto", + ) -> dict[str, Any]: + try: + openai_tool_choice = None + if tool_choice: + if tool_choice == "none": + openai_tool_choice = "none" + elif tool_choice == "auto": + openai_tool_choice = "auto" + elif tool_choice == "required": + openai_tool_choice = "required" + + request_params = { + "model": self.genai_config.model, + "messages": messages, + "timeout": self.timeout, + } + + if tools: + request_params["tools"] = tools + if openai_tool_choice is not None: + request_params["tool_choice"] = openai_tool_choice + + result = self.provider.chat.completions.create(**request_params) + + if ( + result is None + or not hasattr(result, "choices") + or len(result.choices) == 0 + ): + return { + "content": None, + "tool_calls": None, + "finish_reason": "error", + } + + choice = result.choices[0] + message = choice.message + + content = message.content.strip() if 
message.content else None + + tool_calls = None + if message.tool_calls: + tool_calls = [] + for tool_call in message.tool_calls: + try: + arguments = json.loads(tool_call.function.arguments) + except (json.JSONDecodeError, AttributeError) as e: + logger.warning( + f"Failed to parse tool call arguments: {e}, " + f"tool: {tool_call.function.name if hasattr(tool_call.function, 'name') else 'unknown'}" + ) + arguments = {} + + tool_calls.append( + { + "id": tool_call.id if hasattr(tool_call, "id") else "", + "name": tool_call.function.name + if hasattr(tool_call.function, "name") + else "", + "arguments": arguments, + } + ) + + finish_reason = "error" + if hasattr(choice, "finish_reason") and choice.finish_reason: + finish_reason = choice.finish_reason + elif tool_calls: + finish_reason = "tool_calls" + elif content: + finish_reason = "stop" + + return { + "content": content, + "tool_calls": tool_calls, + "finish_reason": finish_reason, + } + + except Exception as e: + logger.warning("Azure OpenAI returned an error: %s", str(e)) + return { + "content": None, + "tool_calls": None, + "finish_reason": "error", + } diff --git a/frigate/genai/gemini.py b/frigate/genai/gemini.py index b700c33a4..c2e5d9f7e 100644 --- a/frigate/genai/gemini.py +++ b/frigate/genai/gemini.py @@ -1,7 +1,8 @@ """Gemini Provider for Frigate AI.""" +import json import logging -from typing import Optional +from typing import Any, Optional from google import genai from google.genai import errors, types @@ -76,3 +77,188 @@ class GeminiClient(GenAIClient): """Get the context window size for Gemini.""" # Gemini Pro Vision has a 1M token context window return 1000000 + + def chat_with_tools( + self, + messages: list[dict[str, Any]], + tools: Optional[list[dict[str, Any]]] = None, + tool_choice: Optional[str] = "auto", + ) -> dict[str, Any]: + try: + if tools: + function_declarations = [] + for tool in tools: + if tool.get("type") == "function": + func_def = tool.get("function", {}) + 
function_declarations.append( + genai.protos.FunctionDeclaration( + name=func_def.get("name"), + description=func_def.get("description"), + parameters=genai.protos.Schema( + type=genai.protos.Type.OBJECT, + properties={ + prop_name: genai.protos.Schema( + type=_convert_json_type_to_gemini( + prop.get("type") + ), + description=prop.get("description"), + ) + for prop_name, prop in func_def.get( + "parameters", {} + ) + .get("properties", {}) + .items() + }, + required=func_def.get("parameters", {}).get( + "required", [] + ), + ), + ) + ) + + tool_config = genai.protos.Tool( + function_declarations=function_declarations + ) + + if tool_choice == "none": + function_calling_config = genai.protos.FunctionCallingConfig( + mode=genai.protos.FunctionCallingConfig.Mode.NONE + ) + elif tool_choice == "required": + function_calling_config = genai.protos.FunctionCallingConfig( + mode=genai.protos.FunctionCallingConfig.Mode.ANY + ) + else: + function_calling_config = genai.protos.FunctionCallingConfig( + mode=genai.protos.FunctionCallingConfig.Mode.AUTO + ) + else: + tool_config = None + function_calling_config = None + + contents = [] + for msg in messages: + role = msg.get("role") + content = msg.get("content", "") + + if role == "system": + continue + elif role == "user": + contents.append({"role": "user", "parts": [content]}) + elif role == "assistant": + parts = [content] if content else [] + if "tool_calls" in msg: + for tc in msg["tool_calls"]: + parts.append( + genai.protos.FunctionCall( + name=tc["function"]["name"], + args=json.loads(tc["function"]["arguments"]), + ) + ) + contents.append({"role": "model", "parts": parts}) + elif role == "tool": + tool_name = msg.get("name", "") + tool_result = ( + json.loads(content) if isinstance(content, str) else content + ) + contents.append( + { + "role": "function", + "parts": [ + genai.protos.FunctionResponse( + name=tool_name, + response=tool_result, + ) + ], + } + ) + + generation_config = genai.types.GenerationConfig( + 
candidate_count=1, + ) + if function_calling_config: + generation_config.function_calling_config = function_calling_config + + response = self.provider.generate_content( + contents, + tools=[tool_config] if tool_config else None, + generation_config=generation_config, + request_options=genai.types.RequestOptions(timeout=self.timeout), + ) + + content = None + tool_calls = None + + if response.candidates and response.candidates[0].content: + parts = response.candidates[0].content.parts + text_parts = [p.text for p in parts if hasattr(p, "text") and p.text] + if text_parts: + content = " ".join(text_parts).strip() + + function_calls = [ + p.function_call + for p in parts + if hasattr(p, "function_call") and p.function_call + ] + if function_calls: + tool_calls = [] + for fc in function_calls: + tool_calls.append( + { + "id": f"call_{hash(fc.name)}", + "name": fc.name, + "arguments": dict(fc.args) + if hasattr(fc, "args") + else {}, + } + ) + + finish_reason = "error" + if response.candidates: + finish_reason_map = { + genai.types.FinishReason.STOP: "stop", + genai.types.FinishReason.MAX_TOKENS: "length", + genai.types.FinishReason.SAFETY: "stop", + genai.types.FinishReason.RECITATION: "stop", + genai.types.FinishReason.OTHER: "error", + } + finish_reason = finish_reason_map.get( + response.candidates[0].finish_reason, "error" + ) + elif tool_calls: + finish_reason = "tool_calls" + elif content: + finish_reason = "stop" + + return { + "content": content, + "tool_calls": tool_calls, + "finish_reason": finish_reason, + } + + except GoogleAPICallError as e: + logger.warning("Gemini returned an error: %s", str(e)) + return { + "content": None, + "tool_calls": None, + "finish_reason": "error", + } + except Exception as e: + logger.warning("Unexpected error in Gemini chat_with_tools: %s", str(e)) + return { + "content": None, + "tool_calls": None, + "finish_reason": "error", + } + + +def _convert_json_type_to_gemini(json_type: str) -> genai.protos.Type: + type_map = { + 
"string": genai.protos.Type.STRING, + "integer": genai.protos.Type.INTEGER, + "number": genai.protos.Type.NUMBER, + "boolean": genai.protos.Type.BOOLEAN, + "array": genai.protos.Type.ARRAY, + "object": genai.protos.Type.OBJECT, + } + return type_map.get(json_type, genai.protos.Type.STRING) diff --git a/frigate/genai/llama_cpp.py b/frigate/genai/llama_cpp.py index 45e364bc0..5523ce389 100644 --- a/frigate/genai/llama_cpp.py +++ b/frigate/genai/llama_cpp.py @@ -1,6 +1,7 @@ """llama.cpp Provider for Frigate AI.""" import base64 +import json import logging from typing import Any, Optional @@ -99,3 +100,132 @@ class LlamaCppClient(GenAIClient): def get_context_size(self) -> int: """Get the context window size for llama.cpp.""" return self.genai_config.provider_options.get("context_size", 4096) + + def chat_with_tools( + self, + messages: list[dict[str, Any]], + tools: Optional[list[dict[str, Any]]] = None, + tool_choice: Optional[str] = "auto", + ) -> dict[str, Any]: + """ + Send chat messages to llama.cpp server with optional tool definitions. + + Uses the OpenAI-compatible endpoint but passes through all native llama.cpp + parameters (like slot_id, temperature, etc.) via provider_options. + """ + if self.provider is None: + logger.warning( + "llama.cpp provider has not been initialized. Check your llama.cpp configuration." 
+ ) + return { + "content": None, + "tool_calls": None, + "finish_reason": "error", + } + + try: + openai_tool_choice = None + if tool_choice: + if tool_choice == "none": + openai_tool_choice = "none" + elif tool_choice == "auto": + openai_tool_choice = "auto" + elif tool_choice == "required": + openai_tool_choice = "required" + + payload = { + "messages": messages, + } + + if tools: + payload["tools"] = tools + if openai_tool_choice is not None: + payload["tool_choice"] = openai_tool_choice + + provider_opts = { + k: v for k, v in self.provider_options.items() if k != "context_size" + } + payload.update(provider_opts) + + response = requests.post( + f"{self.provider}/v1/chat/completions", + json=payload, + timeout=self.timeout, + ) + response.raise_for_status() + result = response.json() + + if result is None or "choices" not in result or len(result["choices"]) == 0: + return { + "content": None, + "tool_calls": None, + "finish_reason": "error", + } + + choice = result["choices"][0] + message = choice.get("message", {}) + + content = message.get("content") + if content: + content = content.strip() + else: + content = None + + tool_calls = None + if "tool_calls" in message and message["tool_calls"]: + tool_calls = [] + for tool_call in message["tool_calls"]: + try: + function_data = tool_call.get("function", {}) + arguments_str = function_data.get("arguments", "{}") + arguments = json.loads(arguments_str) + except (json.JSONDecodeError, KeyError, TypeError) as e: + logger.warning( + f"Failed to parse tool call arguments: {e}, " + f"tool: {function_data.get('name', 'unknown')}" + ) + arguments = {} + + tool_calls.append( + { + "id": tool_call.get("id", ""), + "name": function_data.get("name", ""), + "arguments": arguments, + } + ) + + finish_reason = "error" + if "finish_reason" in choice and choice["finish_reason"]: + finish_reason = choice["finish_reason"] + elif tool_calls: + finish_reason = "tool_calls" + elif content: + finish_reason = "stop" + + return { + 
"content": content, + "tool_calls": tool_calls, + "finish_reason": finish_reason, + } + + except requests.exceptions.Timeout as e: + logger.warning("llama.cpp request timed out: %s", str(e)) + return { + "content": None, + "tool_calls": None, + "finish_reason": "error", + } + except requests.exceptions.RequestException as e: + logger.warning("llama.cpp returned an error: %s", str(e)) + return { + "content": None, + "tool_calls": None, + "finish_reason": "error", + } + except Exception as e: + logger.warning("Unexpected error in llama.cpp chat_with_tools: %s", str(e)) + return { + "content": None, + "tool_calls": None, + "finish_reason": "error", + } diff --git a/frigate/genai/ollama.py b/frigate/genai/ollama.py index ab6d3c0b3..6e9a4f5d5 100644 --- a/frigate/genai/ollama.py +++ b/frigate/genai/ollama.py @@ -1,5 +1,6 @@ """Ollama Provider for Frigate AI.""" +import json import logging from typing import Any, Optional @@ -86,3 +87,120 @@ class OllamaClient(GenAIClient): return self.genai_config.provider_options.get("options", {}).get( "num_ctx", 4096 ) + + def chat_with_tools( + self, + messages: list[dict[str, Any]], + tools: Optional[list[dict[str, Any]]] = None, + tool_choice: Optional[str] = "auto", + ) -> dict[str, Any]: + if self.provider is None: + logger.warning( + "Ollama provider has not been initialized. Check your Ollama configuration." 
+ ) + return { + "content": None, + "tool_calls": None, + "finish_reason": "error", + } + + try: + request_messages = [] + for msg in messages: + msg_dict = { + "role": msg.get("role"), + "content": msg.get("content", ""), + } + if msg.get("tool_call_id"): + msg_dict["tool_call_id"] = msg["tool_call_id"] + if msg.get("name"): + msg_dict["name"] = msg["name"] + if msg.get("tool_calls"): + msg_dict["tool_calls"] = msg["tool_calls"] + request_messages.append(msg_dict) + + request_params = { + "model": self.genai_config.model, + "messages": request_messages, + } + + if tools: + request_params["tools"] = tools + if tool_choice: + if tool_choice == "none": + request_params["tool_choice"] = "none" + elif tool_choice == "required": + request_params["tool_choice"] = "required" + elif tool_choice == "auto": + request_params["tool_choice"] = "auto" + + request_params.update(self.provider_options) + + response = self.provider.chat(**request_params) + + if not response or "message" not in response: + return { + "content": None, + "tool_calls": None, + "finish_reason": "error", + } + + message = response["message"] + content = ( + message.get("content", "").strip() if message.get("content") else None + ) + + tool_calls = None + if "tool_calls" in message and message["tool_calls"]: + tool_calls = [] + for tool_call in message["tool_calls"]: + try: + function_data = tool_call.get("function", {}) + arguments_str = function_data.get("arguments", "{}") + arguments = json.loads(arguments_str) + except (json.JSONDecodeError, KeyError, TypeError) as e: + logger.warning( + f"Failed to parse tool call arguments: {e}, " + f"tool: {function_data.get('name', 'unknown')}" + ) + arguments = {} + + tool_calls.append( + { + "id": tool_call.get("id", ""), + "name": function_data.get("name", ""), + "arguments": arguments, + } + ) + + finish_reason = "error" + if "done" in response and response["done"]: + if tool_calls: + finish_reason = "tool_calls" + elif content: + finish_reason = "stop" + elif 
tool_calls: + finish_reason = "tool_calls" + elif content: + finish_reason = "stop" + + return { + "content": content, + "tool_calls": tool_calls, + "finish_reason": finish_reason, + } + + except (TimeoutException, ResponseError, ConnectionError) as e: + logger.warning("Ollama returned an error: %s", str(e)) + return { + "content": None, + "tool_calls": None, + "finish_reason": "error", + } + except Exception as e: + logger.warning("Unexpected error in Ollama chat_with_tools: %s", str(e)) + return { + "content": None, + "tool_calls": None, + "finish_reason": "error", + } diff --git a/frigate/genai/openai.py b/frigate/genai/openai.py index 1fb0dd852..c8d9ca7ab 100644 --- a/frigate/genai/openai.py +++ b/frigate/genai/openai.py @@ -1,8 +1,9 @@ """OpenAI Provider for Frigate AI.""" import base64 +import json import logging -from typing import Optional +from typing import Any, Optional from httpx import TimeoutException from openai import OpenAI @@ -116,3 +117,113 @@ class OpenAIClient(GenAIClient): f"Using default context size {self.context_size} for model {self.genai_config.model}" ) return self.context_size + + def chat_with_tools( + self, + messages: list[dict[str, Any]], + tools: Optional[list[dict[str, Any]]] = None, + tool_choice: Optional[str] = "auto", + ) -> dict[str, Any]: + """ + Send chat messages to OpenAI with optional tool definitions. + + Implements function calling/tool usage for OpenAI models. 
+ """ + try: + openai_tool_choice = None + if tool_choice: + if tool_choice == "none": + openai_tool_choice = "none" + elif tool_choice == "auto": + openai_tool_choice = "auto" + elif tool_choice == "required": + openai_tool_choice = "required" + + request_params = { + "model": self.genai_config.model, + "messages": messages, + "timeout": self.timeout, + } + + if tools: + request_params["tools"] = tools + if openai_tool_choice is not None: + request_params["tool_choice"] = openai_tool_choice + + if isinstance(self.genai_config.provider_options, dict): + excluded_options = {"context_size"} + provider_opts = { + k: v + for k, v in self.genai_config.provider_options.items() + if k not in excluded_options + } + request_params.update(provider_opts) + + result = self.provider.chat.completions.create(**request_params) + + if ( + result is None + or not hasattr(result, "choices") + or len(result.choices) == 0 + ): + return { + "content": None, + "tool_calls": None, + "finish_reason": "error", + } + + choice = result.choices[0] + message = choice.message + content = message.content.strip() if message.content else None + + tool_calls = None + if message.tool_calls: + tool_calls = [] + for tool_call in message.tool_calls: + try: + arguments = json.loads(tool_call.function.arguments) + except (json.JSONDecodeError, AttributeError) as e: + logger.warning( + f"Failed to parse tool call arguments: {e}, " + f"tool: {tool_call.function.name if hasattr(tool_call.function, 'name') else 'unknown'}" + ) + arguments = {} + + tool_calls.append( + { + "id": tool_call.id if hasattr(tool_call, "id") else "", + "name": tool_call.function.name + if hasattr(tool_call.function, "name") + else "", + "arguments": arguments, + } + ) + + finish_reason = "error" + if hasattr(choice, "finish_reason") and choice.finish_reason: + finish_reason = choice.finish_reason + elif tool_calls: + finish_reason = "tool_calls" + elif content: + finish_reason = "stop" + + return { + "content": content, + 
"tool_calls": tool_calls, + "finish_reason": finish_reason, + } + + except TimeoutException as e: + logger.warning("OpenAI request timed out: %s", str(e)) + return { + "content": None, + "tool_calls": None, + "finish_reason": "error", + } + except Exception as e: + logger.warning("OpenAI returned an error: %s", str(e)) + return { + "content": None, + "tool_calls": None, + "finish_reason": "error", + } From c78ab2dc87b7421b5e0e8ed0b4f4892589860bde Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Thu, 22 Jan 2026 11:21:41 -0600 Subject: [PATCH 25/56] Offline preview image (#21752) * use latest preview frame for latest image when camera is offline * remove frame extraction logic * tests * frontend * add description to api endpoint --- frigate/api/media.py | 52 ++++++--- frigate/output/preview.py | 45 ++++++++ .../test/http_api/test_http_latest_frame.py | 107 ++++++++++++++++++ frigate/test/test_preview_loader.py | 80 +++++++++++++ web/src/components/player/LivePlayer.tsx | 27 +++++ 5 files changed, 297 insertions(+), 14 deletions(-) create mode 100644 frigate/test/http_api/test_http_latest_frame.py create mode 100644 frigate/test/test_preview_loader.py diff --git a/frigate/api/media.py b/frigate/api/media.py index b488ba360..7d0d02a45 100644 --- a/frigate/api/media.py +++ b/frigate/api/media.py @@ -42,6 +42,7 @@ from frigate.const import ( PREVIEW_FRAME_TYPE, ) from frigate.models import Event, Previews, Recordings, Regions, ReviewSegment +from frigate.output.preview import get_most_recent_preview_frame from frigate.track.object_processing import TrackedObjectProcessor from frigate.util.file import get_event_thumbnail_bytes from frigate.util.image import get_image_from_recording @@ -125,7 +126,9 @@ async def camera_ptz_info(request: Request, camera_name: str): @router.get( - "/{camera_name}/latest.{extension}", dependencies=[Depends(require_camera_access)] + "/{camera_name}/latest.{extension}", + 
dependencies=[Depends(require_camera_access)], + description="Returns the latest frame from the specified camera in the requested format (jpg, png, webp). Falls back to preview frames if the camera is offline.", ) async def latest_frame( request: Request, @@ -159,20 +162,37 @@ async def latest_frame( or 10 ) + is_offline = False if frame is None or datetime.now().timestamp() > ( frame_processor.get_current_frame_time(camera_name) + retry_interval ): - if request.app.camera_error_image is None: - error_image = glob.glob( - os.path.join(INSTALL_DIR, "frigate/images/camera-error.jpg") - ) + last_frame_time = frame_processor.get_current_frame_time(camera_name) + preview_path = get_most_recent_preview_frame( + camera_name, before=last_frame_time + ) - if len(error_image) > 0: - request.app.camera_error_image = cv2.imread( - error_image[0], cv2.IMREAD_UNCHANGED + if preview_path: + logger.debug(f"Using most recent preview frame for {camera_name}") + frame = cv2.imread(preview_path, cv2.IMREAD_UNCHANGED) + + if frame is not None: + is_offline = True + + if frame is None or not is_offline: + logger.debug( + f"No live or preview frame available for {camera_name}. Using error image." 
+ ) + if request.app.camera_error_image is None: + error_image = glob.glob( + os.path.join(INSTALL_DIR, "frigate/images/camera-error.jpg") ) - frame = request.app.camera_error_image + if len(error_image) > 0: + request.app.camera_error_image = cv2.imread( + error_image[0], cv2.IMREAD_UNCHANGED + ) + + frame = request.app.camera_error_image height = int(params.height or str(frame.shape[0])) width = int(height * frame.shape[1] / frame.shape[0]) @@ -194,14 +214,18 @@ async def latest_frame( frame = cv2.resize(frame, dsize=(width, height), interpolation=cv2.INTER_AREA) _, img = cv2.imencode(f".{extension.value}", frame, quality_params) + + headers = { + "Cache-Control": "no-store" if not params.store else "private, max-age=60", + } + + if is_offline: + headers["X-Frigate-Offline"] = "true" + return Response( content=img.tobytes(), media_type=extension.get_mime_type(), - headers={ - "Cache-Control": "no-store" - if not params.store - else "private, max-age=60", - }, + headers=headers, ) elif ( camera_name == "birdseye" diff --git a/frigate/output/preview.py b/frigate/output/preview.py index 6dfd90904..f16bb3bd7 100644 --- a/frigate/output/preview.py +++ b/frigate/output/preview.py @@ -57,6 +57,51 @@ def get_cache_image_name(camera: str, frame_time: float) -> str: ) +def get_most_recent_preview_frame(camera: str, before: float = None) -> str | None: + """Get the most recent preview frame for a camera.""" + if not os.path.exists(PREVIEW_CACHE_DIR): + return None + + try: + # files are named preview_{camera}-{timestamp}.webp + # we want the largest timestamp that is less than or equal to before + preview_files = [ + f + for f in os.listdir(PREVIEW_CACHE_DIR) + if f.startswith(f"preview_{camera}-") + and f.endswith(f".{PREVIEW_FRAME_TYPE}") + ] + + if not preview_files: + return None + + # sort by timestamp in descending order + # filenames are like preview_front-1712345678.901234.webp + preview_files.sort(reverse=True) + + if before is None: + return 
os.path.join(PREVIEW_CACHE_DIR, preview_files[0]) + + for file_name in preview_files: + try: + # Extract timestamp: preview_front-1712345678.901234.webp + # Split by dash and extension + timestamp_part = file_name.split("-")[-1].split( + f".{PREVIEW_FRAME_TYPE}" + )[0] + timestamp = float(timestamp_part) + + if timestamp <= before: + return os.path.join(PREVIEW_CACHE_DIR, file_name) + except (ValueError, IndexError): + continue + + return None + except Exception as e: + logger.error(f"Error searching for most recent preview frame: {e}") + return None + + class FFMpegConverter(threading.Thread): """Convert a list of still frames into a vfr mp4.""" diff --git a/frigate/test/http_api/test_http_latest_frame.py b/frigate/test/http_api/test_http_latest_frame.py new file mode 100644 index 000000000..755ee6eb1 --- /dev/null +++ b/frigate/test/http_api/test_http_latest_frame.py @@ -0,0 +1,107 @@ +import os +import shutil +from unittest.mock import MagicMock + +import cv2 +import numpy as np + +from frigate.output.preview import PREVIEW_CACHE_DIR, PREVIEW_FRAME_TYPE +from frigate.test.http_api.base_http_test import AuthTestClient, BaseTestHttp + + +class TestHttpLatestFrame(BaseTestHttp): + def setUp(self): + super().setUp([]) + self.app = super().create_app() + self.app.detected_frames_processor = MagicMock() + + if os.path.exists(PREVIEW_CACHE_DIR): + shutil.rmtree(PREVIEW_CACHE_DIR) + os.makedirs(PREVIEW_CACHE_DIR) + + def tearDown(self): + if os.path.exists(PREVIEW_CACHE_DIR): + shutil.rmtree(PREVIEW_CACHE_DIR) + super().tearDown() + + def test_latest_frame_fallback_to_preview(self): + camera = "front_door" + # 1. Mock frame processor to return None (simulating offline/missing frame) + self.app.detected_frames_processor.get_current_frame.return_value = None + # Return a timestamp that is after our dummy preview frame + self.app.detected_frames_processor.get_current_frame_time.return_value = ( + 1234567891.0 + ) + + # 2. 
Create a dummy preview file + dummy_frame = np.zeros((180, 320, 3), np.uint8) + cv2.putText( + dummy_frame, + "PREVIEW", + (50, 50), + cv2.FONT_HERSHEY_SIMPLEX, + 1, + (255, 255, 255), + 2, + ) + preview_path = os.path.join( + PREVIEW_CACHE_DIR, f"preview_{camera}-1234567890.0.{PREVIEW_FRAME_TYPE}" + ) + cv2.imwrite(preview_path, dummy_frame) + + with AuthTestClient(self.app) as client: + response = client.get(f"/{camera}/latest.webp") + assert response.status_code == 200 + assert response.headers.get("X-Frigate-Offline") == "true" + # Verify we got an image (webp) + assert response.headers.get("content-type") == "image/webp" + + def test_latest_frame_no_fallback_when_live(self): + camera = "front_door" + # 1. Mock frame processor to return a live frame + dummy_frame = np.zeros((180, 320, 3), np.uint8) + self.app.detected_frames_processor.get_current_frame.return_value = dummy_frame + self.app.detected_frames_processor.get_current_frame_time.return_value = ( + 2000000000.0 # Way in the future + ) + + with AuthTestClient(self.app) as client: + response = client.get(f"/{camera}/latest.webp") + assert response.status_code == 200 + assert "X-Frigate-Offline" not in response.headers + + def test_latest_frame_stale_falls_back_to_preview(self): + camera = "front_door" + # 1. Mock frame processor to return a stale frame + dummy_frame = np.zeros((180, 320, 3), np.uint8) + self.app.detected_frames_processor.get_current_frame.return_value = dummy_frame + # Return a timestamp that is after our dummy preview frame, but way in the past + self.app.detected_frames_processor.get_current_frame_time.return_value = 1000.0 + + # 2. 
Create a dummy preview file + preview_path = os.path.join( + PREVIEW_CACHE_DIR, f"preview_{camera}-999.0.{PREVIEW_FRAME_TYPE}" + ) + cv2.imwrite(preview_path, dummy_frame) + + with AuthTestClient(self.app) as client: + response = client.get(f"/{camera}/latest.webp") + assert response.status_code == 200 + assert response.headers.get("X-Frigate-Offline") == "true" + + def test_latest_frame_no_preview_found(self): + camera = "front_door" + # 1. Mock frame processor to return None + self.app.detected_frames_processor.get_current_frame.return_value = None + + # 2. No preview file created + + with AuthTestClient(self.app) as client: + response = client.get(f"/{camera}/latest.webp") + # Should fall back to camera-error.jpg (which might not exist in test env, but let's see) + # If camera-error.jpg is not found, it returns 500 "Unable to get valid frame" in latest_frame + # OR it uses request.app.camera_error_image if already loaded. + + # Since we didn't provide camera-error.jpg, it might 500 if glob fails or return 500 if frame is None. 
+ assert response.status_code in [200, 500] + assert "X-Frigate-Offline" not in response.headers diff --git a/frigate/test/test_preview_loader.py b/frigate/test/test_preview_loader.py new file mode 100644 index 000000000..e2062fce1 --- /dev/null +++ b/frigate/test/test_preview_loader.py @@ -0,0 +1,80 @@ +import os +import shutil +import unittest + +from frigate.output.preview import ( + PREVIEW_CACHE_DIR, + PREVIEW_FRAME_TYPE, + get_most_recent_preview_frame, +) + + +class TestPreviewLoader(unittest.TestCase): + def setUp(self): + if os.path.exists(PREVIEW_CACHE_DIR): + shutil.rmtree(PREVIEW_CACHE_DIR) + os.makedirs(PREVIEW_CACHE_DIR) + + def tearDown(self): + if os.path.exists(PREVIEW_CACHE_DIR): + shutil.rmtree(PREVIEW_CACHE_DIR) + + def test_get_most_recent_preview_frame_missing(self): + self.assertIsNone(get_most_recent_preview_frame("test_camera")) + + def test_get_most_recent_preview_frame_exists(self): + camera = "test_camera" + # create dummy preview files + for ts in ["1000.0", "2000.0", "1500.0"]: + with open( + os.path.join( + PREVIEW_CACHE_DIR, f"preview_{camera}-{ts}.{PREVIEW_FRAME_TYPE}" + ), + "w", + ) as f: + f.write(f"test_{ts}") + + expected_path = os.path.join( + PREVIEW_CACHE_DIR, f"preview_{camera}-2000.0.{PREVIEW_FRAME_TYPE}" + ) + self.assertEqual(get_most_recent_preview_frame(camera), expected_path) + + def test_get_most_recent_preview_frame_before(self): + camera = "test_camera" + # create dummy preview files + for ts in ["1000.0", "2000.0"]: + with open( + os.path.join( + PREVIEW_CACHE_DIR, f"preview_{camera}-{ts}.{PREVIEW_FRAME_TYPE}" + ), + "w", + ) as f: + f.write(f"test_{ts}") + + # Test finding frame before or at 1500 + expected_path = os.path.join( + PREVIEW_CACHE_DIR, f"preview_{camera}-1000.0.{PREVIEW_FRAME_TYPE}" + ) + self.assertEqual( + get_most_recent_preview_frame(camera, before=1500.0), expected_path + ) + + # Test finding frame before or at 999 + self.assertIsNone(get_most_recent_preview_frame(camera, before=999.0)) + + def 
test_get_most_recent_preview_frame_other_camera(self): + camera = "test_camera" + other_camera = "other_camera" + with open( + os.path.join( + PREVIEW_CACHE_DIR, f"preview_{other_camera}-3000.0.{PREVIEW_FRAME_TYPE}" + ), + "w", + ) as f: + f.write("test") + + self.assertIsNone(get_most_recent_preview_frame(camera)) + + def test_get_most_recent_preview_frame_no_directory(self): + shutil.rmtree(PREVIEW_CACHE_DIR) + self.assertIsNone(get_most_recent_preview_frame("test_camera")) diff --git a/web/src/components/player/LivePlayer.tsx b/web/src/components/player/LivePlayer.tsx index dbbc289c5..f48a7d475 100644 --- a/web/src/components/player/LivePlayer.tsx +++ b/web/src/components/player/LivePlayer.tsx @@ -82,6 +82,11 @@ export default function LivePlayer({ const internalContainerRef = useRef(null); const cameraName = useCameraFriendlyName(cameraConfig); + + // player is showing on a dashboard if containerRef is not provided + + const inDashboard = containerRef?.current == null; + // stats const [stats, setStats] = useState({ @@ -416,6 +421,28 @@ export default function LivePlayer({ />
+ {offline && inDashboard && ( + <> +
+
+
+
{t("streamOffline.title")}
+ +

+ + streamOffline.desc + +

+
+
+ + )} + {offline && !showStillWithoutActivity && cameraEnabled && (
From fa0feebd0371b8453225f0953eab6a7bedec9412 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Thu, 22 Jan 2026 12:00:39 -0700 Subject: [PATCH 26/56] Update to ROCm 7.2.0 (#21753) * Update to ROCm 7.2.0 * ROCm now works properly with JinaV1 * Arcface has compilation error --- docker/rocm/Dockerfile | 4 +++- docker/rocm/requirements-wheels-rocm.txt | 2 +- docker/rocm/rocm.hcl | 2 +- frigate/detectors/detection_runners.py | 6 ++---- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/docker/rocm/Dockerfile b/docker/rocm/Dockerfile index 9edcd6058..42447a26b 100644 --- a/docker/rocm/Dockerfile +++ b/docker/rocm/Dockerfile @@ -13,7 +13,7 @@ ARG ROCM RUN apt update -qq && \ apt install -y wget gpg && \ - wget -O rocm.deb https://repo.radeon.com/amdgpu-install/7.1.1/ubuntu/jammy/amdgpu-install_7.1.1.70101-1_all.deb && \ + wget -O rocm.deb https://repo.radeon.com/amdgpu-install/7.2/ubuntu/jammy/amdgpu-install_7.2.70200-1_all.deb && \ apt install -y ./rocm.deb && \ apt update && \ apt install -qq -y rocm @@ -56,6 +56,8 @@ FROM scratch AS rocm-dist ARG ROCM +# Copy HIP headers required for MIOpen JIT (BuildHip) / HIPRTC at runtime +COPY --from=rocm /opt/rocm-${ROCM}/include/ /opt/rocm-${ROCM}/include/ COPY --from=rocm /opt/rocm-$ROCM/bin/rocminfo /opt/rocm-$ROCM/bin/migraphx-driver /opt/rocm-$ROCM/bin/ # Copy MIOpen database files for gfx10xx and gfx11xx only (RDNA2/RDNA3) COPY --from=rocm /opt/rocm-$ROCM/share/miopen/db/*gfx10* /opt/rocm-$ROCM/share/miopen/db/ diff --git a/docker/rocm/requirements-wheels-rocm.txt b/docker/rocm/requirements-wheels-rocm.txt index b6a202f93..da22f2ff6 100644 --- a/docker/rocm/requirements-wheels-rocm.txt +++ b/docker/rocm/requirements-wheels-rocm.txt @@ -1 +1 @@ -onnxruntime-migraphx @ https://github.com/NickM-27/frigate-onnxruntime-rocm/releases/download/v7.1.0/onnxruntime_migraphx-1.23.1-cp311-cp311-linux_x86_64.whl \ No newline at end of file +onnxruntime-migraphx @ 
https://github.com/NickM-27/frigate-onnxruntime-rocm/releases/download/v7.2.0/onnxruntime_migraphx-1.23.1-cp311-cp311-linux_x86_64.whl \ No newline at end of file diff --git a/docker/rocm/rocm.hcl b/docker/rocm/rocm.hcl index 6595066c5..710bfe995 100644 --- a/docker/rocm/rocm.hcl +++ b/docker/rocm/rocm.hcl @@ -1,5 +1,5 @@ variable "ROCM" { - default = "7.1.1" + default = "7.2.0" } variable "HSA_OVERRIDE_GFX_VERSION" { default = "" diff --git a/frigate/detectors/detection_runners.py b/frigate/detectors/detection_runners.py index fcbb41e66..8b2f5ecf4 100644 --- a/frigate/detectors/detection_runners.py +++ b/frigate/detectors/detection_runners.py @@ -131,10 +131,8 @@ class ONNXModelRunner(BaseModelRunner): return model_type in [ EnrichmentModelTypeEnum.paddleocr.value, - EnrichmentModelTypeEnum.yolov9_license_plate.value, - EnrichmentModelTypeEnum.jina_v1.value, EnrichmentModelTypeEnum.jina_v2.value, - EnrichmentModelTypeEnum.facenet.value, + EnrichmentModelTypeEnum.arcface.value, ModelTypeEnum.rfdetr.value, ModelTypeEnum.dfine.value, ] @@ -605,4 +603,4 @@ def get_optimized_runner( provider_options=options, ), model_type=model_type, - ) + ) \ No newline at end of file From a4362caa0a4f838d2f03b9af5e5d3eac8687f460 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Thu, 22 Jan 2026 12:04:40 -0700 Subject: [PATCH 27/56] Add live context tool to LLM (#21754) * Add live context tool * Improve handling of images in request * Improve prompt caching --- frigate/api/chat.py | 172 +++++++++++++++++++++++++- frigate/api/defs/request/chat_body.py | 7 ++ frigate/genai/__init__.py | 5 +- frigate/genai/llama_cpp.py | 9 +- 4 files changed, 188 insertions(+), 5 deletions(-) diff --git a/frigate/api/chat.py b/frigate/api/chat.py index eeff3ab6d..444650e13 100644 --- a/frigate/api/chat.py +++ b/frigate/api/chat.py @@ -1,10 +1,12 @@ """Chat and LLM tool calling APIs.""" +import base64 import json import logging from datetime import datetime, timezone -from typing import Any, Dict, List 
+from typing import Any, Dict, List, Optional +import cv2 from fastapi import APIRouter, Body, Depends, Request from fastapi.responses import JSONResponse from pydantic import BaseModel @@ -87,6 +89,28 @@ def get_tool_definitions() -> List[Dict[str, Any]]: "required": [], }, }, + { + "type": "function", + "function": { + "name": "get_live_context", + "description": ( + "Get the current detection information for a camera: objects being tracked, " + "zones, timestamps. Use this to understand what is visible in the live view. " + "Call this when the user has included a live image (via include_live_image) or " + "when answering questions about what is happening right now on a specific camera." + ), + "parameters": { + "type": "object", + "properties": { + "camera": { + "type": "string", + "description": "Camera name to get live context for.", + }, + }, + "required": ["camera"], + }, + }, + }, ] @@ -207,6 +231,98 @@ async def execute_tool( ) +async def _execute_get_live_context( + request: Request, + camera: str, + allowed_cameras: List[str], +) -> Dict[str, Any]: + if camera not in allowed_cameras: + return { + "error": f"Camera '{camera}' not found or access denied", + } + + if camera not in request.app.frigate_config.cameras: + return { + "error": f"Camera '{camera}' not found", + } + + try: + frame_processor = request.app.detected_frames_processor + camera_state = frame_processor.camera_states.get(camera) + + if camera_state is None: + return { + "error": f"Camera '{camera}' state not available", + } + + tracked_objects_dict = {} + with camera_state.current_frame_lock: + tracked_objects = camera_state.tracked_objects.copy() + frame_time = camera_state.current_frame_time + + for obj_id, tracked_obj in tracked_objects.items(): + obj_dict = tracked_obj.to_dict() + if obj_dict.get("frame_time") == frame_time: + tracked_objects_dict[obj_id] = { + "label": obj_dict.get("label"), + "zones": obj_dict.get("current_zones", []), + "sub_label": obj_dict.get("sub_label"), + 
"stationary": obj_dict.get("stationary", False), + } + + return { + "camera": camera, + "timestamp": frame_time, + "detections": list(tracked_objects_dict.values()), + } + + except Exception as e: + logger.error(f"Error executing get_live_context: {e}", exc_info=True) + return { + "error": f"Error getting live context: {str(e)}", + } + + +async def _get_live_frame_image_url( + request: Request, + camera: str, + allowed_cameras: List[str], +) -> Optional[str]: + """ + Fetch the current live frame for a camera as a base64 data URL. + + Returns None if the frame cannot be retrieved. Used when include_live_image + is set to attach the image to the first user message. + """ + if ( + camera not in allowed_cameras + or camera not in request.app.frigate_config.cameras + ): + return None + try: + frame_processor = request.app.detected_frames_processor + if camera not in frame_processor.camera_states: + return None + frame = frame_processor.get_current_frame(camera, {}) + if frame is None: + return None + height, width = frame.shape[:2] + max_dimension = 1024 + if height > max_dimension or width > max_dimension: + scale = max_dimension / max(height, width) + frame = cv2.resize( + frame, + (int(width * scale), int(height * scale)), + interpolation=cv2.INTER_AREA, + ) + _, img_encoded = cv2.imencode(".jpg", frame, [cv2.IMWRITE_JPEG_QUALITY, 85]) + b64 = base64.b64encode(img_encoded.tobytes()).decode("utf-8") + return f"data:image/jpeg;base64,{b64}" + except Exception as e: + logger.debug("Failed to get live frame for %s: %s", camera, e) + return None + + async def _execute_tool_internal( tool_name: str, arguments: Dict[str, Any], @@ -231,6 +347,11 @@ async def _execute_tool_internal( except (json.JSONDecodeError, AttributeError) as e: logger.warning(f"Failed to extract tool result: {e}") return {"error": "Failed to parse tool result"} + elif tool_name == "get_live_context": + camera = arguments.get("camera") + if not camera: + return {"error": "Camera parameter is required"} + 
return await _execute_get_live_context(request, camera, allowed_cameras) else: return {"error": f"Unknown tool: {tool_name}"} @@ -277,13 +398,43 @@ async def chat_completion( current_datetime = datetime.now(timezone.utc) current_date_str = current_datetime.strftime("%Y-%m-%d") current_time_str = current_datetime.strftime("%H:%M:%S %Z") + + cameras_info = [] + config = request.app.frigate_config + for camera_id in allowed_cameras: + if camera_id not in config.cameras: + continue + camera_config = config.cameras[camera_id] + friendly_name = ( + camera_config.friendly_name + if camera_config.friendly_name + else camera_id.replace("_", " ").title() + ) + cameras_info.append(f" - {friendly_name} (ID: {camera_id})") + + cameras_section = "" + if cameras_info: + cameras_section = ( + "\n\nAvailable cameras:\n" + + "\n".join(cameras_info) + + "\n\nWhen users refer to cameras by their friendly name (e.g., 'Back Deck Camera'), use the corresponding camera ID (e.g., 'back_deck_cam') in tool calls." + ) + + live_image_note = "" + if body.include_live_image: + live_image_note = ( + f"\n\nThe first user message includes a live image from camera " + f"'{body.include_live_image}'. Use get_live_context for that camera to get " + "current detection details (objects, zones) to aid in understanding the image." + ) + system_prompt = f"""You are a helpful assistant for Frigate, a security camera NVR system. You help users answer questions about their cameras, detected objects, and events. Current date and time: {current_date_str} at {current_time_str} (UTC) When users ask questions about "today", "yesterday", "this week", etc., use the current date above as reference. When searching for objects or events, use ISO 8601 format for dates (e.g., {current_date_str}T00:00:00Z for the start of today). 
-Always be accurate with time calculations based on the current date provided.""" +Always be accurate with time calculations based on the current date provided.{cameras_section}{live_image_note}""" conversation.append( { @@ -292,6 +443,7 @@ Always be accurate with time calculations based on the current date provided.""" } ) + first_user_message_seen = False for msg in body.messages: msg_dict = { "role": msg.role, @@ -301,6 +453,22 @@ Always be accurate with time calculations based on the current date provided.""" msg_dict["tool_call_id"] = msg.tool_call_id if msg.name: msg_dict["name"] = msg.name + + if ( + msg.role == "user" + and not first_user_message_seen + and body.include_live_image + ): + first_user_message_seen = True + image_url = await _get_live_frame_image_url( + request, body.include_live_image, allowed_cameras + ) + if image_url: + msg_dict["content"] = [ + {"type": "text", "text": msg.content}, + {"type": "image_url", "image_url": {"url": image_url}}, + ] + conversation.append(msg_dict) tool_iterations = 0 diff --git a/frigate/api/defs/request/chat_body.py b/frigate/api/defs/request/chat_body.py index 7b327bf5a..fa3c3860a 100644 --- a/frigate/api/defs/request/chat_body.py +++ b/frigate/api/defs/request/chat_body.py @@ -32,3 +32,10 @@ class ChatCompletionRequest(BaseModel): le=10, description="Maximum number of tool call iterations (default: 5)", ) + include_live_image: Optional[str] = Field( + default=None, + description=( + "If set, the current live frame from this camera is attached to the first " + "user message as multimodal content. Use with get_live_context for detection info." 
+ ), + ) diff --git a/frigate/genai/__init__.py b/frigate/genai/__init__.py index 07b7b2a2b..0ae664b9f 100644 --- a/frigate/genai/__init__.py +++ b/frigate/genai/__init__.py @@ -69,7 +69,7 @@ class GenAIClient: return "\n- (No objects detected)" context_prompt = f""" -Your task is to analyze the sequence of images ({len(thumbnails)} total) taken in chronological order from the perspective of the {review_data["camera"]} security camera. +Your task is to analyze a sequence of images taken in chronological order from a security camera. ## Normal Activity Patterns for This Property @@ -108,7 +108,8 @@ Your response MUST be a flat JSON object with: ## Sequence Details -- Frame 1 = earliest, Frame {len(thumbnails)} = latest +- Camera: {review_data["camera"]} +- Total frames: {len(thumbnails)} (Frame 1 = earliest, Frame {len(thumbnails)} = latest) - Activity started at {review_data["start"]} and lasted {review_data["duration"]} seconds - Zones involved: {", ".join(review_data["zones"]) if review_data["zones"] else "None"} diff --git a/frigate/genai/llama_cpp.py b/frigate/genai/llama_cpp.py index 5523ce389..fafef74ae 100644 --- a/frigate/genai/llama_cpp.py +++ b/frigate/genai/llama_cpp.py @@ -216,7 +216,14 @@ class LlamaCppClient(GenAIClient): "finish_reason": "error", } except requests.exceptions.RequestException as e: - logger.warning("llama.cpp returned an error: %s", str(e)) + error_detail = str(e) + if hasattr(e, "response") and e.response is not None: + try: + error_body = e.response.text + error_detail = f"{str(e)} - Response: {error_body[:500]}" + except Exception: + pass + logger.warning("llama.cpp returned an error: %s", error_detail) return { "content": None, "tool_calls": None, From 2db0269825ae33cf478737a7387b9376680a6d4e Mon Sep 17 00:00:00 2001 From: Eric Work Date: Wed, 28 Jan 2026 06:27:46 -0800 Subject: [PATCH 28/56] Add networking options for configuring listening ports (#21779) --- Makefile | 3 +- .../etc/s6-overlay/s6-rc.d/certsync/run | 7 ++- 
.../rootfs/etc/s6-overlay/s6-rc.d/nginx/run | 12 ++-- .../rootfs/usr/local/nginx/get_base_path.py | 11 ---- .../usr/local/nginx/get_listen_settings.py | 35 ----------- .../usr/local/nginx/get_nginx_settings.py | 62 +++++++++++++++++++ .../local/nginx/templates/base_path.gotmpl | 2 +- .../usr/local/nginx/templates/listen.gotmpl | 61 ++++++++---------- docs/docs/configuration/advanced.md | 41 ++++++------ docs/docs/configuration/reference.md | 10 ++- frigate/api/auth.py | 38 +++++++++--- frigate/config/__init__.py | 1 + frigate/config/network.py | 18 +++++- frigate/const.py | 1 - frigate/record/export.py | 9 ++- web/public/locales/en/config/networking.json | 15 ++++- web/src/components/auth/ProtectedRoute.tsx | 2 +- web/src/hooks/use-allowed-cameras.ts | 2 +- 18 files changed, 200 insertions(+), 130 deletions(-) delete mode 100644 docker/main/rootfs/usr/local/nginx/get_base_path.py delete mode 100644 docker/main/rootfs/usr/local/nginx/get_listen_settings.py create mode 100644 docker/main/rootfs/usr/local/nginx/get_nginx_settings.py diff --git a/Makefile b/Makefile index 1226a9e01..3800399ea 100644 --- a/Makefile +++ b/Makefile @@ -49,7 +49,8 @@ push: push-boards --push run: local - docker run --rm --publish=5000:5000 --volume=${PWD}/config:/config frigate:latest + docker run --rm --publish=5000:5000 --publish=8971:8971 \ + --volume=${PWD}/config:/config frigate:latest run_tests: local docker run --rm --workdir=/opt/frigate --entrypoint= frigate:latest \ diff --git a/docker/main/rootfs/etc/s6-overlay/s6-rc.d/certsync/run b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/certsync/run index 4ce1c133f..b834c09bb 100755 --- a/docker/main/rootfs/etc/s6-overlay/s6-rc.d/certsync/run +++ b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/certsync/run @@ -10,7 +10,8 @@ echo "[INFO] Starting certsync..." 
lefile="/etc/letsencrypt/live/frigate/fullchain.pem" -tls_enabled=`python3 /usr/local/nginx/get_listen_settings.py | jq -r .tls.enabled` +tls_enabled=`python3 /usr/local/nginx/get_nginx_settings.py | jq -r .tls.enabled` +listen_external_port=`python3 /usr/local/nginx/get_nginx_settings.py | jq -r .listen.external_port` while true do @@ -34,7 +35,7 @@ do ;; esac - liveprint=`echo | openssl s_client -showcerts -connect 127.0.0.1:8971 2>&1 | openssl x509 -fingerprint 2>&1 | grep -i fingerprint || echo 'failed'` + liveprint=`echo | openssl s_client -showcerts -connect 127.0.0.1:$listen_external_port 2>&1 | openssl x509 -fingerprint 2>&1 | grep -i fingerprint || echo 'failed'` case "$liveprint" in *Fingerprint*) @@ -55,4 +56,4 @@ do done -exit 0 \ No newline at end of file +exit 0 diff --git a/docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx/run b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx/run index 8bd9b5250..a3c7b3248 100755 --- a/docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx/run +++ b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx/run @@ -80,14 +80,14 @@ if [ ! \( -f "$letsencrypt_path/privkey.pem" -a -f "$letsencrypt_path/fullchain. 
fi # build templates for optional FRIGATE_BASE_PATH environment variable -python3 /usr/local/nginx/get_base_path.py | \ +python3 /usr/local/nginx/get_nginx_settings.py | \ tempio -template /usr/local/nginx/templates/base_path.gotmpl \ - -out /usr/local/nginx/conf/base_path.conf + -out /usr/local/nginx/conf/base_path.conf -# build templates for optional TLS support -python3 /usr/local/nginx/get_listen_settings.py | \ - tempio -template /usr/local/nginx/templates/listen.gotmpl \ - -out /usr/local/nginx/conf/listen.conf +# build templates for additional network settings +python3 /usr/local/nginx/get_nginx_settings.py | \ + tempio -template /usr/local/nginx/templates/listen.gotmpl \ + -out /usr/local/nginx/conf/listen.conf # Replace the bash process with the NGINX process, redirecting stderr to stdout exec 2>&1 diff --git a/docker/main/rootfs/usr/local/nginx/get_base_path.py b/docker/main/rootfs/usr/local/nginx/get_base_path.py deleted file mode 100644 index 2e78a7de9..000000000 --- a/docker/main/rootfs/usr/local/nginx/get_base_path.py +++ /dev/null @@ -1,11 +0,0 @@ -"""Prints the base path as json to stdout.""" - -import json -import os -from typing import Any - -base_path = os.environ.get("FRIGATE_BASE_PATH", "") - -result: dict[str, Any] = {"base_path": base_path} - -print(json.dumps(result)) diff --git a/docker/main/rootfs/usr/local/nginx/get_listen_settings.py b/docker/main/rootfs/usr/local/nginx/get_listen_settings.py deleted file mode 100644 index d879db56e..000000000 --- a/docker/main/rootfs/usr/local/nginx/get_listen_settings.py +++ /dev/null @@ -1,35 +0,0 @@ -"""Prints the tls config as json to stdout.""" - -import json -import sys -from typing import Any - -from ruamel.yaml import YAML - -sys.path.insert(0, "/opt/frigate") -from frigate.util.config import find_config_file - -sys.path.remove("/opt/frigate") - -yaml = YAML() - -config_file = find_config_file() - -try: - with open(config_file) as f: - raw_config = f.read() - - if config_file.endswith((".yaml", 
".yml")): - config: dict[str, Any] = yaml.load(raw_config) - elif config_file.endswith(".json"): - config: dict[str, Any] = json.loads(raw_config) -except FileNotFoundError: - config: dict[str, Any] = {} - -tls_config: dict[str, any] = config.get("tls", {"enabled": True}) -networking_config = config.get("networking", {}) -ipv6_config = networking_config.get("ipv6", {"enabled": False}) - -output = {"tls": tls_config, "ipv6": ipv6_config} - -print(json.dumps(output)) diff --git a/docker/main/rootfs/usr/local/nginx/get_nginx_settings.py b/docker/main/rootfs/usr/local/nginx/get_nginx_settings.py new file mode 100644 index 000000000..79cda3686 --- /dev/null +++ b/docker/main/rootfs/usr/local/nginx/get_nginx_settings.py @@ -0,0 +1,62 @@ +"""Prints the nginx settings as json to stdout.""" + +import json +import os +import sys +from typing import Any + +from ruamel.yaml import YAML + +sys.path.insert(0, "/opt/frigate") +from frigate.util.config import find_config_file + +sys.path.remove("/opt/frigate") + +yaml = YAML() + +config_file = find_config_file() + +try: + with open(config_file) as f: + raw_config = f.read() + + if config_file.endswith((".yaml", ".yml")): + config: dict[str, Any] = yaml.load(raw_config) + elif config_file.endswith(".json"): + config: dict[str, Any] = json.loads(raw_config) +except FileNotFoundError: + config: dict[str, Any] = {} + +tls_config: dict[str, Any] = config.get("tls", {}) +tls_config.setdefault("enabled", True) + +networking_config: dict[str, Any] = config.get("networking", {}) +ipv6_config: dict[str, Any] = networking_config.get("ipv6", {}) +ipv6_config.setdefault("enabled", False) + +listen_config: dict[str, Any] = networking_config.get("listen", {}) +listen_config.setdefault("internal", 5000) +listen_config.setdefault("external", 8971) + +# handle case where internal port is a string with ip:port +internal_port = listen_config["internal"] +if type(internal_port) is str: + internal_port = int(internal_port.split(":")[-1]) 
+listen_config["internal_port"] = internal_port + +# handle case where external port is a string with ip:port +external_port = listen_config["external"] +if type(external_port) is str: + external_port = int(external_port.split(":")[-1]) +listen_config["external_port"] = external_port + +base_path = os.environ.get("FRIGATE_BASE_PATH", "") + +result: dict[str, Any] = { + "tls": tls_config, + "ipv6": ipv6_config, + "listen": listen_config, + "base_path": base_path, +} + +print(json.dumps(result)) diff --git a/docker/main/rootfs/usr/local/nginx/templates/base_path.gotmpl b/docker/main/rootfs/usr/local/nginx/templates/base_path.gotmpl index ace4443ee..ca945ba1f 100644 --- a/docker/main/rootfs/usr/local/nginx/templates/base_path.gotmpl +++ b/docker/main/rootfs/usr/local/nginx/templates/base_path.gotmpl @@ -7,7 +7,7 @@ location ^~ {{ .base_path }}/ { # remove base_url from the path before passing upstream rewrite ^{{ .base_path }}/(.*) /$1 break; - proxy_pass $scheme://127.0.0.1:8971; + proxy_pass $scheme://127.0.0.1:{{ .listen.external_port }}; proxy_http_version 1.1; proxy_set_header Upgrade $http_upgrade; proxy_set_header Connection "upgrade"; diff --git a/docker/main/rootfs/usr/local/nginx/templates/listen.gotmpl b/docker/main/rootfs/usr/local/nginx/templates/listen.gotmpl index 066f872cb..628784b60 100644 --- a/docker/main/rootfs/usr/local/nginx/templates/listen.gotmpl +++ b/docker/main/rootfs/usr/local/nginx/templates/listen.gotmpl @@ -1,45 +1,36 @@ - # Internal (IPv4 always; IPv6 optional) -listen 5000; -{{ if .ipv6 }}{{ if .ipv6.enabled }}listen [::]:5000;{{ end }}{{ end }} - +listen {{ .listen.internal }}; +{{ if .ipv6.enabled }}listen [::]:{{ .listen.internal_port }};{{ end }} # intended for external traffic, protected by auth -{{ if .tls }} - {{ if .tls.enabled }} - # external HTTPS (IPv4 always; IPv6 optional) - listen 8971 ssl; - {{ if .ipv6 }}{{ if .ipv6.enabled }}listen [::]:8971 ssl;{{ end }}{{ end }} +{{ if .tls.enabled }} + # external HTTPS (IPv4 always; 
IPv6 optional) + listen {{ .listen.external }} ssl; + {{ if .ipv6.enabled }}listen [::]:{{ .listen.external_port }} ssl;{{ end }} - ssl_certificate /etc/letsencrypt/live/frigate/fullchain.pem; - ssl_certificate_key /etc/letsencrypt/live/frigate/privkey.pem; + ssl_certificate /etc/letsencrypt/live/frigate/fullchain.pem; + ssl_certificate_key /etc/letsencrypt/live/frigate/privkey.pem; - # generated 2024-06-01, Mozilla Guideline v5.7, nginx 1.25.3, OpenSSL 1.1.1w, modern configuration, no OCSP - # https://ssl-config.mozilla.org/#server=nginx&version=1.25.3&config=modern&openssl=1.1.1w&ocsp=false&guideline=5.7 - ssl_session_timeout 1d; - ssl_session_cache shared:MozSSL:10m; # about 40000 sessions - ssl_session_tickets off; + # generated 2024-06-01, Mozilla Guideline v5.7, nginx 1.25.3, OpenSSL 1.1.1w, modern configuration, no OCSP + # https://ssl-config.mozilla.org/#server=nginx&version=1.25.3&config=modern&openssl=1.1.1w&ocsp=false&guideline=5.7 + ssl_session_timeout 1d; + ssl_session_cache shared:MozSSL:10m; # about 40000 sessions + ssl_session_tickets off; - # modern configuration - ssl_protocols TLSv1.3; - ssl_prefer_server_ciphers off; + # modern configuration + ssl_protocols TLSv1.3; + ssl_prefer_server_ciphers off; - # HSTS (ngx_http_headers_module is required) (63072000 seconds) - add_header Strict-Transport-Security "max-age=63072000" always; + # HSTS (ngx_http_headers_module is required) (63072000 seconds) + add_header Strict-Transport-Security "max-age=63072000" always; - # ACME challenge location - location /.well-known/acme-challenge/ { - default_type "text/plain"; - root /etc/letsencrypt/www; - } - {{ else }} - # external HTTP (IPv4 always; IPv6 optional) - listen 8971; - {{ if .ipv6 }}{{ if .ipv6.enabled }}listen [::]:8971;{{ end }}{{ end }} - {{ end }} + # ACME challenge location + location /.well-known/acme-challenge/ { + default_type "text/plain"; + root /etc/letsencrypt/www; + } {{ else }} - # (No tls section) default to HTTP (IPv4 always; IPv6 
optional) - listen 8971; - {{ if .ipv6 }}{{ if .ipv6.enabled }}listen [::]:8971;{{ end }}{{ end }} + # (No tls) default to HTTP (IPv4 always; IPv6 optional) + listen {{ .listen.external }}; + {{ if .ipv6.enabled }}listen [::]:{{ .listen.external_port }};{{ end }} {{ end }} - diff --git a/docs/docs/configuration/advanced.md b/docs/docs/configuration/advanced.md index 8cd368144..b8dbffd62 100644 --- a/docs/docs/configuration/advanced.md +++ b/docs/docs/configuration/advanced.md @@ -155,34 +155,33 @@ services: ### Enabling IPv6 -IPv6 is disabled by default, to enable IPv6 listen.gotmpl needs to be bind mounted with IPv6 enabled. For example: +IPv6 is disabled by default, to enable IPv6 modify your Frigate configuration as follows: -``` -{{ if not .enabled }} -# intended for external traffic, protected by auth -listen 8971; -{{ else }} -# intended for external traffic, protected by auth -listen 8971 ssl; - -# intended for internal traffic, not protected by auth -listen 5000; +```yaml +networking: + ipv6: + enabled: True ``` -becomes +### Listen on different ports -``` -{{ if not .enabled }} -# intended for external traffic, protected by auth -listen [::]:8971 ipv6only=off; -{{ else }} -# intended for external traffic, protected by auth -listen [::]:8971 ipv6only=off ssl; +You can change the ports Nginx uses for listening using Frigate's configuration file. The internal port (unauthenticated) and external port (authenticated) can be changed independently. You can also specify an IP address using the format `ip:port` if you wish to bind the port to a specific interface. This may be useful for example to prevent exposing the internal port outside the container. -# intended for internal traffic, not protected by auth -listen [::]:5000 ipv6only=off; +For example: + +```yaml +networking: + listen: + internal: 127.0.0.1:5000 + external: 8971 ``` +:::warning + +This setting is for advanced users. 
For the majority of use cases it's recommended to change the `ports` section of your Docker compose file or use the Docker `run` `--publish` option instead, e.g. `-p 443:8971`. Changing Frigate's ports may break some integrations. + +::: + ## Base path By default, Frigate runs at the root path (`/`). However some setups require to run Frigate under a custom path prefix (e.g. `/frigate`), especially when Frigate is located behind a reverse proxy that requires path-based routing. diff --git a/docs/docs/configuration/reference.md b/docs/docs/configuration/reference.md index 1577d7b01..5c3ca4ea8 100644 --- a/docs/docs/configuration/reference.md +++ b/docs/docs/configuration/reference.md @@ -73,11 +73,19 @@ tls: # Optional: Enable TLS for port 8971 (default: shown below) enabled: True -# Optional: IPv6 configuration +# Optional: Networking configuration networking: # Optional: Enable IPv6 on 5000, and 8971 if tls is configured (default: shown below) ipv6: enabled: False + # Optional: Override ports Frigate uses for listening (defaults: shown below) + # An IP address may also be provided to bind to a specific interface, e.g. ip:port + # NOTE: This setting is for advanced users and may break some integrations. The majority + # of users should change ports in the docker compose file + # or use the docker run `--publish` option to select a different port. 
+ listen: + internal: 5000 + external: 8971 # Optional: Proxy configuration proxy: diff --git a/frigate/api/auth.py b/frigate/api/auth.py index e0a6ec924..af21b1d4b 100644 --- a/frigate/api/auth.py +++ b/frigate/api/auth.py @@ -26,7 +26,7 @@ from frigate.api.defs.request.app_body import ( AppPutRoleBody, ) from frigate.api.defs.tags import Tags -from frigate.config import AuthConfig, ProxyConfig +from frigate.config import AuthConfig, NetworkingConfig, ProxyConfig from frigate.const import CONFIG_DIR, JWT_SECRET_ENV_VAR, PASSWORD_HASH_ALGORITHM from frigate.models import User @@ -41,7 +41,7 @@ def require_admin_by_default(): endpoints require admin access unless explicitly overridden with allow_public(), allow_any_authenticated(), or require_role(). - Port 5000 (internal) always has admin role set by the /auth endpoint, + Internal port always has admin role set by the /auth endpoint, so this check passes automatically for internal requests. Certain paths are exempted from the global admin check because they must @@ -130,7 +130,7 @@ def require_admin_by_default(): pass # For all other paths, require admin role - # Port 5000 (internal) requests have admin role set automatically + # Internal port requests have admin role set automatically role = request.headers.get("remote-role") if role == "admin": return @@ -143,6 +143,17 @@ def require_admin_by_default(): return admin_checker +def _is_authenticated(request: Request) -> bool: + """ + Helper to determine if a request is from an authenticated user. + + Returns True if the request has a valid authenticated user (not anonymous). + Internal port requests are considered anonymous despite having admin role. + """ + username = request.headers.get("remote-user") + return username is not None and username != "anonymous" + + def allow_public(): """ Override dependency to allow unauthenticated access to an endpoint. 
@@ -171,6 +182,7 @@ def allow_any_authenticated(): Rejects: - Requests with no remote-user header (did not pass through /auth endpoint) + - External port requests with anonymous user (auth disabled, no proxy auth) Example: @router.get("/authenticated-endpoint", dependencies=[Depends(allow_any_authenticated())]) @@ -179,8 +191,14 @@ def allow_any_authenticated(): async def auth_checker(request: Request): # Ensure a remote-user has been set by the /auth endpoint username = request.headers.get("remote-user") - if username is None: - raise HTTPException(status_code=401, detail="Authentication required") + + # Internal port requests have admin role and should be allowed + role = request.headers.get("remote-role") + + if role != "admin": + if username is None or not _is_authenticated(request): + raise HTTPException(status_code=401, detail="Authentication required") + return return auth_checker @@ -570,12 +588,18 @@ def resolve_role( def auth(request: Request): auth_config: AuthConfig = request.app.frigate_config.auth proxy_config: ProxyConfig = request.app.frigate_config.proxy + networking_config: NetworkingConfig = request.app.frigate_config.networking success_response = Response("", status_code=202) + # handle case where internal port is a string with ip:port + internal_port = networking_config.listen.internal + if type(internal_port) is str: + internal_port = int(internal_port.split(":")[-1]) + # dont require auth if the request is on the internal port # this header is set by Frigate's nginx proxy, so it cant be spoofed - if int(request.headers.get("x-server-port", default=0)) == 5000: + if int(request.headers.get("x-server-port", default=0)) == internal_port: success_response.headers["remote-user"] = "anonymous" success_response.headers["remote-role"] = "admin" return success_response @@ -1013,4 +1037,4 @@ async def get_allowed_cameras_for_filter(request: Request): role = current_user["role"] all_camera_names = set(request.app.frigate_config.cameras.keys()) 
roles_dict = request.app.frigate_config.auth.roles - return User.get_allowed_cameras(role, roles_dict, all_camera_names) + return User.get_allowed_cameras(role, roles_dict, all_camera_names) \ No newline at end of file diff --git a/frigate/config/__init__.py b/frigate/config/__init__.py index c6ff535b0..88f7b79f9 100644 --- a/frigate/config/__init__.py +++ b/frigate/config/__init__.py @@ -8,6 +8,7 @@ from .config import * # noqa: F403 from .database import * # noqa: F403 from .logger import * # noqa: F403 from .mqtt import * # noqa: F403 +from .network import * # noqa: F403 from .proxy import * # noqa: F403 from .telemetry import * # noqa: F403 from .tls import * # noqa: F403 diff --git a/frigate/config/network.py b/frigate/config/network.py index c8b3cfd1c..ab4e5b83e 100644 --- a/frigate/config/network.py +++ b/frigate/config/network.py @@ -1,13 +1,27 @@ +from typing import Union + from pydantic import Field from .base import FrigateBaseModel -__all__ = ["IPv6Config", "NetworkingConfig"] +__all__ = ["IPv6Config", "ListenConfig", "NetworkingConfig"] class IPv6Config(FrigateBaseModel): enabled: bool = Field(default=False, title="Enable IPv6 for port 5000 and/or 8971") +class ListenConfig(FrigateBaseModel): + internal: Union[int, str] = Field( + default=5000, title="Internal listening port for Frigate" + ) + external: Union[int, str] = Field( + default=8971, title="External listening port for Frigate" + ) + + class NetworkingConfig(FrigateBaseModel): - ipv6: IPv6Config = Field(default_factory=IPv6Config, title="Network configuration") + ipv6: IPv6Config = Field(default_factory=IPv6Config, title="IPv6 configuration") + listen: ListenConfig = Field( + default_factory=ListenConfig, title="Listening ports configuration" + ) diff --git a/frigate/const.py b/frigate/const.py index 7229785a7..87fdb8e70 100644 --- a/frigate/const.py +++ b/frigate/const.py @@ -14,7 +14,6 @@ RECORD_DIR = f"{BASE_DIR}/recordings" TRIGGER_DIR = f"{CLIPS_DIR}/triggers" BIRDSEYE_PIPE = 
"/tmp/cache/birdseye" CACHE_DIR = "/tmp/cache" -FRIGATE_LOCALHOST = "http://127.0.0.1:5000" PLUS_ENV_VAR = "PLUS_API_KEY" PLUS_API_HOST = "https://api.frigate.video" diff --git a/frigate/record/export.py b/frigate/record/export.py index afaed1a2a..c1c478ef4 100644 --- a/frigate/record/export.py +++ b/frigate/record/export.py @@ -184,8 +184,13 @@ class RecordingExporter(threading.Thread): def get_record_export_command( self, video_path: str, use_hwaccel: bool = True ) -> list[str]: + # handle case where internal port is a string with ip:port + internal_port = self.config.networking.listen.internal + if type(internal_port) is str: + internal_port = int(internal_port.split(":")[-1]) + if (self.end_time - self.start_time) <= MAX_PLAYLIST_SECONDS: - playlist_lines = f"http://127.0.0.1:5000/vod/{self.camera}/start/{self.start_time}/end/{self.end_time}/index.m3u8" + playlist_lines = f"http://127.0.0.1:{internal_port}/vod/{self.camera}/start/{self.start_time}/end/{self.end_time}/index.m3u8" ffmpeg_input = ( f"-y -protocol_whitelist pipe,file,http,tcp -i {playlist_lines}" ) @@ -217,7 +222,7 @@ class RecordingExporter(threading.Thread): for page in range(1, num_pages + 1): playlist = export_recordings.paginate(page, page_size) playlist_lines.append( - f"file 'http://127.0.0.1:5000/vod/{self.camera}/start/{float(playlist[0].start_time)}/end/{float(playlist[-1].end_time)}/index.m3u8'" + f"file 'http://127.0.0.1:{internal_port}/vod/{self.camera}/start/{float(playlist[0].start_time)}/end/{float(playlist[-1].end_time)}/index.m3u8'" ) ffmpeg_input = "-y -protocol_whitelist pipe,file,http,tcp -f concat -safe 0 -i /dev/stdin" diff --git a/web/public/locales/en/config/networking.json b/web/public/locales/en/config/networking.json index 0f8d9cc54..592ea9477 100644 --- a/web/public/locales/en/config/networking.json +++ b/web/public/locales/en/config/networking.json @@ -2,12 +2,23 @@ "label": "Networking configuration", "properties": { "ipv6": { - "label": "Network configuration", + 
"label": "IPv6 configuration", "properties": { "enabled": { "label": "Enable IPv6 for port 5000 and/or 8971" } } + }, + "listen": { + "label": "Listening ports configuration", + "properties": { + "internal": { + "label": "Internal listening port for Frigate" + }, + "external": { + "label": "External listening port for Frigate" + } + } } } -} \ No newline at end of file +} diff --git a/web/src/components/auth/ProtectedRoute.tsx b/web/src/components/auth/ProtectedRoute.tsx index cedf5a15a..a7d1b3596 100644 --- a/web/src/components/auth/ProtectedRoute.tsx +++ b/web/src/components/auth/ProtectedRoute.tsx @@ -47,7 +47,7 @@ export default function ProtectedRoute({ return ; } - // Authenticated mode (8971): require login + // Authenticated mode (external port): require login if (!auth.user) { return ( diff --git a/web/src/hooks/use-allowed-cameras.ts b/web/src/hooks/use-allowed-cameras.ts index 9eae59fc2..05941922a 100644 --- a/web/src/hooks/use-allowed-cameras.ts +++ b/web/src/hooks/use-allowed-cameras.ts @@ -12,7 +12,7 @@ export function useAllowedCameras() { if ( auth.user?.role === "viewer" || auth.user?.role === "admin" || - !auth.isAuthenticated // anonymous port 5000 + !auth.isAuthenticated // anonymous internal port ) { // return all cameras return config?.cameras ? 
Object.keys(config.cameras) : []; From fef1fb36ccdee2cad183b59fb82ea0e71abc4707 Mon Sep 17 00:00:00 2001 From: FL42 <46161216+fl42@users.noreply.github.com> Date: Sun, 8 Feb 2026 15:47:06 +0100 Subject: [PATCH 29/56] feat: add X-Frame-Time when returning snapshot (#21932) Co-authored-by: Florent MORICONI <170678386+fmcloudconsulting@users.noreply.github.com> --- frigate/api/media.py | 3 ++- frigate/track/object_processing.py | 2 +- frigate/track/tracked_object.py | 19 ++++++++++--------- 3 files changed, 13 insertions(+), 11 deletions(-) diff --git a/frigate/api/media.py b/frigate/api/media.py index 7d0d02a45..01776f903 100644 --- a/frigate/api/media.py +++ b/frigate/api/media.py @@ -761,7 +761,7 @@ async def event_snapshot( if event_id in camera_state.tracked_objects: tracked_obj = camera_state.tracked_objects.get(event_id) if tracked_obj is not None: - jpg_bytes = tracked_obj.get_img_bytes( + jpg_bytes, frame_time = tracked_obj.get_img_bytes( ext="jpg", timestamp=params.timestamp, bounding_box=params.bbox, @@ -790,6 +790,7 @@ async def event_snapshot( headers = { "Content-Type": "image/jpeg", "Cache-Control": "private, max-age=31536000" if event_complete else "no-store", + "X-Frame-Time": frame_time, } if params.download: diff --git a/frigate/track/object_processing.py b/frigate/track/object_processing.py index e0ee74228..f44f21be3 100644 --- a/frigate/track/object_processing.py +++ b/frigate/track/object_processing.py @@ -185,7 +185,7 @@ class TrackedObjectProcessor(threading.Thread): def snapshot(camera: str, obj: TrackedObject) -> bool: mqtt_config: CameraMqttConfig = self.config.cameras[camera].mqtt if mqtt_config.enabled and self.should_mqtt_snapshot(camera, obj): - jpg_bytes = obj.get_img_bytes( + jpg_bytes, _ = obj.get_img_bytes( ext="jpg", timestamp=mqtt_config.timestamp, bounding_box=mqtt_config.bounding_box, diff --git a/frigate/track/tracked_object.py b/frigate/track/tracked_object.py index a95221bbd..f435de7b6 100644 --- 
a/frigate/track/tracked_object.py +++ b/frigate/track/tracked_object.py @@ -434,7 +434,7 @@ class TrackedObject: return count > (self.camera_config.detect.stationary.threshold or 50) def get_thumbnail(self, ext: str) -> bytes | None: - img_bytes = self.get_img_bytes( + img_bytes, _ = self.get_img_bytes( ext, timestamp=False, bounding_box=False, crop=True, height=175 ) @@ -475,20 +475,21 @@ class TrackedObject: crop: bool = False, height: int | None = None, quality: int | None = None, - ) -> bytes | None: + ) -> tuple[bytes | None, float | None]: if self.thumbnail_data is None: - return None + return None, None try: + frame_time = self.thumbnail_data["frame_time"] best_frame = cv2.cvtColor( - self.frame_cache[self.thumbnail_data["frame_time"]]["frame"], + self.frame_cache[frame_time]["frame"], cv2.COLOR_YUV2BGR_I420, ) except KeyError: logger.warning( - f"Unable to create jpg because frame {self.thumbnail_data['frame_time']} is not in the cache" + f"Unable to create jpg because frame {frame_time} is not in the cache" ) - return None + return None, None if bounding_box: thickness = 2 @@ -570,13 +571,13 @@ class TrackedObject: ret, jpg = cv2.imencode(f".{ext}", best_frame, quality_params) if ret: - return jpg.tobytes() + return jpg.tobytes(), frame_time else: - return None + return None, None def write_snapshot_to_disk(self) -> None: snapshot_config: SnapshotsConfig = self.camera_config.snapshots - jpg_bytes = self.get_img_bytes( + jpg_bytes, _ = self.get_img_bytes( ext="jpg", timestamp=snapshot_config.timestamp, bounding_box=snapshot_config.bounding_box, From 12506f8c80b3e078416c0900b0d44f365f26e4c1 Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Mon, 9 Feb 2026 10:54:45 -0600 Subject: [PATCH 30/56] Improve jsmpeg player websocket handling (#21943) * improve jsmpeg player websocket handling prevent websocket console messages from appearing when player is destroyed * reformat files after ruff upgrade --- 
web/src/components/player/JSMpegPlayer.tsx | 28 +++++++++++++++++----- 1 file changed, 22 insertions(+), 6 deletions(-) diff --git a/web/src/components/player/JSMpegPlayer.tsx b/web/src/components/player/JSMpegPlayer.tsx index f85535013..c522ff0a8 100644 --- a/web/src/components/player/JSMpegPlayer.tsx +++ b/web/src/components/player/JSMpegPlayer.tsx @@ -118,6 +118,8 @@ export default function JSMpegPlayer({ const videoWrapper = videoRef.current; const canvas = canvasRef.current; let videoElement: JSMpeg.VideoElement | null = null; + let socket: WebSocket | null = null; + let socketMessageHandler: ((event: MessageEvent) => void) | null = null; let frameCount = 0; @@ -152,12 +154,14 @@ export default function JSMpegPlayer({ videoElement.player.source && videoElement.player.source.socket ) { - const socket = videoElement.player.source.socket; - socket.addEventListener("message", (event: MessageEvent) => { + socket = videoElement.player.source.socket as WebSocket; + socketMessageHandler = (event: MessageEvent) => { if (event.data instanceof ArrayBuffer) { bytesReceivedRef.current += event.data.byteLength; } - }); + }; + + socket.addEventListener("message", socketMessageHandler); } // Update stats every second @@ -197,11 +201,23 @@ export default function JSMpegPlayer({ } if (videoElement) { try { - // this causes issues in react strict mode - // https://stackoverflow.com/questions/76822128/issue-with-cycjimmy-jsmpeg-player-in-react-18-cannot-read-properties-of-null-o - videoElement.destroy(); + videoElement.player?.destroy(); // eslint-disable-next-line no-empty } catch (e) {} + + if (videoWrapper) { + videoWrapper.innerHTML = ""; + // @ts-expect-error playerInstance is set by jsmpeg + videoWrapper.playerInstance = null; + } + } + if (socket) { + if (socketMessageHandler) { + socket.removeEventListener("message", socketMessageHandler); + } + + socket = null; + socketMessageHandler = null; } }; } From bb6e889449e2256586ece8d8924b4e5bdf57b421 Mon Sep 17 00:00:00 2001 
From: nulledy <254504350+nulledy@users.noreply.github.com> Date: Wed, 11 Feb 2026 17:09:26 -0500 Subject: [PATCH 31/56] Allow API Events to be Detections or Alerts, depending on the Event Label (#21923) * - API created events will be alerts OR detections, depending on the event label, defaulting to alerts - Indefinite API events will extend the recording segment until those events are ended - API event start time is the actual start time, instead of having a pre-buffer of record.event_pre_capture * Instead of checking for indefinite events on a camera before deciding if we should end the segment, only update last_detection_time and last_alert_time if frame_time is greater, which should have the same effect * Add the ability to set a pre_capture number of seconds when creating a manual event via the API. Default behavior unchanged * Remove unnecessary _publish_segment_start() call * Formatting * handle last_alert_time or last_detection_time being None when checking them against the frame_time * comment manual_info["label"].split(": ")[0] for clarity --- docs/static/frigate-api.yaml | 7 +++ frigate/api/defs/request/events_body.py | 1 + frigate/api/event.py | 1 + frigate/review/maintainer.py | 66 ++++++++++++++++++++----- frigate/track/object_processing.py | 9 +++- 5 files changed, 70 insertions(+), 14 deletions(-) diff --git a/docs/static/frigate-api.yaml b/docs/static/frigate-api.yaml index 36b346422..2063514ac 100644 --- a/docs/static/frigate-api.yaml +++ b/docs/static/frigate-api.yaml @@ -3200,6 +3200,7 @@ paths: duration: 30 include_recording: true draw: {} + pre_capture: null responses: "200": description: Successful Response @@ -5002,6 +5003,12 @@ components: - type: "null" title: Draw default: {} + pre_capture: + anyOf: + - type: integer + - type: "null" + title: Pre Capture Seconds + default: null type: object title: EventsCreateBody EventsDeleteBody: diff --git a/frigate/api/defs/request/events_body.py b/frigate/api/defs/request/events_body.py index 
50754e92a..d844c31ca 100644 --- a/frigate/api/defs/request/events_body.py +++ b/frigate/api/defs/request/events_body.py @@ -41,6 +41,7 @@ class EventsCreateBody(BaseModel): duration: Optional[int] = 30 include_recording: Optional[bool] = True draw: Optional[dict] = {} + pre_capture: Optional[int] = None class EventsEndBody(BaseModel): diff --git a/frigate/api/event.py b/frigate/api/event.py index c03cfb431..b0a749018 100644 --- a/frigate/api/event.py +++ b/frigate/api/event.py @@ -1782,6 +1782,7 @@ def create_event( body.duration, "api", body.draw, + body.pre_capture, ), EventMetadataTypeEnum.manual_event_create.value, ) diff --git a/frigate/review/maintainer.py b/frigate/review/maintainer.py index 917c0c5ac..6afdc8de9 100644 --- a/frigate/review/maintainer.py +++ b/frigate/review/maintainer.py @@ -394,7 +394,11 @@ class ReviewSegmentMaintainer(threading.Thread): if activity.has_activity_category(SeverityEnum.alert): # update current time for last alert activity - segment.last_alert_time = frame_time + if ( + segment.last_alert_time is None + or frame_time > segment.last_alert_time + ): + segment.last_alert_time = frame_time if segment.severity != SeverityEnum.alert: # if segment is not alert category but current activity is @@ -404,7 +408,11 @@ class ReviewSegmentMaintainer(threading.Thread): should_update_image = True if activity.has_activity_category(SeverityEnum.detection): - segment.last_detection_time = frame_time + if ( + segment.last_detection_time is None + or frame_time > segment.last_detection_time + ): + segment.last_detection_time = frame_time for object in activity.get_all_objects(): # Alert-level objects should always be added (they extend/upgrade the segment) @@ -695,17 +703,28 @@ class ReviewSegmentMaintainer(threading.Thread): current_segment.detections[manual_info["event_id"]] = ( manual_info["label"] ) - if ( - topic == DetectionTypeEnum.api - and self.config.cameras[camera].review.alerts.enabled - ): - current_segment.severity = 
SeverityEnum.alert + if topic == DetectionTypeEnum.api: + # manual_info["label"] contains 'label: sub_label' + # so split out the label without modifying manual_info + if ( + self.config.cameras[camera].review.detections.enabled + and manual_info["label"].split(": ")[0] + in self.config.cameras[camera].review.detections.labels + ): + current_segment.last_detection_time = manual_info[ + "end_time" + ] + elif self.config.cameras[camera].review.alerts.enabled: + current_segment.severity = SeverityEnum.alert + current_segment.last_alert_time = manual_info[ + "end_time" + ] elif ( topic == DetectionTypeEnum.lpr and self.config.cameras[camera].review.detections.enabled ): current_segment.severity = SeverityEnum.detection - current_segment.last_alert_time = manual_info["end_time"] + current_segment.last_alert_time = manual_info["end_time"] elif manual_info["state"] == ManualEventState.start: self.indefinite_events[camera][manual_info["event_id"]] = ( manual_info["label"] @@ -717,7 +736,18 @@ class ReviewSegmentMaintainer(threading.Thread): topic == DetectionTypeEnum.api and self.config.cameras[camera].review.alerts.enabled ): - current_segment.severity = SeverityEnum.alert + # manual_info["label"] contains 'label: sub_label' + # so split out the label without modifying manual_info + if ( + not self.config.cameras[ + camera + ].review.detections.enabled + or manual_info["label"].split(": ")[0] + not in self.config.cameras[ + camera + ].review.detections.labels + ): + current_segment.severity = SeverityEnum.alert elif ( topic == DetectionTypeEnum.lpr and self.config.cameras[camera].review.detections.enabled @@ -789,11 +819,23 @@ class ReviewSegmentMaintainer(threading.Thread): detections, ) elif topic == DetectionTypeEnum.api: - if self.config.cameras[camera].review.alerts.enabled: + severity = None + # manual_info["label"] contains 'label: sub_label' + # so split out the label without modifying manual_info + if ( + self.config.cameras[camera].review.detections.enabled + 
and manual_info["label"].split(": ")[0] + in self.config.cameras[camera].review.detections.labels + ): + severity = SeverityEnum.detection + elif self.config.cameras[camera].review.alerts.enabled: + severity = SeverityEnum.alert + + if severity: self.active_review_segments[camera] = PendingReviewSegment( camera, frame_time, - SeverityEnum.alert, + severity, {manual_info["event_id"]: manual_info["label"]}, {}, [], @@ -820,7 +862,7 @@ class ReviewSegmentMaintainer(threading.Thread): ].last_detection_time = manual_info["end_time"] else: logger.warning( - f"Manual event API has been called for {camera}, but alerts are disabled. This manual event will not appear as an alert." + f"Manual event API has been called for {camera}, but alerts and detections are disabled. This manual event will not appear as an alert or detection." ) elif topic == DetectionTypeEnum.lpr: if self.config.cameras[camera].review.detections.enabled: diff --git a/frigate/track/object_processing.py b/frigate/track/object_processing.py index f44f21be3..9ac04b42a 100644 --- a/frigate/track/object_processing.py +++ b/frigate/track/object_processing.py @@ -515,6 +515,7 @@ class TrackedObjectProcessor(threading.Thread): duration, source_type, draw, + pre_capture, ) = payload # save the snapshot image @@ -522,6 +523,11 @@ class TrackedObjectProcessor(threading.Thread): None, event_id, label, draw ) end_time = frame_time + duration if duration is not None else None + start_time = ( + frame_time - self.config.cameras[camera_name].record.event_pre_capture + if pre_capture is None + else frame_time - pre_capture + ) # send event to event maintainer self.event_sender.publish( @@ -536,8 +542,7 @@ class TrackedObjectProcessor(threading.Thread): "sub_label": sub_label, "score": score, "camera": camera_name, - "start_time": frame_time - - self.config.cameras[camera_name].record.event_pre_capture, + "start_time": start_time, "end_time": end_time, "has_clip": self.config.cameras[camera_name].record.enabled and 
include_recording, From 84760c42cb61c7f9b30aae2a5d939c6532ee26e1 Mon Sep 17 00:00:00 2001 From: nulledy <254504350+nulledy@users.noreply.github.com> Date: Sun, 15 Feb 2026 10:35:41 -0500 Subject: [PATCH 32/56] ffmpeg Preview Segment Optimization for "high" and "very_high" (#21996) * Introduce qmax parameter for ffmpeg preview encoding Added PREVIEW_QMAX_PARAM to control ffmpeg encoding quality. * formatting * Fix spacing in qmax parameters for preview quality --- frigate/output/preview.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/frigate/output/preview.py b/frigate/output/preview.py index f16bb3bd7..b66c1298a 100644 --- a/frigate/output/preview.py +++ b/frigate/output/preview.py @@ -47,6 +47,15 @@ PREVIEW_QUALITY_BIT_RATES = { RecordQualityEnum.high: 9864, RecordQualityEnum.very_high: 10096, } +# the -qmax param for ffmpeg prevents the encoder from overly compressing frames while still trying to hit the bitrate target +# lower values are higher quality. 
This is especially important for initial frames in the segment +PREVIEW_QMAX_PARAM = { + RecordQualityEnum.very_low: "", + RecordQualityEnum.low: "", + RecordQualityEnum.medium: "", + RecordQualityEnum.high: " -qmax 25", + RecordQualityEnum.very_high: " -qmax 25", +} def get_cache_image_name(camera: str, frame_time: float) -> str: @@ -125,7 +134,7 @@ class FFMpegConverter(threading.Thread): config.ffmpeg.ffmpeg_path, "default", input="-f concat -y -protocol_whitelist pipe,file -safe 0 -threads 1 -i /dev/stdin", - output=f"-threads 1 -g {PREVIEW_KEYFRAME_INTERVAL} -bf 0 -b:v {PREVIEW_QUALITY_BIT_RATES[self.config.record.preview.quality]} {FPS_VFR_PARAM} -movflags +faststart -pix_fmt yuv420p {self.path}", + output=f"-threads 1 -g {PREVIEW_KEYFRAME_INTERVAL} -bf 0 -b:v {PREVIEW_QUALITY_BIT_RATES[self.config.record.preview.quality]}{PREVIEW_QMAX_PARAM[self.config.record.preview.quality]} {FPS_VFR_PARAM} -movflags +faststart -pix_fmt yuv420p {self.path}", type=EncodeTypeEnum.preview, ) From 5f02e33e5508b1a514de40de982e646d24847142 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Wed, 25 Feb 2026 09:19:56 -0700 Subject: [PATCH 33/56] Adapt to new Gemini format --- frigate/genai/gemini.py | 275 +++++++++++++++++++++------------- 1 file changed, 143 insertions(+), 132 deletions(-) diff --git a/frigate/genai/gemini.py b/frigate/genai/gemini.py index c2e5d9f7e..fd273faec 100644 --- a/frigate/genai/gemini.py +++ b/frigate/genai/gemini.py @@ -1,6 +1,5 @@ """Gemini Provider for Frigate AI.""" -import json import logging from typing import Any, Optional @@ -84,147 +83,169 @@ class GeminiClient(GenAIClient): tools: Optional[list[dict[str, Any]]] = None, tool_choice: Optional[str] = "auto", ) -> dict[str, Any]: + """ + Send chat messages to Gemini with optional tool definitions. + + Implements function calling/tool usage for Gemini models. 
+ """ try: + # Convert messages to Gemini format + gemini_messages = [] + for msg in messages: + role = msg.get("role", "user") + content = msg.get("content", "") + + # Map roles to Gemini format + if role == "system": + # Gemini doesn't have system role, prepend to first user message + if gemini_messages and gemini_messages[0].role == "user": + gemini_messages[0].parts[ + 0 + ].text = f"{content}\n\n{gemini_messages[0].parts[0].text}" + else: + gemini_messages.append( + types.Content( + role="user", parts=[types.Part.from_text(text=content)] + ) + ) + elif role == "assistant": + gemini_messages.append( + types.Content( + role="model", parts=[types.Part.from_text(text=content)] + ) + ) + elif role == "tool": + # Handle tool response + function_response = { + "name": msg.get("name", ""), + "response": content, + } + gemini_messages.append( + types.Content( + role="function", + parts=[ + types.Part.from_function_response(function_response) + ], + ) + ) + else: # user + gemini_messages.append( + types.Content( + role="user", parts=[types.Part.from_text(text=content)] + ) + ) + + # Convert tools to Gemini format + gemini_tools = None if tools: - function_declarations = [] + gemini_tools = [] for tool in tools: if tool.get("type") == "function": - func_def = tool.get("function", {}) - function_declarations.append( - genai.protos.FunctionDeclaration( - name=func_def.get("name"), - description=func_def.get("description"), - parameters=genai.protos.Schema( - type=genai.protos.Type.OBJECT, - properties={ - prop_name: genai.protos.Schema( - type=_convert_json_type_to_gemini( - prop.get("type") - ), - description=prop.get("description"), - ) - for prop_name, prop in func_def.get( - "parameters", {} - ) - .get("properties", {}) - .items() - }, - required=func_def.get("parameters", {}).get( - "required", [] - ), - ), + func = tool.get("function", {}) + gemini_tools.append( + types.Tool( + function_declarations=[ + types.FunctionDeclaration( + name=func.get("name", ""), + 
description=func.get("description", ""), + parameters=func.get("parameters", {}), + ) + ] ) ) - tool_config = genai.protos.Tool( - function_declarations=function_declarations - ) - + # Configure tool choice + tool_config = None + if tool_choice: if tool_choice == "none": - function_calling_config = genai.protos.FunctionCallingConfig( - mode=genai.protos.FunctionCallingConfig.Mode.NONE + tool_config = types.ToolConfig( + function_calling_config=types.FunctionCallingConfig(mode="NONE") + ) + elif tool_choice == "auto": + tool_config = types.ToolConfig( + function_calling_config=types.FunctionCallingConfig(mode="AUTO") ) elif tool_choice == "required": - function_calling_config = genai.protos.FunctionCallingConfig( - mode=genai.protos.FunctionCallingConfig.Mode.ANY - ) - else: - function_calling_config = genai.protos.FunctionCallingConfig( - mode=genai.protos.FunctionCallingConfig.Mode.AUTO - ) - else: - tool_config = None - function_calling_config = None - - contents = [] - for msg in messages: - role = msg.get("role") - content = msg.get("content", "") - - if role == "system": - continue - elif role == "user": - contents.append({"role": "user", "parts": [content]}) - elif role == "assistant": - parts = [content] if content else [] - if "tool_calls" in msg: - for tc in msg["tool_calls"]: - parts.append( - genai.protos.FunctionCall( - name=tc["function"]["name"], - args=json.loads(tc["function"]["arguments"]), - ) - ) - contents.append({"role": "model", "parts": parts}) - elif role == "tool": - tool_name = msg.get("name", "") - tool_result = ( - json.loads(content) if isinstance(content, str) else content - ) - contents.append( - { - "role": "function", - "parts": [ - genai.protos.FunctionResponse( - name=tool_name, - response=tool_result, - ) - ], - } + tool_config = types.ToolConfig( + function_calling_config=types.FunctionCallingConfig(mode="ANY") ) - generation_config = genai.types.GenerationConfig( - candidate_count=1, - ) - if function_calling_config: - 
generation_config.function_calling_config = function_calling_config + # Build request config + config_params = {"candidate_count": 1} - response = self.provider.generate_content( - contents, - tools=[tool_config] if tool_config else None, - generation_config=generation_config, - request_options=genai.types.RequestOptions(timeout=self.timeout), + if gemini_tools: + config_params["tools"] = gemini_tools + + if tool_config: + config_params["tool_config"] = tool_config + + # Merge runtime_options + if isinstance(self.genai_config.runtime_options, dict): + config_params.update(self.genai_config.runtime_options) + + response = self.provider.models.generate_content( + model=self.genai_config.model, + contents=gemini_messages, + config=types.GenerateContentConfig(**config_params), ) + # Check if response is valid + if not response or not response.candidates: + return { + "content": None, + "tool_calls": None, + "finish_reason": "error", + } + + candidate = response.candidates[0] content = None tool_calls = None - if response.candidates and response.candidates[0].content: - parts = response.candidates[0].content.parts - text_parts = [p.text for p in parts if hasattr(p, "text") and p.text] - if text_parts: - content = " ".join(text_parts).strip() + # Extract content and tool calls from response + if candidate.content and candidate.content.parts: + for part in candidate.content.parts: + if part.text: + content = part.text.strip() + elif part.function_call: + # Handle function call + if tool_calls is None: + tool_calls = [] + + try: + arguments = ( + dict(part.function_call.args) + if part.function_call.args + else {} + ) + except Exception: + arguments = {} - function_calls = [ - p.function_call - for p in parts - if hasattr(p, "function_call") and p.function_call - ] - if function_calls: - tool_calls = [] - for fc in function_calls: tool_calls.append( { - "id": f"call_{hash(fc.name)}", - "name": fc.name, - "arguments": dict(fc.args) - if hasattr(fc, "args") - else {}, + 
"id": part.function_call.name or "", + "name": part.function_call.name or "", + "arguments": arguments, } ) + # Determine finish reason finish_reason = "error" - if response.candidates: - finish_reason_map = { - genai.types.FinishReason.STOP: "stop", - genai.types.FinishReason.MAX_TOKENS: "length", - genai.types.FinishReason.SAFETY: "stop", - genai.types.FinishReason.RECITATION: "stop", - genai.types.FinishReason.OTHER: "error", - } - finish_reason = finish_reason_map.get( - response.candidates[0].finish_reason, "error" - ) + if hasattr(candidate, "finish_reason") and candidate.finish_reason: + from google.genai.types import FinishReason + + if candidate.finish_reason == FinishReason.STOP: + finish_reason = "stop" + elif candidate.finish_reason == FinishReason.MAX_TOKENS: + finish_reason = "length" + elif candidate.finish_reason in [ + FinishReason.SAFETY, + FinishReason.RECITATION, + ]: + finish_reason = "error" + elif tool_calls: + finish_reason = "tool_calls" + elif content: + finish_reason = "stop" elif tool_calls: finish_reason = "tool_calls" elif content: @@ -236,29 +257,19 @@ class GeminiClient(GenAIClient): "finish_reason": finish_reason, } - except GoogleAPICallError as e: - logger.warning("Gemini returned an error: %s", str(e)) + except errors.APIError as e: + logger.warning("Gemini API error during chat_with_tools: %s", str(e)) return { "content": None, "tool_calls": None, "finish_reason": "error", } except Exception as e: - logger.warning("Unexpected error in Gemini chat_with_tools: %s", str(e)) + logger.warning( + "Gemini returned an error during chat_with_tools: %s", str(e) + ) return { "content": None, "tool_calls": None, "finish_reason": "error", } - - -def _convert_json_type_to_gemini(json_type: str) -> genai.protos.Type: - type_map = { - "string": genai.protos.Type.STRING, - "integer": genai.protos.Type.INTEGER, - "number": genai.protos.Type.NUMBER, - "boolean": genai.protos.Type.BOOLEAN, - "array": genai.protos.Type.ARRAY, - "object": 
genai.protos.Type.OBJECT, - } - return type_map.get(json_type, genai.protos.Type.STRING) From e5087b092d0c86d96124e47f122ca0778676447f Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Thu, 26 Feb 2026 08:38:42 -0700 Subject: [PATCH 34/56] Fix frame time access --- frigate/api/media.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/frigate/api/media.py b/frigate/api/media.py index 01776f903..3cfd97674 100644 --- a/frigate/api/media.py +++ b/frigate/api/media.py @@ -737,6 +737,7 @@ async def event_snapshot( ): event_complete = False jpg_bytes = None + frame_time = 0 try: event = Event.get(Event.id == event_id, Event.end_time != None) event_complete = True @@ -790,7 +791,7 @@ async def event_snapshot( headers = { "Content-Type": "image/jpeg", "Cache-Control": "private, max-age=31536000" if event_complete else "no-store", - "X-Frame-Time": frame_time, + "X-Frame-Time": str(frame_time), } if params.download: From 91714b8743ff0a73e2d47e4bac0a06c1aeb35f52 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Thu, 26 Feb 2026 21:04:38 -0700 Subject: [PATCH 35/56] Remove exceptions --- frigate/api/chat.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/frigate/api/chat.py b/frigate/api/chat.py index 444650e13..1f5cc2297 100644 --- a/frigate/api/chat.py +++ b/frigate/api/chat.py @@ -190,7 +190,7 @@ async def _execute_search_objects( return JSONResponse( content={ "success": False, - "message": f"Error searching objects: {str(e)}", + "message": "Error searching objects", }, status_code=500, ) @@ -279,7 +279,7 @@ async def _execute_get_live_context( except Exception as e: logger.error(f"Error executing get_live_context: {e}", exc_info=True) return { - "error": f"Error getting live context: {str(e)}", + "error": "Error getting live context", } @@ -599,9 +599,7 @@ Always be accurate with time calculations based on the current date provided.{ca f"Error executing tool {tool_name} (id: {tool_call_id}): {e}", exc_info=True, ) - 
error_content = json.dumps( - {"error": f"Tool execution failed: {str(e)}"} - ) + error_content = json.dumps({"error": "Tool execution failed"}) tool_results.append( { "role": "tool", From 9c3a74b4f561876e284216f8f84576d73420ac65 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Thu, 26 Feb 2026 21:04:51 -0700 Subject: [PATCH 36/56] Cleanup --- frigate/api/auth.py | 2 +- frigate/detectors/detection_runners.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/frigate/api/auth.py b/frigate/api/auth.py index af21b1d4b..04a5bd19a 100644 --- a/frigate/api/auth.py +++ b/frigate/api/auth.py @@ -1037,4 +1037,4 @@ async def get_allowed_cameras_for_filter(request: Request): role = current_user["role"] all_camera_names = set(request.app.frigate_config.cameras.keys()) roles_dict = request.app.frigate_config.auth.roles - return User.get_allowed_cameras(role, roles_dict, all_camera_names) \ No newline at end of file + return User.get_allowed_cameras(role, roles_dict, all_camera_names) diff --git a/frigate/detectors/detection_runners.py b/frigate/detectors/detection_runners.py index 8b2f5ecf4..da7df9d36 100644 --- a/frigate/detectors/detection_runners.py +++ b/frigate/detectors/detection_runners.py @@ -603,4 +603,4 @@ def get_optimized_runner( provider_options=options, ), model_type=model_type, - ) \ No newline at end of file + ) From b4eac11cbd32b7e471f1fe31e085b486a235705d Mon Sep 17 00:00:00 2001 From: Martin Weinelt Date: Fri, 27 Feb 2026 05:53:26 +0100 Subject: [PATCH 37/56] Clean up trailing whitespaces in cpu stats process cmdline (#22089) The psutil library reads the process commandline by opening /proc/pid/cmdline which returns a buffer that is larger than just the program cmdline due to rounded memory allocation sizes. That means that if the library does not detect a Null-terminated string it keeps appending empty strings which add up as whitespaces when joined. 
--- frigate/util/services.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/frigate/util/services.py b/frigate/util/services.py index 19ec4efdf..f1eedb01e 100644 --- a/frigate/util/services.py +++ b/frigate/util/services.py @@ -121,7 +121,7 @@ def get_cpu_stats() -> dict[str, dict]: pid = str(process.info["pid"]) try: cpu_percent = process.info["cpu_percent"] - cmdline = process.info["cmdline"] + cmdline = " ".join(process.info["cmdline"]).rstrip() with open(f"/proc/{pid}/stat", "r") as f: stats = f.readline().split() @@ -155,7 +155,7 @@ def get_cpu_stats() -> dict[str, dict]: "cpu": str(cpu_percent), "cpu_average": str(round(cpu_average_usage, 2)), "mem": f"{mem_pct}", - "cmdline": clean_camera_user_pass(" ".join(cmdline)), + "cmdline": clean_camera_user_pass(cmdline), } except Exception: continue From b88186983a4de0c7059ccde42ee62b7100bfa435 Mon Sep 17 00:00:00 2001 From: Felipe Santos Date: Fri, 27 Feb 2026 01:54:00 -0300 Subject: [PATCH 38/56] Increase maximum stream timeout to 15s (#21936) * Increase maximum stream timeout to 15s * Use predefined intervals instead for the stream timeout --- web/src/views/settings/UiSettingsView.tsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web/src/views/settings/UiSettingsView.tsx b/web/src/views/settings/UiSettingsView.tsx index 44ed7818b..6e5f4d3b2 100644 --- a/web/src/views/settings/UiSettingsView.tsx +++ b/web/src/views/settings/UiSettingsView.tsx @@ -196,7 +196,7 @@ export default function UiSettingsView() { - {[1, 2, 3, 4, 5, 6, 7, 8, 9, 10].map((timeout) => ( + {[1, 2, 3, 5, 8, 10, 12, 15].map((timeout) => ( Date: Fri, 27 Feb 2026 05:55:29 +0100 Subject: [PATCH 39/56] Fallback from tflite-runtime to ai-edge-litert (#21876) The fallback to tensorflow was established back in 2023, because we could not provide tflite-runtime downstream in nixpkgs. By now we have ai-edge-litert available, which is the successor to the tflite-runtime. 
It still provides the same entrypoints as tflite-runtime and functionality has been verified in multiple deployments for the last two weeks. --- frigate/data_processing/real_time/bird.py | 2 +- frigate/data_processing/real_time/custom_classification.py | 4 ++-- frigate/detectors/detector_utils.py | 2 +- frigate/detectors/plugins/cpu_tfl.py | 2 +- frigate/detectors/plugins/edgetpu_tfl.py | 2 +- frigate/embeddings/onnx/face_embedding.py | 2 +- frigate/events/audio.py | 2 +- 7 files changed, 8 insertions(+), 8 deletions(-) diff --git a/frigate/data_processing/real_time/bird.py b/frigate/data_processing/real_time/bird.py index 7851c0997..520440005 100644 --- a/frigate/data_processing/real_time/bird.py +++ b/frigate/data_processing/real_time/bird.py @@ -22,7 +22,7 @@ from .api import RealTimeProcessorApi try: from tflite_runtime.interpreter import Interpreter except ModuleNotFoundError: - from tensorflow.lite.python.interpreter import Interpreter + from ai_edge_litert.interpreter import Interpreter logger = logging.getLogger(__name__) diff --git a/frigate/data_processing/real_time/custom_classification.py b/frigate/data_processing/real_time/custom_classification.py index 229383d9f..2c74a6575 100644 --- a/frigate/data_processing/real_time/custom_classification.py +++ b/frigate/data_processing/real_time/custom_classification.py @@ -32,7 +32,7 @@ from .api import RealTimeProcessorApi try: from tflite_runtime.interpreter import Interpreter except ModuleNotFoundError: - from tensorflow.lite.python.interpreter import Interpreter + from ai_edge_litert.interpreter import Interpreter logger = logging.getLogger(__name__) @@ -76,7 +76,7 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi): try: from tflite_runtime.interpreter import Interpreter except ModuleNotFoundError: - from tensorflow.lite.python.interpreter import Interpreter + from ai_edge_litert.interpreter import Interpreter model_path = os.path.join(self.model_dir, "model.tflite") labelmap_path = 
os.path.join(self.model_dir, "labelmap.txt") diff --git a/frigate/detectors/detector_utils.py b/frigate/detectors/detector_utils.py index d732de871..d8930b2ae 100644 --- a/frigate/detectors/detector_utils.py +++ b/frigate/detectors/detector_utils.py @@ -6,7 +6,7 @@ import numpy as np try: from tflite_runtime.interpreter import Interpreter, load_delegate except ModuleNotFoundError: - from tensorflow.lite.python.interpreter import Interpreter, load_delegate + from ai_edge_litert.interpreter import Interpreter, load_delegate logger = logging.getLogger(__name__) diff --git a/frigate/detectors/plugins/cpu_tfl.py b/frigate/detectors/plugins/cpu_tfl.py index 00351f519..6d336bb6b 100644 --- a/frigate/detectors/plugins/cpu_tfl.py +++ b/frigate/detectors/plugins/cpu_tfl.py @@ -12,7 +12,7 @@ from ..detector_utils import tflite_detect_raw, tflite_init try: from tflite_runtime.interpreter import Interpreter except ModuleNotFoundError: - from tensorflow.lite.python.interpreter import Interpreter + from ai_edge_litert.interpreter import Interpreter logger = logging.getLogger(__name__) diff --git a/frigate/detectors/plugins/edgetpu_tfl.py b/frigate/detectors/plugins/edgetpu_tfl.py index 2b94fde39..36c769b4b 100644 --- a/frigate/detectors/plugins/edgetpu_tfl.py +++ b/frigate/detectors/plugins/edgetpu_tfl.py @@ -13,7 +13,7 @@ from frigate.detectors.detector_config import BaseDetectorConfig, ModelTypeEnum try: from tflite_runtime.interpreter import Interpreter, load_delegate except ModuleNotFoundError: - from tensorflow.lite.python.interpreter import Interpreter, load_delegate + from ai_edge_litert.interpreter import Interpreter, load_delegate logger = logging.getLogger(__name__) diff --git a/frigate/embeddings/onnx/face_embedding.py b/frigate/embeddings/onnx/face_embedding.py index 04d756897..75dfedc94 100644 --- a/frigate/embeddings/onnx/face_embedding.py +++ b/frigate/embeddings/onnx/face_embedding.py @@ -17,7 +17,7 @@ from .base_embedding import BaseEmbedding try: from 
tflite_runtime.interpreter import Interpreter except ModuleNotFoundError: - from tensorflow.lite.python.interpreter import Interpreter + from ai_edge_litert.interpreter import Interpreter logger = logging.getLogger(__name__) diff --git a/frigate/events/audio.py b/frigate/events/audio.py index e88f2ae71..ad87d19c1 100644 --- a/frigate/events/audio.py +++ b/frigate/events/audio.py @@ -43,7 +43,7 @@ from frigate.video import start_or_restart_ffmpeg, stop_ffmpeg try: from tflite_runtime.interpreter import Interpreter except ModuleNotFoundError: - from tensorflow.lite.python.interpreter import Interpreter + from ai_edge_litert.interpreter import Interpreter logger = logging.getLogger(__name__) From ba0e7bbc1a3ad1adce0f70eb63f3aeb893764d3d Mon Sep 17 00:00:00 2001 From: Martin Weinelt Date: Fri, 27 Feb 2026 13:37:17 +0100 Subject: [PATCH 40/56] Remove redundant tensorflow import in BirdRealTimeProcessor (#22127) Was added in ae0c1ca (#21301) and then incompletely reverted in ec1d794 (#21320). 
--- frigate/data_processing/real_time/custom_classification.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/frigate/data_processing/real_time/custom_classification.py b/frigate/data_processing/real_time/custom_classification.py index 2c74a6575..1a2512e43 100644 --- a/frigate/data_processing/real_time/custom_classification.py +++ b/frigate/data_processing/real_time/custom_classification.py @@ -73,11 +73,6 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi): self.__build_detector() def __build_detector(self) -> None: - try: - from tflite_runtime.interpreter import Interpreter - except ModuleNotFoundError: - from ai_edge_litert.interpreter import Interpreter - model_path = os.path.join(self.model_dir, "model.tflite") labelmap_path = os.path.join(self.model_dir, "labelmap.txt") From eeefbf2bb5b6c56d6287b728f1c5fd6c88956b06 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Fri, 27 Feb 2026 08:35:33 -0700 Subject: [PATCH 41/56] Add support for multiple GenAI Providers (#22144) * GenAI client manager * Add config migration * Convert to roles list * Support getting client via manager * Cleanup * Fix import issues * Set model in llama.cpp config * Cleanup * Use config update * Cleanup * Add new title and desc --- frigate/api/app.py | 1 + frigate/api/chat.py | 3 +- frigate/api/fastapi_app.py | 2 + frigate/api/review.py | 5 +- frigate/config/camera/genai.py | 52 ++++++++++++++++--- frigate/config/config.py | 20 +++++-- frigate/embeddings/maintainer.py | 15 +++--- frigate/genai/__init__.py | 26 +++++----- frigate/genai/llama_cpp.py | 2 + frigate/genai/manager.py | 89 ++++++++++++++++++++++++++++++++ frigate/util/config.py | 7 +++ 11 files changed, 186 insertions(+), 36 deletions(-) create mode 100644 frigate/genai/manager.py diff --git a/frigate/api/app.py b/frigate/api/app.py index 126c613a7..9246095ca 100644 --- a/frigate/api/app.py +++ b/frigate/api/app.py @@ -432,6 +432,7 @@ def config_set(request: Request, body: AppConfigSetBody): if 
body.requires_restart == 0 or body.update_topic: old_config: FrigateConfig = request.app.frigate_config request.app.frigate_config = config + request.app.genai_manager.update_config(config) if body.update_topic: if body.update_topic.startswith("config/cameras/"): diff --git a/frigate/api/chat.py b/frigate/api/chat.py index 1f5cc2297..415f422da 100644 --- a/frigate/api/chat.py +++ b/frigate/api/chat.py @@ -23,7 +23,6 @@ from frigate.api.defs.response.chat_response import ( ) from frigate.api.defs.tags import Tags from frigate.api.event import events -from frigate.genai import get_genai_client logger = logging.getLogger(__name__) @@ -383,7 +382,7 @@ async def chat_completion( 6. Repeats until final answer 7. Returns response to user """ - genai_client = get_genai_client(request.app.frigate_config) + genai_client = request.app.genai_manager.tool_client if not genai_client: return JSONResponse( content={ diff --git a/frigate/api/fastapi_app.py b/frigate/api/fastapi_app.py index 496c8fada..3206c7b4a 100644 --- a/frigate/api/fastapi_app.py +++ b/frigate/api/fastapi_app.py @@ -33,6 +33,7 @@ from frigate.comms.event_metadata_updater import ( from frigate.config import FrigateConfig from frigate.config.camera.updater import CameraConfigUpdatePublisher from frigate.embeddings import EmbeddingsContext +from frigate.genai import GenAIClientManager from frigate.ptz.onvif import OnvifController from frigate.stats.emitter import StatsEmitter from frigate.storage import StorageMaintainer @@ -134,6 +135,7 @@ def create_fastapi_app( app.include_router(record.router) # App Properties app.frigate_config = frigate_config + app.genai_manager = GenAIClientManager(frigate_config) app.embeddings = embeddings app.detected_frames_processor = detected_frames_processor app.storage_maintainer = storage_maintainer diff --git a/frigate/api/review.py b/frigate/api/review.py index 76619dcb2..d2e8063d5 100644 --- a/frigate/api/review.py +++ b/frigate/api/review.py @@ -33,7 +33,6 @@ from 
frigate.api.defs.response.review_response import ( ReviewSummaryResponse, ) from frigate.api.defs.tags import Tags -from frigate.config import FrigateConfig from frigate.embeddings import EmbeddingsContext from frigate.models import Recordings, ReviewSegment, UserReviewStatus from frigate.review.types import SeverityEnum @@ -747,9 +746,7 @@ async def set_not_reviewed( description="Use GenAI to summarize review items over a period of time.", ) def generate_review_summary(request: Request, start_ts: float, end_ts: float): - config: FrigateConfig = request.app.frigate_config - - if not config.genai.provider: + if not request.app.genai_manager.vision_client: return JSONResponse( content=( { diff --git a/frigate/config/camera/genai.py b/frigate/config/camera/genai.py index 3dd596c3b..56d7322f5 100644 --- a/frigate/config/camera/genai.py +++ b/frigate/config/camera/genai.py @@ -6,7 +6,7 @@ from pydantic import Field from ..base import FrigateBaseModel from ..env import EnvString -__all__ = ["GenAIConfig", "GenAIProviderEnum"] +__all__ = ["GenAIConfig", "GenAIProviderEnum", "GenAIRoleEnum"] class GenAIProviderEnum(str, Enum): @@ -17,15 +17,55 @@ class GenAIProviderEnum(str, Enum): llamacpp = "llamacpp" +class GenAIRoleEnum(str, Enum): + tools = "tools" + vision = "vision" + embeddings = "embeddings" + + class GenAIConfig(FrigateBaseModel): """Primary GenAI Config to define GenAI Provider.""" - api_key: Optional[EnvString] = Field(default=None, title="Provider API key.") - base_url: Optional[str] = Field(default=None, title="Provider base url.") - model: str = Field(default="gpt-4o", title="GenAI model.") - provider: GenAIProviderEnum | None = Field(default=None, title="GenAI provider.") + api_key: Optional[EnvString] = Field( + default=None, + title="API key", + description="API key required by some providers (can also be set via environment variables).", + ) + base_url: Optional[str] = Field( + default=None, + title="Base URL", + description="Base URL for self-hosted or 
compatible providers (for example an Ollama instance).", + ) + model: str = Field( + default="gpt-4o", + title="Model", + description="The model to use from the provider for generating descriptions or summaries.", + ) + provider: GenAIProviderEnum | None = Field( + default=None, + title="Provider", + description="The GenAI provider to use (for example: ollama, gemini, openai).", + ) + roles: list[GenAIRoleEnum] = Field( + default_factory=lambda: [ + GenAIRoleEnum.embeddings, + GenAIRoleEnum.vision, + GenAIRoleEnum.tools, + ], + title="Roles", + description="GenAI roles (tools, vision, embeddings); one provider per role.", + ) provider_options: dict[str, Any] = Field( - default={}, title="GenAI Provider extra options." + default={}, + title="Provider options", + description="Additional provider-specific options to pass to the GenAI client.", + json_schema_extra={"additionalProperties": {"type": "string"}}, + ) + runtime_options: dict[str, Any] = Field( + default={}, + title="Runtime options", + description="Runtime options passed to the provider for each inference call.", + json_schema_extra={"additionalProperties": {"type": "string"}}, ) runtime_options: dict[str, Any] = Field( default={}, title="Options to pass during inference calls." diff --git a/frigate/config/config.py b/frigate/config/config.py index 370c89458..e31e3d8c8 100644 --- a/frigate/config/config.py +++ b/frigate/config/config.py @@ -45,7 +45,7 @@ from .camera.audio import AudioConfig from .camera.birdseye import BirdseyeConfig from .camera.detect import DetectConfig from .camera.ffmpeg import FfmpegConfig -from .camera.genai import GenAIConfig +from .camera.genai import GenAIConfig, GenAIRoleEnum from .camera.motion import MotionConfig from .camera.notification import NotificationConfig from .camera.objects import FilterConfig, ObjectConfig @@ -347,9 +347,9 @@ class FrigateConfig(FrigateBaseModel): default_factory=ModelConfig, title="Detection model configuration." 
) - # GenAI config - genai: GenAIConfig = Field( - default_factory=GenAIConfig, title="Generative AI configuration." + # GenAI config (named provider configs: name -> GenAIConfig) + genai: Dict[str, GenAIConfig] = Field( + default_factory=dict, title="Generative AI configuration (named providers)." ) # Camera config @@ -431,6 +431,18 @@ class FrigateConfig(FrigateBaseModel): # set notifications state self.notifications.enabled_in_config = self.notifications.enabled + # validate genai: each role (tools, vision, embeddings) at most once + role_to_name: dict[GenAIRoleEnum, str] = {} + for name, genai_cfg in self.genai.items(): + for role in genai_cfg.roles: + if role in role_to_name: + raise ValueError( + f"GenAI role '{role.value}' is assigned to both " + f"'{role_to_name[role]}' and '{name}'; each role must have " + "exactly one provider." + ) + role_to_name[role] = name + # set default min_score for object attributes for attribute in self.model.all_attributes: if not self.objects.filters.get(attribute): diff --git a/frigate/embeddings/maintainer.py b/frigate/embeddings/maintainer.py index bd707de15..54831942a 100644 --- a/frigate/embeddings/maintainer.py +++ b/frigate/embeddings/maintainer.py @@ -59,7 +59,7 @@ from frigate.data_processing.real_time.license_plate import ( from frigate.data_processing.types import DataProcessorMetrics, PostProcessDataEnum from frigate.db.sqlitevecq import SqliteVecQueueDatabase from frigate.events.types import EventTypeEnum, RegenerateDescriptionEnum -from frigate.genai import get_genai_client +from frigate.genai import GenAIClientManager from frigate.models import Event, Recordings, ReviewSegment, Trigger from frigate.util.builtin import serialize from frigate.util.file import get_event_thumbnail_bytes @@ -144,7 +144,7 @@ class EmbeddingMaintainer(threading.Thread): self.frame_manager = SharedMemoryFrameManager() self.detected_license_plates: dict[str, dict[str, Any]] = {} - self.genai_client = get_genai_client(config) + 
self.genai_manager = GenAIClientManager(config) # model runners to share between realtime and post processors if self.config.lpr.enabled: @@ -203,12 +203,15 @@ class EmbeddingMaintainer(threading.Thread): # post processors self.post_processors: list[PostProcessorApi] = [] - if self.genai_client is not None and any( + if self.genai_manager.vision_client is not None and any( c.review.genai.enabled_in_config for c in self.config.cameras.values() ): self.post_processors.append( ReviewDescriptionProcessor( - self.config, self.requestor, self.metrics, self.genai_client + self.config, + self.requestor, + self.metrics, + self.genai_manager.vision_client, ) ) @@ -246,7 +249,7 @@ class EmbeddingMaintainer(threading.Thread): ) self.post_processors.append(semantic_trigger_processor) - if self.genai_client is not None and any( + if self.genai_manager.vision_client is not None and any( c.objects.genai.enabled_in_config for c in self.config.cameras.values() ): self.post_processors.append( @@ -255,7 +258,7 @@ class EmbeddingMaintainer(threading.Thread): self.embeddings, self.requestor, self.metrics, - self.genai_client, + self.genai_manager.vision_client, semantic_trigger_processor, ) ) diff --git a/frigate/genai/__init__.py b/frigate/genai/__init__.py index 0ae664b9f..f52a19e45 100644 --- a/frigate/genai/__init__.py +++ b/frigate/genai/__init__.py @@ -9,13 +9,24 @@ from typing import Any, Optional from playhouse.shortcuts import model_to_dict -from frigate.config import CameraConfig, FrigateConfig, GenAIConfig, GenAIProviderEnum +from frigate.config import CameraConfig, GenAIConfig, GenAIProviderEnum from frigate.const import CLIPS_DIR from frigate.data_processing.post.types import ReviewMetadata +from frigate.genai.manager import GenAIClientManager from frigate.models import Event logger = logging.getLogger(__name__) +__all__ = [ + "GenAIClient", + "GenAIClientManager", + "GenAIConfig", + "GenAIProviderEnum", + "PROVIDERS", + "load_providers", + "register_genai_provider", +] + 
PROVIDERS = {} @@ -352,19 +363,6 @@ Guidelines: } -def get_genai_client(config: FrigateConfig) -> Optional[GenAIClient]: - """Get the GenAI client.""" - if not config.genai.provider: - return None - - load_providers() - provider = PROVIDERS.get(config.genai.provider) - if provider: - return provider(config.genai) - - return None - - def load_providers(): package_dir = os.path.dirname(__file__) for filename in os.listdir(package_dir): diff --git a/frigate/genai/llama_cpp.py b/frigate/genai/llama_cpp.py index fafef74ae..70a94eec5 100644 --- a/frigate/genai/llama_cpp.py +++ b/frigate/genai/llama_cpp.py @@ -67,6 +67,7 @@ class LlamaCppClient(GenAIClient): # Build request payload with llama.cpp native options payload = { + "model": self.genai_config.model, "messages": [ { "role": "user", @@ -134,6 +135,7 @@ class LlamaCppClient(GenAIClient): openai_tool_choice = "required" payload = { + "model": self.genai_config.model, "messages": messages, } diff --git a/frigate/genai/manager.py b/frigate/genai/manager.py new file mode 100644 index 000000000..e462a0c39 --- /dev/null +++ b/frigate/genai/manager.py @@ -0,0 +1,89 @@ +"""GenAI client manager for Frigate. + +Manages GenAI provider clients from Frigate config. Configuration is read only +in _update_config(); no other code should read config.genai. Exposes clients +by role: tool_client, vision_client, embeddings_client. 
+""" + +import logging +from typing import TYPE_CHECKING, Optional + +from frigate.config import FrigateConfig +from frigate.config.camera.genai import GenAIRoleEnum + +if TYPE_CHECKING: + from frigate.genai import GenAIClient + +logger = logging.getLogger(__name__) + + +class GenAIClientManager: + """Manages GenAI provider clients from Frigate config.""" + + def __init__(self, config: FrigateConfig) -> None: + self._config = config + self._tool_client: Optional[GenAIClient] = None + self._vision_client: Optional[GenAIClient] = None + self._embeddings_client: Optional[GenAIClient] = None + self._update_config() + + def _update_config(self) -> None: + """Build role clients from current Frigate config.genai. + + Called from __init__ and can be called again when config is reloaded. + Each role (tools, vision, embeddings) gets the client for the provider + that has that role in its roles list. + """ + from frigate.genai import PROVIDERS, load_providers + + self._tool_client = None + self._vision_client = None + self._embeddings_client = None + + if not self._config.genai: + return + + load_providers() + + for _name, genai_cfg in self._config.genai.items(): + if not genai_cfg.provider: + continue + provider_cls = PROVIDERS.get(genai_cfg.provider) + if not provider_cls: + logger.warning( + "Unknown GenAI provider %s in config, skipping.", + genai_cfg.provider, + ) + continue + try: + client = provider_cls(genai_cfg) + except Exception as e: + logger.exception( + "Failed to create GenAI client for provider %s: %s", + genai_cfg.provider, + e, + ) + continue + + for role in genai_cfg.roles: + if role == GenAIRoleEnum.tools: + self._tool_client = client + elif role == GenAIRoleEnum.vision: + self._vision_client = client + elif role == GenAIRoleEnum.embeddings: + self._embeddings_client = client + + @property + def tool_client(self) -> "Optional[GenAIClient]": + """Client configured for the tools role (e.g. 
chat with function calling).""" + return self._tool_client + + @property + def vision_client(self) -> "Optional[GenAIClient]": + """Client configured for the vision role (e.g. review descriptions, object descriptions).""" + return self._vision_client + + @property + def embeddings_client(self) -> "Optional[GenAIClient]": + """Client configured for the embeddings role.""" + return self._embeddings_client diff --git a/frigate/util/config.py b/frigate/util/config.py index 1af5c8e4e..62db3c42b 100644 --- a/frigate/util/config.py +++ b/frigate/util/config.py @@ -438,6 +438,13 @@ def migrate_018_0(config: dict[str, dict[str, Any]]) -> dict[str, dict[str, Any] """Handle migrating frigate config to 0.18-0""" new_config = config.copy() + # Migrate GenAI to new format + genai = new_config.get("genai") + + if genai and genai.get("provider"): + genai["roles"] = ["embeddings", "vision", "tools"] + new_config["genai"] = {"default": genai} + # Remove deprecated sync_recordings from global record config if new_config.get("record", {}).get("sync_recordings") is not None: del new_config["record"]["sync_recordings"] From e7250f24cbbf093f524b0a9e596856d5aee2a3eb Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Fri, 27 Feb 2026 09:55:36 -0600 Subject: [PATCH 42/56] Full UI configuration (#22151) * use react-jsonschema-form for UI config * don't use properties wrapper when generating config i18n json * configure for full i18n support * section fields * add descriptions to all fields for i18n * motion i18n * fix nullable fields * sanitize internal fields * add switches widgets and use friendly names * fix nullable schema entries * ensure update_topic is added to api calls this needs further backend implementation to work correctly * add global sections, camera config overrides, and reset button * i18n * add reset logic to global config view * tweaks * fix sections and live validation * fix validation for schema objects that can be null * 
generic and custom per-field validation * improve generic error validation messages * remove show advanced fields switch * tweaks * use shadcn theme * fix array field template * i18n tweaks * remove collapsible around root section * deep merge schema for advanced fields * add array field item template and fix ffmpeg section * add missing i18n keys * tweaks * comment out api call for testing * add config groups as a separate i18n namespace * add descriptions to all pydantic fields * make titles more concise * new titles as i18n * update i18n config generation script to use json schema * tweaks * tweaks * rebase * clean up * form tweaks * add wildcards and fix object filter fields * add field template for additionalproperties schema objects * improve typing * add section description from schema and clarify global vs camera level descriptions * separate and consolidate global and camera i18n namespaces * clean up now obsolete namespaces * tweaks * refactor sections and overrides * add ability to render components before and after fields * fix titles * chore(sections): remove legacy single-section components replaced by template * refactor configs to use individual files with a template * fix review description * apply hidden fields after ui schema * move util * remove unused i18n * clean up error messages * fix fast refresh * add custom validation and use it for ffmpeg input roles * update nav tree * remove unused * re-add override and modified indicators * mark pending changes and add confirmation dialog for resets * fix red unsaved dot * tweaks * add docs links, readonly keys, and restart required per field * add special case and comments for global motion section * add section form special cases * combine review sections * tweaks * add audio labels endpoint * add audio label switches and input to filter list * fix type * remove key from config when resetting to default/global * don't show description for new key/val fields * tweaks * spacing tweaks * add activity 
indicator and scrollbar tweaks * add docs to filter fields * wording changes * fix global ffmpeg section * add review classification zones to review form * add backend endpoint and frontend widget for ffmpeg presets and manual args * improve wording * hide descriptions for additional properties arrays * add warning log about incorrectly nested model config * spacing and language tweaks * fix i18n keys * networking section docs and description * small wording tweaks * add layout grid field * refactor with shared utilities * field order * add individual detectors to schema add detector titles and descriptions (docstrings in pydantic are used for descriptions) and add i18n keys to globals * clean up detectors section and i18n * don't save model config back to yaml when saving detectors * add full detectors config to api model dump works around the way we use detector plugins so we can have the full detector config for the frontend * add restart button to toast when restart is required * add ui option to remove inner cards * fix buttons * section tweaks * don't zoom into text on mobile * make buttons sticky at bottom of sections * small tweaks * highlight label of changed fields * add null to enum list when unwrapping * refactor to shared utils and add save all button * add undo all button * add RJSF to dictionary * consolidate utils * preserve form data when changing cameras * add mono fonts * add popover to show what fields will be saved * fix mobile menu not re-rendering with unsaved dots * tweaks * fix logger and env vars config section saving use escaped periods in keys to retain them in the config file (eg "frigate.embeddings") * add timezone widget * role map field with validation * fix validation for model section * add another hidden field * add footer message for required restart * use rjsf for notifications view * fix config saving * add replace rules field * default column layout and add field sizing * clean up field template * refactor profile settings to 
match rjsf forms * tweaks * refactor frigate+ view and make tweaks to sections * show frigate+ model info in detection model settings when using a frigate+ model * update restartRequired for all fields * fix restart fields * tweaks and add ability enable disabled cameras more backend changes required * require restart when enabling camera that is disabled in config * disable save when form is invalid * refactor ffmpeg section for readability * change label * clean up camera inputs fields * misc tweaks to ffmpeg section - add raw paths endpoint to ensure credentials get saved - restart required tooltip * maintenance settings tweaks * don't mutate with lodash * fix description re-rendering for nullable object fields * hide reindex field * update rjsf * add frigate+ description to settings pane * disable save all when any section is invalid * show translated field name in validation error pane * clean up * remove unused * fix genai merge * fix genai --- .cspell/frigate-dictionary.txt | 1 + frigate/api/app.py | 64 +- frigate/config/auth.py | 46 +- frigate/config/camera/audio.py | 36 +- frigate/config/camera/birdseye.py | 71 +- frigate/config/camera/camera.py | 110 +- frigate/config/camera/detect.py | 54 +- frigate/config/camera/ffmpeg.py | 66 +- frigate/config/camera/genai.py | 3 - frigate/config/camera/live.py | 17 +- frigate/config/camera/motion.py | 56 +- frigate/config/camera/mqtt.py | 36 +- frigate/config/camera/notification.py | 21 +- frigate/config/camera/objects.py | 83 +- frigate/config/camera/onvif.py | 76 +- frigate/config/camera/record.py | 76 +- frigate/config/camera/review.py | 88 +- frigate/config/camera/snapshots.py | 56 +- frigate/config/camera/timestamp.py | 50 +- frigate/config/camera/ui.py | 12 +- frigate/config/camera/zone.py | 26 +- frigate/config/camera_group.py | 18 +- frigate/config/classification.py | 248 +- frigate/config/config.py | 146 +- frigate/config/database.py | 6 +- frigate/config/logger.py | 10 +- frigate/config/mqtt.py | 74 +- 
frigate/config/network.py | 26 +- frigate/config/proxy.py | 27 +- frigate/config/telemetry.py | 33 +- frigate/config/tls.py | 6 +- frigate/config/ui.py | 22 +- frigate/detectors/detector_config.py | 61 +- frigate/detectors/plugins/cpu_tfl.py | 14 +- frigate/detectors/plugins/deepstack.py | 24 +- frigate/detectors/plugins/degirum.py | 26 +- frigate/detectors/plugins/edgetpu_tfl.py | 14 +- frigate/detectors/plugins/hailo8l.py | 14 +- frigate/detectors/plugins/memryx.py | 14 +- frigate/detectors/plugins/onnx.py | 14 +- frigate/detectors/plugins/openvino.py | 14 +- frigate/detectors/plugins/rknn.py | 16 +- frigate/detectors/plugins/synaptics.py | 7 + frigate/detectors/plugins/teflon_tfl.py | 7 + frigate/detectors/plugins/tensorrt.py | 12 +- frigate/detectors/plugins/zmq_ipc.py | 22 +- frigate/util/builtin.py | 40 +- frigate/util/schema.py | 46 + generate_config_translations.py | 527 +++- web/package-lock.json | 1415 ++++++++++- web/package.json | 4 + web/public/locales/en/common.json | 11 +- web/public/locales/en/config/audio.json | 26 - .../en/config/audio_transcription.json | 23 - web/public/locales/en/config/auth.json | 35 - web/public/locales/en/config/birdseye.json | 37 - .../locales/en/config/camera_groups.json | 14 - web/public/locales/en/config/cameras.json | 1613 ++++++------ .../locales/en/config/classification.json | 58 - web/public/locales/en/config/database.json | 8 - web/public/locales/en/config/detect.json | 51 - web/public/locales/en/config/detectors.json | 14 - .../locales/en/config/environment_vars.json | 3 - .../locales/en/config/face_recognition.json | 36 - web/public/locales/en/config/ffmpeg.json | 34 - web/public/locales/en/config/genai.json | 23 - web/public/locales/en/config/global.json | 2185 +++++++++++++++++ web/public/locales/en/config/go2rtc.json | 3 - web/public/locales/en/config/groups.json | 73 + web/public/locales/en/config/live.json | 14 - web/public/locales/en/config/logger.json | 11 - web/public/locales/en/config/lpr.json | 45 - 
web/public/locales/en/config/model.json | 35 - web/public/locales/en/config/motion.json | 3 - web/public/locales/en/config/mqtt.json | 44 - web/public/locales/en/config/networking.json | 24 - .../locales/en/config/notifications.json | 17 - web/public/locales/en/config/objects.json | 77 - web/public/locales/en/config/proxy.json | 31 - web/public/locales/en/config/record.json | 90 - web/public/locales/en/config/review.json | 74 - web/public/locales/en/config/safe_mode.json | 3 - .../locales/en/config/semantic_search.json | 21 - web/public/locales/en/config/snapshots.json | 43 - web/public/locales/en/config/telemetry.json | 28 - .../locales/en/config/timestamp_style.json | 31 - web/public/locales/en/config/tls.json | 8 - web/public/locales/en/config/ui.json | 20 - web/public/locales/en/config/validation.json | 32 + web/public/locales/en/config/version.json | 3 - web/public/locales/en/views/settings.json | 291 ++- web/src/components/card/SettingsGroupCard.tsx | 56 + web/src/components/config-form/ConfigForm.tsx | 370 +++ .../config-form/section-configs/audio.ts | 42 + .../section-configs/audio_transcription.ts | 19 + .../config-form/section-configs/auth.ts | 49 + .../config-form/section-configs/birdseye.ts | 45 + .../section-configs/classification.ts | 12 + .../config-form/section-configs/database.ts | 17 + .../config-form/section-configs/detect.ts | 49 + .../config-form/section-configs/detectors.ts | 28 + .../section-configs/environment_vars.ts | 16 + .../section-configs/face_recognition.ts | 50 + .../config-form/section-configs/ffmpeg.ts | 179 ++ .../config-form/section-configs/genai.ts | 48 + .../config-form/section-configs/live.ts | 21 + .../config-form/section-configs/logger.ts | 12 + .../config-form/section-configs/lpr.ts | 73 + .../config-form/section-configs/model.ts | 53 + .../config-form/section-configs/motion.ts | 49 + .../config-form/section-configs/mqtt.ts | 73 + .../config-form/section-configs/networking.ts | 30 + .../section-configs/notifications.ts | 26 
+ .../config-form/section-configs/objects.ts | 104 + .../config-form/section-configs/onvif.ts | 46 + .../config-form/section-configs/proxy.ts | 33 + .../config-form/section-configs/record.ts | 48 + .../config-form/section-configs/review.ts | 54 + .../section-configs/semantic_search.ts | 24 + .../config-form/section-configs/snapshots.ts | 45 + .../config-form/section-configs/telemetry.ts | 19 + .../section-configs/timestamp_style.ts | 27 + .../config-form/section-configs/tls.ts | 20 + .../config-form/section-configs/types.ts | 7 + .../config-form/section-configs/ui.ts | 30 + .../config-form/section-validations/ffmpeg.ts | 84 + .../config-form/section-validations/index.ts | 31 + .../config-form/section-validations/proxy.ts | 37 + .../components/config-form/sectionConfigs.ts | 85 + .../CameraReviewClassification.tsx | 403 +++ .../CameraReviewStatusToggles.tsx | 164 ++ .../NotificationsSettingsExtras.tsx | 843 +++++++ .../sectionExtras/ProxyRoleMap.tsx | 201 ++ .../sectionExtras/SemanticSearchReindex.tsx | 106 + .../config-form/sectionExtras/registry.ts | 57 + .../config-form/sections/BaseSection.tsx | 1009 ++++++++ .../sections/ConfigSectionTemplate.tsx | 33 + .../components/config-form/sections/index.ts | 14 + .../sections/section-special-cases.ts | 203 ++ .../config-form/theme/components/index.tsx | 136 + .../theme/fields/CameraInputsField.tsx | 426 ++++ .../theme/fields/DetectorHardwareField.tsx | 891 +++++++ .../theme/fields/LayoutGridField.tsx | 587 +++++ .../theme/fields/ReplaceRulesField.tsx | 253 ++ .../config-form/theme/fields/index.ts | 4 + .../config-form/theme/fields/nullableUtils.ts | 60 + .../config-form/theme/frigateTheme.ts | 95 + web/src/components/config-form/theme/index.ts | 5 + .../templates/ArrayFieldItemTemplate.tsx | 58 + .../theme/templates/ArrayFieldTemplate.tsx | 60 + .../theme/templates/BaseInputTemplate.tsx | 48 + .../templates/DescriptionFieldTemplate.tsx | 37 + .../theme/templates/ErrorListTemplate.tsx | 193 ++ 
.../theme/templates/FieldTemplate.tsx | 616 +++++ .../templates/MultiSchemaFieldTemplate.tsx | 45 + .../theme/templates/ObjectFieldTemplate.tsx | 503 ++++ .../theme/templates/TitleFieldTemplate.tsx | 17 + .../templates/WrapIfAdditionalTemplate.tsx | 123 + .../config-form/theme/utils/fieldSizing.ts | 37 + .../config-form/theme/utils/i18n.ts | 182 ++ .../config-form/theme/utils/index.ts | 18 + .../config-form/theme/utils/overrides.ts | 128 + .../theme/widgets/ArrayAsTextWidget.tsx | 36 + .../widgets/AudioLabelSwitchesWidget.tsx | 101 + .../theme/widgets/CameraPathWidget.tsx | 202 ++ .../theme/widgets/CheckboxWidget.tsx | 17 + .../config-form/theme/widgets/ColorWidget.tsx | 53 + .../theme/widgets/FfmpegArgsWidget.tsx | 344 +++ .../theme/widgets/InputRolesWidget.tsx | 67 + .../theme/widgets/NumberWidget.tsx | 44 + .../widgets/ObjectLabelSwitchesWidget.tsx | 101 + .../theme/widgets/PasswordWidget.tsx | 59 + .../config-form/theme/widgets/RangeWidget.tsx | 31 + .../theme/widgets/SelectWidget.tsx | 51 + .../theme/widgets/SwitchWidget.tsx | 17 + .../theme/widgets/SwitchesWidget.tsx | 231 ++ .../config-form/theme/widgets/TagsWidget.tsx | 74 + .../config-form/theme/widgets/TextWidget.tsx | 48 + .../theme/widgets/TextareaWidget.tsx | 48 + .../theme/widgets/TimezoneSelectWidget.tsx | 64 + .../theme/widgets/ZoneSwitchesWidget.tsx | 49 + .../indicators/RestartRequiredIndicator.tsx | 38 + .../overlay/detail/SaveAllPreviewPopover.tsx | 142 ++ web/src/hooks/use-config-override.ts | 279 +++ web/src/hooks/use-config-schema.ts | 132 + web/src/lib/config-schema/errorMessages.ts | 115 + web/src/lib/config-schema/index.ts | 17 + web/src/lib/config-schema/transformer.ts | 682 +++++ web/src/lib/utils.ts | 51 + web/src/pages/Logs.tsx | 2 +- web/src/pages/Settings.tsx | 1043 +++++++- web/src/types/configForm.ts | 45 + web/src/types/frigateConfig.ts | 2 +- web/src/utils/configUtil.ts | 526 ++++ web/src/utils/i18n.ts | 4 + .../views/settings/CameraManagementView.tsx | 226 +- 
.../settings/CameraReviewSettingsView.tsx | 751 ------ .../settings/FrigatePlusSettingsView.tsx | 584 ++--- .../settings/MaintenanceSettingsView.tsx | 34 +- .../settings/NotificationsSettingsView.tsx | 785 ------ web/src/views/settings/SingleSectionPage.tsx | 166 ++ .../SystemDetectionModelSettingsView.tsx | 88 + web/src/views/settings/UiSettingsView.tsx | 508 ++-- .../FrigatePlusCurrentModelSummary.tsx | 61 + web/tailwind.config.cjs | 10 + web/vite.config.ts | 2 +- 206 files changed, 22204 insertions(+), 4439 deletions(-) create mode 100644 frigate/util/schema.py delete mode 100644 web/public/locales/en/config/audio.json delete mode 100644 web/public/locales/en/config/audio_transcription.json delete mode 100644 web/public/locales/en/config/auth.json delete mode 100644 web/public/locales/en/config/birdseye.json delete mode 100644 web/public/locales/en/config/camera_groups.json delete mode 100644 web/public/locales/en/config/classification.json delete mode 100644 web/public/locales/en/config/database.json delete mode 100644 web/public/locales/en/config/detect.json delete mode 100644 web/public/locales/en/config/detectors.json delete mode 100644 web/public/locales/en/config/environment_vars.json delete mode 100644 web/public/locales/en/config/face_recognition.json delete mode 100644 web/public/locales/en/config/ffmpeg.json delete mode 100644 web/public/locales/en/config/genai.json create mode 100644 web/public/locales/en/config/global.json delete mode 100644 web/public/locales/en/config/go2rtc.json create mode 100644 web/public/locales/en/config/groups.json delete mode 100644 web/public/locales/en/config/live.json delete mode 100644 web/public/locales/en/config/logger.json delete mode 100644 web/public/locales/en/config/lpr.json delete mode 100644 web/public/locales/en/config/model.json delete mode 100644 web/public/locales/en/config/motion.json delete mode 100644 web/public/locales/en/config/mqtt.json delete mode 100644 web/public/locales/en/config/networking.json 
delete mode 100644 web/public/locales/en/config/notifications.json delete mode 100644 web/public/locales/en/config/objects.json delete mode 100644 web/public/locales/en/config/proxy.json delete mode 100644 web/public/locales/en/config/record.json delete mode 100644 web/public/locales/en/config/review.json delete mode 100644 web/public/locales/en/config/safe_mode.json delete mode 100644 web/public/locales/en/config/semantic_search.json delete mode 100644 web/public/locales/en/config/snapshots.json delete mode 100644 web/public/locales/en/config/telemetry.json delete mode 100644 web/public/locales/en/config/timestamp_style.json delete mode 100644 web/public/locales/en/config/tls.json delete mode 100644 web/public/locales/en/config/ui.json create mode 100644 web/public/locales/en/config/validation.json delete mode 100644 web/public/locales/en/config/version.json create mode 100644 web/src/components/card/SettingsGroupCard.tsx create mode 100644 web/src/components/config-form/ConfigForm.tsx create mode 100644 web/src/components/config-form/section-configs/audio.ts create mode 100644 web/src/components/config-form/section-configs/audio_transcription.ts create mode 100644 web/src/components/config-form/section-configs/auth.ts create mode 100644 web/src/components/config-form/section-configs/birdseye.ts create mode 100644 web/src/components/config-form/section-configs/classification.ts create mode 100644 web/src/components/config-form/section-configs/database.ts create mode 100644 web/src/components/config-form/section-configs/detect.ts create mode 100644 web/src/components/config-form/section-configs/detectors.ts create mode 100644 web/src/components/config-form/section-configs/environment_vars.ts create mode 100644 web/src/components/config-form/section-configs/face_recognition.ts create mode 100644 web/src/components/config-form/section-configs/ffmpeg.ts create mode 100644 web/src/components/config-form/section-configs/genai.ts create mode 100644 
web/src/components/config-form/section-configs/live.ts create mode 100644 web/src/components/config-form/section-configs/logger.ts create mode 100644 web/src/components/config-form/section-configs/lpr.ts create mode 100644 web/src/components/config-form/section-configs/model.ts create mode 100644 web/src/components/config-form/section-configs/motion.ts create mode 100644 web/src/components/config-form/section-configs/mqtt.ts create mode 100644 web/src/components/config-form/section-configs/networking.ts create mode 100644 web/src/components/config-form/section-configs/notifications.ts create mode 100644 web/src/components/config-form/section-configs/objects.ts create mode 100644 web/src/components/config-form/section-configs/onvif.ts create mode 100644 web/src/components/config-form/section-configs/proxy.ts create mode 100644 web/src/components/config-form/section-configs/record.ts create mode 100644 web/src/components/config-form/section-configs/review.ts create mode 100644 web/src/components/config-form/section-configs/semantic_search.ts create mode 100644 web/src/components/config-form/section-configs/snapshots.ts create mode 100644 web/src/components/config-form/section-configs/telemetry.ts create mode 100644 web/src/components/config-form/section-configs/timestamp_style.ts create mode 100644 web/src/components/config-form/section-configs/tls.ts create mode 100644 web/src/components/config-form/section-configs/types.ts create mode 100644 web/src/components/config-form/section-configs/ui.ts create mode 100644 web/src/components/config-form/section-validations/ffmpeg.ts create mode 100644 web/src/components/config-form/section-validations/index.ts create mode 100644 web/src/components/config-form/section-validations/proxy.ts create mode 100644 web/src/components/config-form/sectionConfigs.ts create mode 100644 web/src/components/config-form/sectionExtras/CameraReviewClassification.tsx create mode 100644 
web/src/components/config-form/sectionExtras/CameraReviewStatusToggles.tsx create mode 100644 web/src/components/config-form/sectionExtras/NotificationsSettingsExtras.tsx create mode 100644 web/src/components/config-form/sectionExtras/ProxyRoleMap.tsx create mode 100644 web/src/components/config-form/sectionExtras/SemanticSearchReindex.tsx create mode 100644 web/src/components/config-form/sectionExtras/registry.ts create mode 100644 web/src/components/config-form/sections/BaseSection.tsx create mode 100644 web/src/components/config-form/sections/ConfigSectionTemplate.tsx create mode 100644 web/src/components/config-form/sections/index.ts create mode 100644 web/src/components/config-form/sections/section-special-cases.ts create mode 100644 web/src/components/config-form/theme/components/index.tsx create mode 100644 web/src/components/config-form/theme/fields/CameraInputsField.tsx create mode 100644 web/src/components/config-form/theme/fields/DetectorHardwareField.tsx create mode 100644 web/src/components/config-form/theme/fields/LayoutGridField.tsx create mode 100644 web/src/components/config-form/theme/fields/ReplaceRulesField.tsx create mode 100644 web/src/components/config-form/theme/fields/index.ts create mode 100644 web/src/components/config-form/theme/fields/nullableUtils.ts create mode 100644 web/src/components/config-form/theme/frigateTheme.ts create mode 100644 web/src/components/config-form/theme/index.ts create mode 100644 web/src/components/config-form/theme/templates/ArrayFieldItemTemplate.tsx create mode 100644 web/src/components/config-form/theme/templates/ArrayFieldTemplate.tsx create mode 100644 web/src/components/config-form/theme/templates/BaseInputTemplate.tsx create mode 100644 web/src/components/config-form/theme/templates/DescriptionFieldTemplate.tsx create mode 100644 web/src/components/config-form/theme/templates/ErrorListTemplate.tsx create mode 100644 web/src/components/config-form/theme/templates/FieldTemplate.tsx create mode 100644 
web/src/components/config-form/theme/templates/MultiSchemaFieldTemplate.tsx create mode 100644 web/src/components/config-form/theme/templates/ObjectFieldTemplate.tsx create mode 100644 web/src/components/config-form/theme/templates/TitleFieldTemplate.tsx create mode 100644 web/src/components/config-form/theme/templates/WrapIfAdditionalTemplate.tsx create mode 100644 web/src/components/config-form/theme/utils/fieldSizing.ts create mode 100644 web/src/components/config-form/theme/utils/i18n.ts create mode 100644 web/src/components/config-form/theme/utils/index.ts create mode 100644 web/src/components/config-form/theme/utils/overrides.ts create mode 100644 web/src/components/config-form/theme/widgets/ArrayAsTextWidget.tsx create mode 100644 web/src/components/config-form/theme/widgets/AudioLabelSwitchesWidget.tsx create mode 100644 web/src/components/config-form/theme/widgets/CameraPathWidget.tsx create mode 100644 web/src/components/config-form/theme/widgets/CheckboxWidget.tsx create mode 100644 web/src/components/config-form/theme/widgets/ColorWidget.tsx create mode 100644 web/src/components/config-form/theme/widgets/FfmpegArgsWidget.tsx create mode 100644 web/src/components/config-form/theme/widgets/InputRolesWidget.tsx create mode 100644 web/src/components/config-form/theme/widgets/NumberWidget.tsx create mode 100644 web/src/components/config-form/theme/widgets/ObjectLabelSwitchesWidget.tsx create mode 100644 web/src/components/config-form/theme/widgets/PasswordWidget.tsx create mode 100644 web/src/components/config-form/theme/widgets/RangeWidget.tsx create mode 100644 web/src/components/config-form/theme/widgets/SelectWidget.tsx create mode 100644 web/src/components/config-form/theme/widgets/SwitchWidget.tsx create mode 100644 web/src/components/config-form/theme/widgets/SwitchesWidget.tsx create mode 100644 web/src/components/config-form/theme/widgets/TagsWidget.tsx create mode 100644 web/src/components/config-form/theme/widgets/TextWidget.tsx create mode 100644 
web/src/components/config-form/theme/widgets/TextareaWidget.tsx create mode 100644 web/src/components/config-form/theme/widgets/TimezoneSelectWidget.tsx create mode 100644 web/src/components/config-form/theme/widgets/ZoneSwitchesWidget.tsx create mode 100644 web/src/components/indicators/RestartRequiredIndicator.tsx create mode 100644 web/src/components/overlay/detail/SaveAllPreviewPopover.tsx create mode 100644 web/src/hooks/use-config-override.ts create mode 100644 web/src/hooks/use-config-schema.ts create mode 100644 web/src/lib/config-schema/errorMessages.ts create mode 100644 web/src/lib/config-schema/index.ts create mode 100644 web/src/lib/config-schema/transformer.ts create mode 100644 web/src/types/configForm.ts create mode 100644 web/src/utils/configUtil.ts delete mode 100644 web/src/views/settings/CameraReviewSettingsView.tsx delete mode 100644 web/src/views/settings/NotificationsSettingsView.tsx create mode 100644 web/src/views/settings/SingleSectionPage.tsx create mode 100644 web/src/views/settings/SystemDetectionModelSettingsView.tsx create mode 100644 web/src/views/settings/components/FrigatePlusCurrentModelSummary.tsx diff --git a/.cspell/frigate-dictionary.txt b/.cspell/frigate-dictionary.txt index f2bcf417a..f5292b167 100644 --- a/.cspell/frigate-dictionary.txt +++ b/.cspell/frigate-dictionary.txt @@ -229,6 +229,7 @@ Reolink restream restreamed restreaming +RJSF rkmpp rknn rkrga diff --git a/frigate/api/app.py b/frigate/api/app.py index 9246095ca..d24d9e868 100644 --- a/frigate/api/app.py +++ b/frigate/api/app.py @@ -49,10 +49,12 @@ from frigate.types import JobStatusTypesEnum from frigate.util.builtin import ( clean_camera_user_pass, flatten_config_data, + load_labels, process_config_query_string, update_yaml_file_bulk, ) from frigate.util.config import find_config_file +from frigate.util.schema import get_config_schema from frigate.util.services import ( get_nvidia_driver_info, process_logs, @@ -77,9 +79,7 @@ def is_healthy(): 
@router.get("/config/schema.json", dependencies=[Depends(allow_public())]) def config_schema(request: Request): - return Response( - content=request.app.frigate_config.schema_json(), media_type="application/json" - ) + return JSONResponse(content=get_config_schema(FrigateConfig)) @router.get( @@ -125,6 +125,10 @@ def config(request: Request): config: dict[str, dict[str, Any]] = config_obj.model_dump( mode="json", warnings="none", exclude_none=True ) + config["detectors"] = { + name: detector.model_dump(mode="json", warnings="none", exclude_none=True) + for name, detector in config_obj.detectors.items() + } # remove the mqtt password config["mqtt"].pop("password", None) @@ -195,6 +199,54 @@ def config(request: Request): return JSONResponse(content=config) +@router.get("/ffmpeg/presets", dependencies=[Depends(allow_any_authenticated())]) +def ffmpeg_presets(): + """Return available ffmpeg preset keys for config UI usage.""" + + # Whitelist based on documented presets in ffmpeg_presets.md + hwaccel_presets = [ + "preset-rpi-64-h264", + "preset-rpi-64-h265", + "preset-vaapi", + "preset-intel-qsv-h264", + "preset-intel-qsv-h265", + "preset-nvidia", + "preset-jetson-h264", + "preset-jetson-h265", + "preset-rkmpp", + ] + input_presets = [ + "preset-http-jpeg-generic", + "preset-http-mjpeg-generic", + "preset-http-reolink", + "preset-rtmp-generic", + "preset-rtsp-generic", + "preset-rtsp-restream", + "preset-rtsp-restream-low-latency", + "preset-rtsp-udp", + "preset-rtsp-blue-iris", + ] + record_output_presets = [ + "preset-record-generic", + "preset-record-generic-audio-copy", + "preset-record-generic-audio-aac", + "preset-record-mjpeg", + "preset-record-jpeg", + "preset-record-ubiquiti", + ] + + return JSONResponse( + content={ + "hwaccel_args": hwaccel_presets, + "input_args": input_presets, + "output_args": { + "record": record_output_presets, + "detect": [], + }, + } + ) + + @router.get("/config/raw_paths", dependencies=[Depends(require_role(["admin"]))]) def 
config_raw_paths(request: Request): """Admin-only endpoint that returns camera paths and go2rtc streams without credential masking.""" @@ -755,6 +807,12 @@ def get_sub_labels(split_joined: Optional[int] = None): return JSONResponse(content=sub_labels) +@router.get("/audio_labels", dependencies=[Depends(allow_any_authenticated())]) +def get_audio_labels(): + labels = load_labels("/audio-labelmap.txt", prefill=521) + return JSONResponse(content=labels) + + @router.get("/plus/models", dependencies=[Depends(allow_any_authenticated())]) def plusModels(request: Request, filterByCurrentModelDetector: bool = False): if not request.app.frigate_config.plus_api.is_active(): diff --git a/frigate/config/auth.py b/frigate/config/auth.py index 6935350a0..fccbfbaf2 100644 --- a/frigate/config/auth.py +++ b/frigate/config/auth.py @@ -8,39 +8,63 @@ __all__ = ["AuthConfig"] class AuthConfig(FrigateBaseModel): - enabled: bool = Field(default=True, title="Enable authentication") + enabled: bool = Field( + default=True, + title="Enable authentication", + description="Enable native authentication for the Frigate UI.", + ) reset_admin_password: bool = Field( - default=False, title="Reset the admin password on startup" + default=False, + title="Reset admin password", + description="If true, reset the admin user's password on startup and print the new password in logs.", ) cookie_name: str = Field( - default="frigate_token", title="Name for jwt token cookie", pattern=r"^[a-z_]+$" + default="frigate_token", + title="JWT cookie name", + description="Name of the cookie used to store the JWT token for native authentication.", + pattern=r"^[a-z_]+$", + ) + cookie_secure: bool = Field( + default=False, + title="Secure cookie flag", + description="Set the secure flag on the auth cookie; should be true when using TLS.", ) - cookie_secure: bool = Field(default=False, title="Set secure flag on cookie") session_length: int = Field( - default=86400, title="Session length for jwt session tokens", ge=60 
+ default=86400, + title="Session length", + description="Session duration in seconds for JWT-based sessions.", + ge=60, ) refresh_time: int = Field( default=1800, - title="Refresh the session if it is going to expire in this many seconds", + title="Session refresh window", + description="When a session is within this many seconds of expiring, refresh it back to full length.", ge=30, ) failed_login_rate_limit: Optional[str] = Field( default=None, - title="Rate limits for failed login attempts.", + title="Failed login limits", + description="Rate limiting rules for failed login attempts to reduce brute-force attacks.", ) trusted_proxies: list[str] = Field( default=[], - title="Trusted proxies for determining IP address to rate limit", + title="Trusted proxies", + description="List of trusted proxy IPs used when determining client IP for rate limiting.", ) # As of Feb 2023, OWASP recommends 600000 iterations for PBKDF2-SHA256 - hash_iterations: int = Field(default=600000, title="Password hash iterations") + hash_iterations: int = Field( + default=600000, + title="Hash iterations", + description="Number of PBKDF2-SHA256 iterations to use when hashing user passwords.", + ) roles: Dict[str, List[str]] = Field( default_factory=dict, - title="Role to camera mappings. Empty list grants access to all cameras.", + title="Role mappings", + description="Map roles to camera lists. An empty list grants access to all cameras for the role.", ) admin_first_time_login: Optional[bool] = Field( default=False, - title="Internal field to expose first-time admin login flag to the UI", + title="First-time admin flag", description=( "When true the UI may show a help link on the login page informing users how to sign in after an admin password reset. 
" ), diff --git a/frigate/config/camera/audio.py b/frigate/config/camera/audio.py index 3734455a2..6028802df 100644 --- a/frigate/config/camera/audio.py +++ b/frigate/config/camera/audio.py @@ -17,25 +17,45 @@ class AudioFilterConfig(FrigateBaseModel): default=0.8, ge=AUDIO_MIN_CONFIDENCE, lt=1.0, - title="Minimum detection confidence threshold for audio to be counted.", + title="Minimum audio confidence", + description="Minimum confidence threshold for the audio event to be counted.", ) class AudioConfig(FrigateBaseModel): - enabled: bool = Field(default=False, title="Enable audio events.") + enabled: bool = Field( + default=False, + title="Enable audio detection", + description="Enable or disable audio event detection for all cameras; can be overridden per-camera.", + ) max_not_heard: int = Field( - default=30, title="Seconds of not hearing the type of audio to end the event." + default=30, + title="End timeout", + description="Amount of seconds without the configured audio type before the audio event is ended.", ) min_volume: int = Field( - default=500, title="Min volume required to run audio detection." + default=500, + title="Minimum volume", + description="Minimum RMS volume threshold required to run audio detection; lower values increase sensitivity (e.g., 200 high, 500 medium, 1000 low).", ) listen: list[str] = Field( - default=DEFAULT_LISTEN_AUDIO, title="Audio to listen for." + default=DEFAULT_LISTEN_AUDIO, + title="Listen types", + description="List of audio event types to detect (for example: bark, fire_alarm, scream, speech, yell).", ) filters: Optional[dict[str, AudioFilterConfig]] = Field( - None, title="Audio filters." + None, + title="Audio filters", + description="Per-audio-type filter settings such as confidence thresholds used to reduce false positives.", ) enabled_in_config: Optional[bool] = Field( - None, title="Keep track of original state of audio detection." 
+ None, + title="Original audio state", + description="Indicates whether audio detection was originally enabled in the static config file.", + ) + num_threads: int = Field( + default=2, + title="Detection threads", + description="Number of threads to use for audio detection processing.", + ge=1, ) - num_threads: int = Field(default=2, title="Number of detection threads", ge=1) diff --git a/frigate/config/camera/birdseye.py b/frigate/config/camera/birdseye.py index 1e6f0f335..32aa66a98 100644 --- a/frigate/config/camera/birdseye.py +++ b/frigate/config/camera/birdseye.py @@ -29,45 +29,88 @@ class BirdseyeModeEnum(str, Enum): class BirdseyeLayoutConfig(FrigateBaseModel): scaling_factor: float = Field( - default=2.0, title="Birdseye Scaling Factor", ge=1.0, le=5.0 + default=2.0, + title="Scaling factor", + description="Scaling factor used by the layout calculator (range 1.0 to 5.0).", + ge=1.0, + le=5.0, + ) + max_cameras: Optional[int] = Field( + default=None, + title="Max cameras", + description="Maximum number of cameras to display at once in Birdseye; shows the most recent cameras.", ) - max_cameras: Optional[int] = Field(default=None, title="Max cameras") class BirdseyeConfig(FrigateBaseModel): - enabled: bool = Field(default=True, title="Enable birdseye view.") + enabled: bool = Field( + default=True, + title="Enable Birdseye", + description="Enable or disable the Birdseye view feature.", + ) mode: BirdseyeModeEnum = Field( - default=BirdseyeModeEnum.objects, title="Tracking mode." 
+ default=BirdseyeModeEnum.objects, + title="Tracking mode", + description="Mode for including cameras in Birdseye: 'objects', 'motion', or 'continuous'.", ) - restream: bool = Field(default=False, title="Restream birdseye via RTSP.") - width: int = Field(default=1280, title="Birdseye width.") - height: int = Field(default=720, title="Birdseye height.") + restream: bool = Field( + default=False, + title="Restream RTSP", + description="Re-stream the Birdseye output as an RTSP feed; enabling this will keep Birdseye running continuously.", + ) + width: int = Field( + default=1280, + title="Width", + description="Output width (pixels) of the composed Birdseye frame.", + ) + height: int = Field( + default=720, + title="Height", + description="Output height (pixels) of the composed Birdseye frame.", + ) quality: int = Field( default=8, - title="Encoding quality.", + title="Encoding quality", + description="Encoding quality for the Birdseye mpeg1 feed (1 highest quality, 31 lowest).", ge=1, le=31, ) inactivity_threshold: int = Field( - default=30, title="Birdseye Inactivity Threshold", gt=0 + default=30, + title="Inactivity threshold", + description="Seconds of inactivity after which a camera will stop being shown in Birdseye.", + gt=0, ) layout: BirdseyeLayoutConfig = Field( - default_factory=BirdseyeLayoutConfig, title="Birdseye Layout Config" + default_factory=BirdseyeLayoutConfig, + title="Layout", + description="Layout options for the Birdseye composition.", ) idle_heartbeat_fps: float = Field( default=0.0, ge=0.0, le=10.0, - title="Idle heartbeat FPS (0 disables, max 10)", + title="Idle heartbeat FPS", + description="Frames-per-second to resend the last composed Birdseye frame when idle; set to 0 to disable.", ) # uses BaseModel because some global attributes are not available at the camera level class BirdseyeCameraConfig(BaseModel): - enabled: bool = Field(default=True, title="Enable birdseye view for camera.") + enabled: bool = Field( + default=True, + 
title="Enable Birdseye", + description="Enable or disable the Birdseye view feature.", + ) mode: BirdseyeModeEnum = Field( - default=BirdseyeModeEnum.objects, title="Tracking mode for camera." + default=BirdseyeModeEnum.objects, + title="Tracking mode", + description="Mode for including cameras in Birdseye: 'objects', 'motion', or 'continuous'.", ) - order: int = Field(default=0, title="Position of the camera in the birdseye view.") + order: int = Field( + default=0, + title="Position", + description="Numeric position controlling the camera's ordering in the Birdseye layout.", + ) diff --git a/frigate/config/camera/camera.py b/frigate/config/camera/camera.py index 0f2b1c8be..21397065b 100644 --- a/frigate/config/camera/camera.py +++ b/frigate/config/camera/camera.py @@ -50,10 +50,17 @@ class CameraTypeEnum(str, Enum): class CameraConfig(FrigateBaseModel): - name: Optional[str] = Field(None, title="Camera name.", pattern=REGEX_CAMERA_NAME) + name: Optional[str] = Field( + None, + title="Camera name", + description="Camera name is required", + pattern=REGEX_CAMERA_NAME, + ) friendly_name: Optional[str] = Field( - None, title="Camera friendly name used in the Frigate UI." + None, + title="Friendly name", + description="Camera friendly name used in the Frigate UI", ) @model_validator(mode="before") @@ -63,80 +70,129 @@ class CameraConfig(FrigateBaseModel): pass return values - enabled: bool = Field(default=True, title="Enable camera.") + enabled: bool = Field(default=True, title="Enabled", description="Enabled") # Options with global fallback audio: AudioConfig = Field( - default_factory=AudioConfig, title="Audio events configuration." 
+ default_factory=AudioConfig, + title="Audio events", + description="Settings for audio-based event detection for this camera.", ) audio_transcription: CameraAudioTranscriptionConfig = Field( default_factory=CameraAudioTranscriptionConfig, - title="Audio transcription config.", + title="Audio transcription", + description="Settings for live and speech audio transcription used for events and live captions.", ) birdseye: BirdseyeCameraConfig = Field( - default_factory=BirdseyeCameraConfig, title="Birdseye camera configuration." + default_factory=BirdseyeCameraConfig, + title="Birdseye", + description="Settings for the Birdseye composite view that composes multiple camera feeds into a single layout.", ) detect: DetectConfig = Field( - default_factory=DetectConfig, title="Object detection configuration." + default_factory=DetectConfig, + title="Object Detection", + description="Settings for the detection/detect role used to run object detection and initialize trackers.", ) face_recognition: CameraFaceRecognitionConfig = Field( - default_factory=CameraFaceRecognitionConfig, title="Face recognition config." + default_factory=CameraFaceRecognitionConfig, + title="Face recognition", + description="Settings for face detection and recognition for this camera.", + ) + ffmpeg: CameraFfmpegConfig = Field( + title="FFmpeg", + description="FFmpeg settings including binary path, args, hwaccel options, and per-role output args.", ) - ffmpeg: CameraFfmpegConfig = Field(title="FFmpeg configuration for the camera.") live: CameraLiveConfig = Field( - default_factory=CameraLiveConfig, title="Live playback settings." + default_factory=CameraLiveConfig, + title="Live playback", + description="Settings used by the Web UI to control live stream selection, resolution and quality.", ) lpr: CameraLicensePlateRecognitionConfig = Field( - default_factory=CameraLicensePlateRecognitionConfig, title="LPR config." 
+ default_factory=CameraLicensePlateRecognitionConfig, + title="License Plate Recognition", + description="License plate recognition settings including detection thresholds, formatting, and known plates.", + ) + motion: MotionConfig = Field( + None, + title="Motion detection", + description="Default motion detection settings for this camera.", ) - motion: MotionConfig = Field(None, title="Motion detection configuration.") objects: ObjectConfig = Field( - default_factory=ObjectConfig, title="Object configuration." + default_factory=ObjectConfig, + title="Objects", + description="Object tracking defaults including which labels to track and per-object filters.", ) record: RecordConfig = Field( - default_factory=RecordConfig, title="Record configuration." + default_factory=RecordConfig, + title="Recording", + description="Recording and retention settings for this camera.", ) review: ReviewConfig = Field( - default_factory=ReviewConfig, title="Review configuration." + default_factory=ReviewConfig, + title="Review", + description="Settings that control alerts, detections, and GenAI review summaries used by the UI and storage for this camera.", ) semantic_search: CameraSemanticSearchConfig = Field( default_factory=CameraSemanticSearchConfig, - title="Semantic search configuration.", + title="Semantic Search", + description="Settings for semantic search which builds and queries object embeddings to find similar items.", ) snapshots: SnapshotsConfig = Field( - default_factory=SnapshotsConfig, title="Snapshot configuration." + default_factory=SnapshotsConfig, + title="Snapshots", + description="Settings for saved JPEG snapshots of tracked objects for this camera.", ) timestamp_style: TimestampStyleConfig = Field( - default_factory=TimestampStyleConfig, title="Timestamp style configuration." 
+ default_factory=TimestampStyleConfig, + title="Timestamp style", + description="Styling options for in-feed timestamps applied to recordings and snapshots.", ) # Options without global fallback best_image_timeout: int = Field( default=60, - title="How long to wait for the image with the highest confidence score.", + title="Best image timeout", + description="How long to wait for the image with the highest confidence score.", ) mqtt: CameraMqttConfig = Field( - default_factory=CameraMqttConfig, title="MQTT configuration." + default_factory=CameraMqttConfig, + title="MQTT", + description="MQTT image publishing settings.", ) notifications: NotificationConfig = Field( - default_factory=NotificationConfig, title="Notifications configuration." + default_factory=NotificationConfig, + title="Notifications", + description="Settings to enable and control notifications for this camera.", ) onvif: OnvifConfig = Field( - default_factory=OnvifConfig, title="Camera Onvif Configuration." + default_factory=OnvifConfig, + title="ONVIF", + description="ONVIF connection and PTZ autotracking settings for this camera.", + ) + type: CameraTypeEnum = Field( + default=CameraTypeEnum.generic, + title="Camera type", + description="Camera Type", ) - type: CameraTypeEnum = Field(default=CameraTypeEnum.generic, title="Camera Type") ui: CameraUiConfig = Field( - default_factory=CameraUiConfig, title="Camera UI Modifications." + default_factory=CameraUiConfig, + title="Camera UI", + description="Display ordering and visibility for this camera in the UI. Ordering affects the default dashboard. For more granular control, use camera groups.", ) webui_url: Optional[str] = Field( None, - title="URL to visit the camera directly from system page", + title="Camera URL", + description="URL to visit the camera directly from system page", ) zones: dict[str, ZoneConfig] = Field( - default_factory=dict, title="Zone configuration." 
+ default_factory=dict, + title="Zones", + description="Zones allow you to define a specific area of the frame so you can determine whether or not an object is within a particular area.", ) enabled_in_config: Optional[bool] = Field( - default=None, title="Keep track of original state of camera." + default=None, + title="Original camera state", + description="Keep track of original state of camera.", ) _ffmpeg_cmds: list[dict[str, list[str]]] = PrivateAttr() diff --git a/frigate/config/camera/detect.py b/frigate/config/camera/detect.py index 1926f3254..19ba670a6 100644 --- a/frigate/config/camera/detect.py +++ b/frigate/config/camera/detect.py @@ -8,56 +8,82 @@ __all__ = ["DetectConfig", "StationaryConfig", "StationaryMaxFramesConfig"] class StationaryMaxFramesConfig(FrigateBaseModel): - default: Optional[int] = Field(default=None, title="Default max frames.", ge=1) + default: Optional[int] = Field( + default=None, + title="Default max frames", + description="Default maximum frames to track a stationary object before stopping.", + ge=1, + ) objects: dict[str, int] = Field( - default_factory=dict, title="Object specific max frames." 
+ default_factory=dict, + title="Object max frames", + description="Per-object overrides for maximum frames to track stationary objects.", ) class StationaryConfig(FrigateBaseModel): interval: Optional[int] = Field( default=None, - title="Frame interval for checking stationary objects.", + title="Stationary interval", + description="How often (in frames) to run a detection check to confirm a stationary object.", gt=0, ) threshold: Optional[int] = Field( default=None, - title="Number of frames without a position change for an object to be considered stationary", + title="Stationary threshold", + description="Number of frames with no position change required to mark an object as stationary.", ge=1, ) max_frames: StationaryMaxFramesConfig = Field( default_factory=StationaryMaxFramesConfig, - title="Max frames for stationary objects.", + title="Max frames", + description="Limits how long stationary objects are tracked before being discarded.", ) classifier: bool = Field( default=True, - title="Enable visual classifier for determing if objects with jittery bounding boxes are stationary.", + title="Enable visual classifier", + description="Use a visual classifier to detect truly stationary objects even when bounding boxes jitter.", ) class DetectConfig(FrigateBaseModel): - enabled: bool = Field(default=False, title="Detection Enabled.") + enabled: bool = Field( + default=False, + title="Detection enabled", + description="Enable or disable object detection for all cameras; can be overridden per-camera. Detection must be enabled for object tracking to run.", + ) height: Optional[int] = Field( - default=None, title="Height of the stream for the detect role." + default=None, + title="Detect height", + description="Height (pixels) of frames used for the detect stream; leave empty to use the native stream resolution.", ) width: Optional[int] = Field( - default=None, title="Width of the stream for the detect role." 
+ default=None, + title="Detect width", + description="Width (pixels) of frames used for the detect stream; leave empty to use the native stream resolution.", ) fps: int = Field( - default=5, title="Number of frames per second to process through detection." + default=5, + title="Detect FPS", + description="Desired frames per second to run detection on; lower values reduce CPU usage (recommended value is 5, only set higher - at most 10 - if tracking extremely fast moving objects).", ) min_initialized: Optional[int] = Field( default=None, - title="Minimum number of consecutive hits for an object to be initialized by the tracker.", + title="Minimum initialization frames", + description="Number of consecutive detection hits required before creating a tracked object. Increase to reduce false initializations. Default value is fps divided by 2.", ) max_disappeared: Optional[int] = Field( default=None, - title="Maximum number of frames the object can disappear before detection ends.", + title="Maximum disappeared frames", + description="Number of frames without a detection before a tracked object is considered gone.", ) stationary: StationaryConfig = Field( default_factory=StationaryConfig, - title="Stationary objects config.", + title="Stationary objects config", + description="Settings to detect and manage objects that remain stationary for a period of time.", ) annotation_offset: int = Field( - default=0, title="Milliseconds to offset detect annotations by." 
+ default=0, + title="Annotation offset", + description="Milliseconds to shift detect annotations to better align timeline bounding boxes with recordings; can be positive or negative.", ) diff --git a/frigate/config/camera/ffmpeg.py b/frigate/config/camera/ffmpeg.py index 2c1e4cdca..05769dc66 100644 --- a/frigate/config/camera/ffmpeg.py +++ b/frigate/config/camera/ffmpeg.py @@ -35,39 +35,58 @@ DETECT_FFMPEG_OUTPUT_ARGS_DEFAULT = [ class FfmpegOutputArgsConfig(FrigateBaseModel): detect: Union[str, list[str]] = Field( default=DETECT_FFMPEG_OUTPUT_ARGS_DEFAULT, - title="Detect role FFmpeg output arguments.", + title="Detect output arguments", + description="Default output arguments for detect role streams.", ) record: Union[str, list[str]] = Field( default=RECORD_FFMPEG_OUTPUT_ARGS_DEFAULT, - title="Record role FFmpeg output arguments.", + title="Record output arguments", + description="Default output arguments for record role streams.", ) class FfmpegConfig(FrigateBaseModel): - path: str = Field(default="default", title="FFmpeg path") + path: str = Field( + default="default", + title="FFmpeg path", + description='Path to the FFmpeg binary to use or a version alias ("5.0" or "7.0").', + ) global_args: Union[str, list[str]] = Field( - default=FFMPEG_GLOBAL_ARGS_DEFAULT, title="Global FFmpeg arguments." + default=FFMPEG_GLOBAL_ARGS_DEFAULT, + title="FFmpeg global arguments", + description="Global arguments passed to FFmpeg processes.", ) hwaccel_args: Union[str, list[str]] = Field( - default="auto", title="FFmpeg hardware acceleration arguments." + default="auto", + title="Hardware acceleration arguments", + description="Hardware acceleration arguments for FFmpeg. Provider-specific presets are recommended.", ) input_args: Union[str, list[str]] = Field( - default=FFMPEG_INPUT_ARGS_DEFAULT, title="FFmpeg input arguments." 
+ default=FFMPEG_INPUT_ARGS_DEFAULT, + title="Input arguments", + description="Input arguments applied to FFmpeg input streams.", ) output_args: FfmpegOutputArgsConfig = Field( default_factory=FfmpegOutputArgsConfig, - title="FFmpeg output arguments per role.", + title="Output arguments", + description="Default output arguments used for different FFmpeg roles such as detect and record.", ) retry_interval: float = Field( default=10.0, - title="Time in seconds to wait before FFmpeg retries connecting to the camera.", + title="FFmpeg retry time", + description="Seconds to wait before attempting to reconnect a camera stream after failure. Default is 10.", gt=0.0, ) apple_compatibility: bool = Field( default=False, - title="Set tag on HEVC (H.265) recording stream to improve compatibility with Apple players.", + title="Apple compatibility", + description="Enable HEVC tagging for better Apple player compatibility when recording H.265.", + ) + gpu: int = Field( + default=0, + title="GPU index", + description="Default GPU index used for hardware acceleration if available.", ) - gpu: int = Field(default=0, title="GPU index to use for hardware acceleration.") @property def ffmpeg_path(self) -> str: @@ -95,21 +114,36 @@ class CameraRoleEnum(str, Enum): class CameraInput(FrigateBaseModel): - path: EnvString = Field(title="Camera input path.") - roles: list[CameraRoleEnum] = Field(title="Roles assigned to this input.") + path: EnvString = Field( + title="Input path", + description="Camera input stream URL or path.", + ) + roles: list[CameraRoleEnum] = Field( + title="Input roles", + description="Roles for this input stream.", + ) global_args: Union[str, list[str]] = Field( - default_factory=list, title="FFmpeg global arguments." + default_factory=list, + title="FFmpeg global arguments", + description="FFmpeg global arguments for this input stream.", ) hwaccel_args: Union[str, list[str]] = Field( - default_factory=list, title="FFmpeg hardware acceleration arguments." 
+ default_factory=list, + title="Hardware acceleration arguments", + description="Hardware acceleration arguments for this input stream.", ) input_args: Union[str, list[str]] = Field( - default_factory=list, title="FFmpeg input arguments." + default_factory=list, + title="Input arguments", + description="Input arguments specific to this stream.", ) class CameraFfmpegConfig(FfmpegConfig): - inputs: list[CameraInput] = Field(title="Camera inputs.") + inputs: list[CameraInput] = Field( + title="Camera inputs", + description="List of input stream definitions (paths and roles) for this camera.", + ) @field_validator("inputs") @classmethod diff --git a/frigate/config/camera/genai.py b/frigate/config/camera/genai.py index 56d7322f5..fae0ae577 100644 --- a/frigate/config/camera/genai.py +++ b/frigate/config/camera/genai.py @@ -67,6 +67,3 @@ class GenAIConfig(FrigateBaseModel): description="Runtime options passed to the provider for each inference call.", json_schema_extra={"additionalProperties": {"type": "string"}}, ) - runtime_options: dict[str, Any] = Field( - default={}, title="Options to pass during inference calls." 
- ) diff --git a/frigate/config/camera/live.py b/frigate/config/camera/live.py index 13ae2d04f..54b5a2bfd 100644 --- a/frigate/config/camera/live.py +++ b/frigate/config/camera/live.py @@ -10,7 +10,18 @@ __all__ = ["CameraLiveConfig"] class CameraLiveConfig(FrigateBaseModel): streams: Dict[str, str] = Field( default_factory=list, - title="Friendly names and restream names to use for live view.", + title="Live stream names", + description="Mapping of configured stream names to restream/go2rtc names used for live playback.", + ) + height: int = Field( + default=720, + title="Live height", + description="Height (pixels) to render the jsmpeg live stream in the Web UI; must be <= detect stream height.", + ) + quality: int = Field( + default=8, + ge=1, + le=31, + title="Live quality", + description="Encoding quality for the jsmpeg stream (1 highest, 31 lowest).", ) - height: int = Field(default=720, title="Live camera view height") - quality: int = Field(default=8, ge=1, le=31, title="Live camera view quality") diff --git a/frigate/config/camera/motion.py b/frigate/config/camera/motion.py index 65c03f731..d39130108 100644 --- a/frigate/config/camera/motion.py +++ b/frigate/config/camera/motion.py @@ -8,30 +8,64 @@ __all__ = ["MotionConfig"] class MotionConfig(FrigateBaseModel): - enabled: bool = Field(default=True, title="Enable motion on all cameras.") + enabled: bool = Field( + default=True, + title="Enable motion detection", + description="Enable or disable motion detection for all cameras; can be overridden per-camera.", + ) threshold: int = Field( default=30, - title="Motion detection threshold (1-255).", + title="Motion threshold", + description="Pixel difference threshold used by the motion detector; higher values reduce sensitivity (range 1-255).", ge=1, le=255, ) lightning_threshold: float = Field( - default=0.8, title="Lightning detection threshold (0.3-1.0).", ge=0.3, le=1.0 + default=0.8, + title="Lightning threshold", + description="Threshold to detect and 
ignore brief lighting spikes (lower is more sensitive, values between 0.3 and 1.0).", + ge=0.3, + le=1.0, + ) + improve_contrast: bool = Field( + default=True, + title="Improve contrast", + description="Apply contrast improvement to frames before motion analysis to help detection.", + ) + contour_area: Optional[int] = Field( + default=10, + title="Contour area", + description="Minimum contour area in pixels required for a motion contour to be counted.", + ) + delta_alpha: float = Field( + default=0.2, + title="Delta alpha", + description="Alpha blending factor used in frame differencing for motion calculation.", + ) + frame_alpha: float = Field( + default=0.01, + title="Frame alpha", + description="Alpha value used when blending frames for motion preprocessing.", + ) + frame_height: Optional[int] = Field( + default=100, + title="Frame height", + description="Height in pixels to scale frames to when computing motion.", ) - improve_contrast: bool = Field(default=True, title="Improve Contrast") - contour_area: Optional[int] = Field(default=10, title="Contour Area") - delta_alpha: float = Field(default=0.2, title="Delta Alpha") - frame_alpha: float = Field(default=0.01, title="Frame Alpha") - frame_height: Optional[int] = Field(default=100, title="Frame Height") mask: Union[str, list[str]] = Field( - default="", title="Coordinates polygon for the motion mask." + default="", + title="Mask coordinates", + description="Ordered x,y coordinates defining the motion mask polygon used to include/exclude areas.", ) mqtt_off_delay: int = Field( default=30, - title="Delay for updating MQTT with no motion detected.", + title="MQTT off delay", + description="Seconds to wait after last motion before publishing an MQTT 'off' state.", ) enabled_in_config: Optional[bool] = Field( - default=None, title="Keep track of original state of motion detection." 
+ default=None, + title="Original motion state", + description="Indicates whether motion detection was enabled in the original static configuration.", ) raw_mask: Union[str, list[str]] = "" diff --git a/frigate/config/camera/mqtt.py b/frigate/config/camera/mqtt.py index 132fee059..5f8da1a73 100644 --- a/frigate/config/camera/mqtt.py +++ b/frigate/config/camera/mqtt.py @@ -6,18 +6,40 @@ __all__ = ["CameraMqttConfig"] class CameraMqttConfig(FrigateBaseModel): - enabled: bool = Field(default=True, title="Send image over MQTT.") - timestamp: bool = Field(default=True, title="Add timestamp to MQTT image.") - bounding_box: bool = Field(default=True, title="Add bounding box to MQTT image.") - crop: bool = Field(default=True, title="Crop MQTT image to detected object.") - height: int = Field(default=270, title="MQTT image height.") + enabled: bool = Field( + default=True, + title="Send image", + description="Enable publishing image snapshots for objects to MQTT topics for this camera.", + ) + timestamp: bool = Field( + default=True, + title="Add timestamp", + description="Overlay a timestamp on images published to MQTT.", + ) + bounding_box: bool = Field( + default=True, + title="Add bounding box", + description="Draw bounding boxes on images published over MQTT.", + ) + crop: bool = Field( + default=True, + title="Crop image", + description="Crop images published to MQTT to the detected object's bounding box.", + ) + height: int = Field( + default=270, + title="Image height", + description="Height (pixels) to resize images published over MQTT.", + ) required_zones: list[str] = Field( default_factory=list, - title="List of required zones to be entered in order to send the image.", + title="Required zones", + description="Zones that an object must enter for an MQTT image to be published.", ) quality: int = Field( default=70, - title="Quality of the encoded jpeg (0-100).", + title="JPEG quality", + description="JPEG quality for images published to MQTT (0-100).", ge=0, 
le=100, ) diff --git a/frigate/config/camera/notification.py b/frigate/config/camera/notification.py index ce1ac8223..dabf94675 100644 --- a/frigate/config/camera/notification.py +++ b/frigate/config/camera/notification.py @@ -8,11 +8,24 @@ __all__ = ["NotificationConfig"] class NotificationConfig(FrigateBaseModel): - enabled: bool = Field(default=False, title="Enable notifications") - email: Optional[str] = Field(default=None, title="Email required for push.") + enabled: bool = Field( + default=False, + title="Enable notifications", + description="Enable or disable notifications for all cameras; can be overridden per-camera.", + ) + email: Optional[str] = Field( + default=None, + title="Notification email", + description="Email address used for push notifications or required by certain notification providers.", + ) cooldown: int = Field( - default=0, ge=0, title="Cooldown period for notifications (time in seconds)." + default=0, + ge=0, + title="Cooldown period", + description="Cooldown (seconds) between notifications to avoid spamming recipients.", ) enabled_in_config: Optional[bool] = Field( - default=None, title="Keep track of original state of notifications." + default=None, + title="Original notifications state", + description="Indicates whether notifications were enabled in the original static configuration.", ) diff --git a/frigate/config/camera/objects.py b/frigate/config/camera/objects.py index 7b6317dd0..97a4d5b7c 100644 --- a/frigate/config/camera/objects.py +++ b/frigate/config/camera/objects.py @@ -13,30 +13,38 @@ DEFAULT_TRACKED_OBJECTS = ["person"] class FilterConfig(FrigateBaseModel): min_area: Union[int, float] = Field( default=0, - title="Minimum area of bounding box for object to be counted. Can be pixels (int) or percentage (float between 0.000001 and 0.99).", + title="Minimum object area", + description="Minimum bounding box area (pixels or percentage) required for this object type. 
Can be pixels (int) or percentage (float between 0.000001 and 0.99).", ) max_area: Union[int, float] = Field( default=24000000, - title="Maximum area of bounding box for object to be counted. Can be pixels (int) or percentage (float between 0.000001 and 0.99).", + title="Maximum object area", + description="Maximum bounding box area (pixels or percentage) allowed for this object type. Can be pixels (int) or percentage (float between 0.000001 and 0.99).", ) min_ratio: float = Field( default=0, - title="Minimum ratio of bounding box's width/height for object to be counted.", + title="Minimum aspect ratio", + description="Minimum width/height ratio required for the bounding box to qualify.", ) max_ratio: float = Field( default=24000000, - title="Maximum ratio of bounding box's width/height for object to be counted.", + title="Maximum aspect ratio", + description="Maximum width/height ratio allowed for the bounding box to qualify.", ) threshold: float = Field( default=0.7, - title="Average detection confidence threshold for object to be counted.", + title="Confidence threshold", + description="Average detection confidence threshold required for the object to be considered a true positive.", ) min_score: float = Field( - default=0.5, title="Minimum detection confidence for object to be counted." + default=0.5, + title="Minimum confidence", + description="Minimum single-frame detection confidence required for the object to be counted.", ) mask: Optional[Union[str, list[str]]] = Field( default=None, - title="Detection area polygon mask for this filter configuration.", + title="Filter mask", + description="Polygon coordinates defining where this filter applies within the frame.", ) raw_mask: Union[str, list[str]] = "" @@ -51,46 +59,64 @@ class FilterConfig(FrigateBaseModel): class GenAIObjectTriggerConfig(FrigateBaseModel): tracked_object_end: bool = Field( - default=True, title="Send once the object is no longer tracked." 
+ default=True, + title="Send on end", + description="Send a request to GenAI when the tracked object ends.", ) after_significant_updates: Optional[int] = Field( default=None, - title="Send an early request to generative AI when X frames accumulated.", + title="Early GenAI trigger", + description="Send a request to GenAI after a specified number of significant updates for the tracked object.", ge=1, ) class GenAIObjectConfig(FrigateBaseModel): - enabled: bool = Field(default=False, title="Enable GenAI for camera.") + enabled: bool = Field( + default=False, + title="Enable GenAI", + description="Enable GenAI generation of descriptions for tracked objects by default.", + ) use_snapshot: bool = Field( - default=False, title="Use snapshots for generating descriptions." + default=False, + title="Use snapshots", + description="Use object snapshots instead of thumbnails for GenAI description generation.", ) prompt: str = Field( default="Analyze the sequence of images containing the {label}. Focus on the likely intent or behavior of the {label} based on its actions and movement, rather than describing its appearance or the surroundings. Consider what the {label} is doing, why, and what it might do next.", - title="Default caption prompt.", + title="Caption prompt", + description="Default prompt template used when generating descriptions with GenAI.", ) object_prompts: dict[str, str] = Field( - default_factory=dict, title="Object specific prompts." 
+ default_factory=dict, + title="Object prompts", + description="Per-object prompts to customize GenAI outputs for specific labels.", ) objects: Union[str, list[str]] = Field( default_factory=list, - title="List of objects to run generative AI for.", + title="GenAI objects", + description="List of object labels to send to GenAI by default.", ) required_zones: Union[str, list[str]] = Field( default_factory=list, - title="List of required zones to be entered in order to run generative AI.", + title="Required zones", + description="Zones that must be entered for objects to qualify for GenAI description generation.", ) debug_save_thumbnails: bool = Field( default=False, - title="Save thumbnails sent to generative AI for debugging purposes.", + title="Save thumbnails", + description="Save thumbnails sent to GenAI for debugging and review.", ) send_triggers: GenAIObjectTriggerConfig = Field( default_factory=GenAIObjectTriggerConfig, - title="What triggers to use to send frames to generative AI for a tracked object.", + title="GenAI triggers", + description="Defines when frames should be sent to GenAI (on end, after updates, etc.).", ) enabled_in_config: Optional[bool] = Field( - default=None, title="Keep track of original state of generative AI." + default=None, + title="Original GenAI state", + description="Indicates whether GenAI was enabled in the original static config.", ) @field_validator("required_zones", mode="before") @@ -103,14 +129,25 @@ class GenAIObjectConfig(FrigateBaseModel): class ObjectConfig(FrigateBaseModel): - track: list[str] = Field(default=DEFAULT_TRACKED_OBJECTS, title="Objects to track.") - filters: dict[str, FilterConfig] = Field( - default_factory=dict, title="Object filters." 
+ track: list[str] = Field( + default=DEFAULT_TRACKED_OBJECTS, + title="Objects to track", + description="List of object labels to track for all cameras; can be overridden per-camera.", + ) + filters: dict[str, FilterConfig] = Field( + default_factory=dict, + title="Object filters", + description="Filters applied to detected objects to reduce false positives (area, ratio, confidence).", + ) + mask: Union[str, list[str]] = Field( + default="", + title="Object mask", + description="Mask polygon used to prevent object detection in specified areas.", ) - mask: Union[str, list[str]] = Field(default="", title="Object mask.") genai: GenAIObjectConfig = Field( default_factory=GenAIObjectConfig, - title="Config for using genai to analyze objects.", + title="GenAI object config", + description="GenAI options for describing tracked objects and sending frames for generation.", ) _all_objects: list[str] = PrivateAttr() diff --git a/frigate/config/camera/onvif.py b/frigate/config/camera/onvif.py index d4955799b..c5f1e19f3 100644 --- a/frigate/config/camera/onvif.py +++ b/frigate/config/camera/onvif.py @@ -17,37 +17,57 @@ class ZoomingModeEnum(str, Enum): class PtzAutotrackConfig(FrigateBaseModel): - enabled: bool = Field(default=False, title="Enable PTZ object autotracking.") + enabled: bool = Field( + default=False, + title="Enable Autotracking", + description="Enable or disable automatic PTZ camera tracking of detected objects.", + ) calibrate_on_startup: bool = Field( - default=False, title="Perform a camera calibration when Frigate starts." + default=False, + title="Calibrate on start", + description="Measure PTZ motor speeds on startup to improve tracking accuracy. Frigate will update config with movement_weights after calibration.", ) zooming: ZoomingModeEnum = Field( - default=ZoomingModeEnum.disabled, title="Autotracker zooming mode." 
+ default=ZoomingModeEnum.disabled, + title="Zoom mode", + description="Control zoom behavior: disabled (pan/tilt only), absolute (most compatible), or relative (concurrent pan/tilt/zoom).", ) zoom_factor: float = Field( default=0.3, - title="Zooming factor (0.1-0.75).", + title="Zoom factor", + description="Control zoom level on tracked objects. Lower values keep more scene in view; higher values zoom in closer but may lose tracking. Values between 0.1 and 0.75.", ge=0.1, le=0.75, ) - track: list[str] = Field(default=DEFAULT_TRACKED_OBJECTS, title="Objects to track.") + track: list[str] = Field( + default=DEFAULT_TRACKED_OBJECTS, + title="Tracked objects", + description="List of object types that should trigger autotracking.", + ) required_zones: list[str] = Field( default_factory=list, - title="List of required zones to be entered in order to begin autotracking.", + title="Required zones", + description="Objects must enter one of these zones before autotracking begins.", ) return_preset: str = Field( default="home", - title="Name of camera preset to return to when object tracking is over.", + title="Return preset", + description="ONVIF preset name configured in camera firmware to return to after tracking ends.", ) timeout: int = Field( - default=10, title="Seconds to delay before returning to preset." + default=10, + title="Return timeout", + description="Wait this many seconds after losing tracking before returning camera to preset position.", ) movement_weights: Optional[Union[str, list[str]]] = Field( default_factory=list, - title="Internal value used for PTZ movements based on the speed of your camera's motor.", + title="Movement weights", + description="Calibration values automatically generated by camera calibration. Do not modify manually.", ) enabled_in_config: Optional[bool] = Field( - default=None, title="Keep track of original state of autotracking." 
+ default=None, + title="Original autotrack state", + description="Internal field to track whether autotracking was enabled in configuration.", ) @field_validator("movement_weights", mode="before") @@ -72,16 +92,38 @@ class PtzAutotrackConfig(FrigateBaseModel): class OnvifConfig(FrigateBaseModel): - host: str = Field(default="", title="Onvif Host") - port: int = Field(default=8000, title="Onvif Port") - user: Optional[EnvString] = Field(default=None, title="Onvif Username") - password: Optional[EnvString] = Field(default=None, title="Onvif Password") - tls_insecure: bool = Field(default=False, title="Onvif Disable TLS verification") + host: str = Field( + default="", + title="ONVIF host", + description="Host (and optional scheme) for the ONVIF service for this camera.", + ) + port: int = Field( + default=8000, + title="ONVIF port", + description="Port number for the ONVIF service.", + ) + user: Optional[EnvString] = Field( + default=None, + title="ONVIF username", + description="Username for ONVIF authentication; some devices require admin user for ONVIF.", + ) + password: Optional[EnvString] = Field( + default=None, + title="ONVIF password", + description="Password for ONVIF authentication.", + ) + tls_insecure: bool = Field( + default=False, + title="Disable TLS verify", + description="Skip TLS verification and disable digest auth for ONVIF (unsafe; use in safe networks only).", + ) autotracking: PtzAutotrackConfig = Field( default_factory=PtzAutotrackConfig, - title="PTZ auto tracking config.", + title="Autotracking", + description="Automatically track moving objects and keep them centered in the frame using PTZ camera movements.", ) ignore_time_mismatch: bool = Field( default=False, - title="Onvif Ignore Time Synchronization Mismatch Between Camera and Server", + title="Ignore time mismatch", + description="Ignore time synchronization differences between camera and Frigate server for ONVIF communication.", ) diff --git a/frigate/config/camera/record.py 
b/frigate/config/camera/record.py index fe24cf522..7eae7500d 100644 --- a/frigate/config/camera/record.py +++ b/frigate/config/camera/record.py @@ -21,7 +21,12 @@ __all__ = [ class RecordRetainConfig(FrigateBaseModel): - days: float = Field(default=0, ge=0, title="Default retention period.") + days: float = Field( + default=0, + ge=0, + title="Retention days", + description="Days to retain recordings.", + ) class RetainModeEnum(str, Enum): @@ -31,22 +36,37 @@ class RetainModeEnum(str, Enum): class ReviewRetainConfig(FrigateBaseModel): - days: float = Field(default=10, ge=0, title="Default retention period.") - mode: RetainModeEnum = Field(default=RetainModeEnum.motion, title="Retain mode.") + days: float = Field( + default=10, + ge=0, + title="Retention days", + description="Number of days to retain recordings of detection events.", + ) + mode: RetainModeEnum = Field( + default=RetainModeEnum.motion, + title="Retention mode", + description="Mode for retention: all (save all segments), motion (save segments with motion), or active_objects (save segments with active objects).", + ) class EventsConfig(FrigateBaseModel): pre_capture: int = Field( default=5, - title="Seconds to retain before event starts.", + title="Pre-capture seconds", + description="Number of seconds before the detection event to include in the recording.", le=MAX_PRE_CAPTURE, ge=0, ) post_capture: int = Field( - default=5, ge=0, title="Seconds to retain after event ends." + default=5, + ge=0, + title="Post-capture seconds", + description="Number of seconds after the detection event to include in the recording.", ) retain: ReviewRetainConfig = Field( - default_factory=ReviewRetainConfig, title="Event retention settings." 
+ default_factory=ReviewRetainConfig, + title="Event retention", + description="Retention settings for recordings of detection events.", ) @@ -60,43 +80,65 @@ class RecordQualityEnum(str, Enum): class RecordPreviewConfig(FrigateBaseModel): quality: RecordQualityEnum = Field( - default=RecordQualityEnum.medium, title="Quality of recording preview." + default=RecordQualityEnum.medium, + title="Preview quality", + description="Preview quality level (very_low, low, medium, high, very_high).", ) class RecordExportConfig(FrigateBaseModel): hwaccel_args: Union[str, list[str]] = Field( - default="auto", title="Export-specific FFmpeg hardware acceleration arguments." + default="auto", + title="Export hwaccel args", + description="Hardware acceleration args to use for export/transcode operations.", ) class RecordConfig(FrigateBaseModel): - enabled: bool = Field(default=False, title="Enable record on all cameras.") + enabled: bool = Field( + default=False, + title="Enable recording", + description="Enable or disable recording for all cameras; can be overridden per-camera.", + ) expire_interval: int = Field( default=60, - title="Number of minutes to wait between cleanup runs.", + title="Record cleanup interval", + description="Minutes between cleanup passes that remove expired recording segments.", ) continuous: RecordRetainConfig = Field( default_factory=RecordRetainConfig, - title="Continuous recording retention settings.", + title="Continuous retention", + description="Number of days to retain recordings regardless of tracked objects or motion. Set to 0 if you only want to retain recordings of alerts and detections.", ) motion: RecordRetainConfig = Field( - default_factory=RecordRetainConfig, title="Motion recording retention settings." + default_factory=RecordRetainConfig, + title="Motion retention", + description="Number of days to retain recordings triggered by motion regardless of tracked objects. 
Set to 0 if you only want to retain recordings of alerts and detections.", ) detections: EventsConfig = Field( - default_factory=EventsConfig, title="Detection specific retention settings." + default_factory=EventsConfig, + title="Detection retention", + description="Recording retention settings for detection events including pre/post capture durations.", ) alerts: EventsConfig = Field( - default_factory=EventsConfig, title="Alert specific retention settings." + default_factory=EventsConfig, + title="Alert retention", + description="Recording retention settings for alert events including pre/post capture durations.", ) export: RecordExportConfig = Field( - default_factory=RecordExportConfig, title="Recording Export Config" + default_factory=RecordExportConfig, + title="Export config", + description="Settings used when exporting recordings such as timelapse and hardware acceleration.", ) preview: RecordPreviewConfig = Field( - default_factory=RecordPreviewConfig, title="Recording Preview Config" + default_factory=RecordPreviewConfig, + title="Preview config", + description="Settings controlling the quality of recording previews shown in the UI.", ) enabled_in_config: Optional[bool] = Field( - default=None, title="Keep track of original state of recording." 
+ default=None, + title="Original recording state", + description="Indicates whether recording was enabled in the original static configuration.", ) @property diff --git a/frigate/config/camera/review.py b/frigate/config/camera/review.py index 6e55b6242..ff07fb368 100644 --- a/frigate/config/camera/review.py +++ b/frigate/config/camera/review.py @@ -21,22 +21,32 @@ DEFAULT_ALERT_OBJECTS = ["person", "car"] class AlertsConfig(FrigateBaseModel): """Configure alerts""" - enabled: bool = Field(default=True, title="Enable alerts.") + enabled: bool = Field( + default=True, + title="Enable alerts", + description="Enable or disable alert generation for all cameras; can be overridden per-camera.", + ) labels: list[str] = Field( - default=DEFAULT_ALERT_OBJECTS, title="Labels to create alerts for." + default=DEFAULT_ALERT_OBJECTS, + title="Alert labels", + description="List of object labels that qualify as alerts (for example: car, person).", ) required_zones: Union[str, list[str]] = Field( default_factory=list, - title="List of required zones to be entered in order to save the event as an alert.", + title="Required zones", + description="Zones that an object must enter to be considered an alert; leave empty to allow any zone.", ) enabled_in_config: Optional[bool] = Field( - default=None, title="Keep track of original state of alerts." 
+ default=None, + title="Original alerts state", + description="Tracks whether alerts were originally enabled in the static configuration.", ) cutoff_time: int = Field( default=40, - title="Time to cutoff alerts after no alert-causing activity has occurred.", + title="Alerts cutoff time", + description="Seconds to wait after no alert-causing activity before cutting off an alert.", ) @field_validator("required_zones", mode="before") @@ -51,22 +61,32 @@ class AlertsConfig(FrigateBaseModel): class DetectionsConfig(FrigateBaseModel): """Configure detections""" - enabled: bool = Field(default=True, title="Enable detections.") + enabled: bool = Field( + default=True, + title="Enable detections", + description="Enable or disable detection events for all cameras; can be overridden per-camera.", + ) labels: Optional[list[str]] = Field( - default=None, title="Labels to create detections for." + default=None, + title="Detection labels", + description="List of object labels that qualify as detection events.", ) required_zones: Union[str, list[str]] = Field( default_factory=list, - title="List of required zones to be entered in order to save the event as a detection.", + title="Required zones", + description="Zones that an object must enter to be considered a detection; leave empty to allow any zone.", ) cutoff_time: int = Field( default=30, - title="Time to cutoff detection after no detection-causing activity has occurred.", + title="Detections cutoff time", + description="Seconds to wait after no detection-causing activity before cutting off a detection.", ) enabled_in_config: Optional[bool] = Field( - default=None, title="Keep track of original state of detections." 
+ default=None, + title="Original detections state", + description="Tracks whether detections were originally enabled in the static configuration.", ) @field_validator("required_zones", mode="before") @@ -81,27 +101,42 @@ class DetectionsConfig(FrigateBaseModel): class GenAIReviewConfig(FrigateBaseModel): enabled: bool = Field( default=False, - title="Enable GenAI descriptions for review items.", + title="Enable GenAI descriptions", + description="Enable or disable GenAI-generated descriptions and summaries for review items.", + ) + alerts: bool = Field( + default=True, + title="Enable GenAI for alerts", + description="Use GenAI to generate descriptions for alert items.", + ) + detections: bool = Field( + default=False, + title="Enable GenAI for detections", + description="Use GenAI to generate descriptions for detection items.", ) - alerts: bool = Field(default=True, title="Enable GenAI for alerts.") - detections: bool = Field(default=False, title="Enable GenAI for detections.") image_source: ImageSourceEnum = Field( default=ImageSourceEnum.preview, - title="Image source for review descriptions.", + title="Review image source", + description="Source of images sent to GenAI ('preview' or 'recordings'); 'recordings' uses higher quality frames but more tokens.", ) additional_concerns: list[str] = Field( default=[], - title="Additional concerns that GenAI should make note of on this camera.", + title="Additional concerns", + description="A list of additional concerns or notes the GenAI should consider when evaluating activity on this camera.", ) debug_save_thumbnails: bool = Field( default=False, - title="Save thumbnails sent to generative AI for debugging purposes.", + title="Save thumbnails", + description="Save thumbnails that are sent to the GenAI provider for debugging and review.", ) enabled_in_config: Optional[bool] = Field( - default=None, title="Keep track of original state of generative AI." 
+ default=None, + title="Original GenAI state", + description="Tracks whether GenAI review was originally enabled in the static configuration.", ) preferred_language: str | None = Field( - title="Preferred language for GenAI Response", + title="Preferred language", + description="Preferred language to request from the GenAI provider for generated responses.", default=None, ) activity_context_prompt: str = Field( @@ -139,19 +174,24 @@ Evaluate in this order: 3. **Escalate to Level 2 if:** Weapons, break-in tools, forced entry in progress, violence, or active property damage visible (escalates from Level 0 or 1) The mere presence of an unidentified person in private areas during late night hours is inherently suspicious and warrants human review, regardless of what activity they appear to be doing or how brief the sequence is.""", - title="Custom activity context prompt defining normal and suspicious activity patterns for this property.", + title="Activity context prompt", + description="Custom prompt describing what is and is not suspicious activity to provide context for GenAI summaries.", ) class ReviewConfig(FrigateBaseModel): - """Configure reviews""" - alerts: AlertsConfig = Field( - default_factory=AlertsConfig, title="Review alerts config." + default_factory=AlertsConfig, + title="Alerts config", + description="Settings for which tracked objects generate alerts and how alerts are retained.", ) detections: DetectionsConfig = Field( - default_factory=DetectionsConfig, title="Review detections config." + default_factory=DetectionsConfig, + title="Detections config", + description="Settings for creating detection events (non-alert) and how long to keep them.", ) genai: GenAIReviewConfig = Field( - default_factory=GenAIReviewConfig, title="Review description genai config." 
+ default_factory=GenAIReviewConfig, + title="GenAI config", + description="Controls use of generative AI for producing descriptions and summaries of review items.", ) diff --git a/frigate/config/camera/snapshots.py b/frigate/config/camera/snapshots.py index 156b56a7e..c367aad8e 100644 --- a/frigate/config/camera/snapshots.py +++ b/frigate/config/camera/snapshots.py @@ -9,36 +9,68 @@ __all__ = ["SnapshotsConfig", "RetainConfig"] class RetainConfig(FrigateBaseModel): - default: float = Field(default=10, title="Default retention period.") - mode: RetainModeEnum = Field(default=RetainModeEnum.motion, title="Retain mode.") + default: float = Field( + default=10, + title="Default retention", + description="Default number of days to retain snapshots.", + ) + mode: RetainModeEnum = Field( + default=RetainModeEnum.motion, + title="Retention mode", + description="Mode for retention: all (save all segments), motion (save segments with motion), or active_objects (save segments with active objects).", + ) objects: dict[str, float] = Field( - default_factory=dict, title="Object retention period." + default_factory=dict, + title="Object retention", + description="Per-object overrides for snapshot retention days.", ) class SnapshotsConfig(FrigateBaseModel): - enabled: bool = Field(default=False, title="Snapshots enabled.") + enabled: bool = Field( + default=False, + title="Snapshots enabled", + description="Enable or disable saving snapshots for all cameras; can be overridden per-camera.", + ) clean_copy: bool = Field( - default=True, title="Create a clean copy of the snapshot image." + default=True, + title="Save clean copy", + description="Save an unannotated clean copy of snapshots in addition to annotated ones.", ) timestamp: bool = Field( - default=False, title="Add a timestamp overlay on the snapshot." 
+ default=False, + title="Timestamp overlay", + description="Overlay a timestamp on saved snapshots.", ) bounding_box: bool = Field( - default=True, title="Add a bounding box overlay on the snapshot." + default=True, + title="Bounding box overlay", + description="Draw bounding boxes for tracked objects on saved snapshots.", + ) + crop: bool = Field( + default=False, + title="Crop snapshot", + description="Crop saved snapshots to the detected object's bounding box.", ) - crop: bool = Field(default=False, title="Crop the snapshot to the detected object.") required_zones: list[str] = Field( default_factory=list, - title="List of required zones to be entered in order to save a snapshot.", + title="Required zones", + description="Zones an object must enter for a snapshot to be saved.", + ) + height: Optional[int] = Field( + default=None, + title="Snapshot height", + description="Height (pixels) to resize saved snapshots to; leave empty to preserve original size.", ) - height: Optional[int] = Field(default=None, title="Snapshot image height.") retain: RetainConfig = Field( - default_factory=RetainConfig, title="Snapshot retention." 
+ default_factory=RetainConfig, + title="Snapshot retention", + description="Retention settings for saved snapshots including default days and per-object overrides.", ) quality: int = Field( default=70, - title="Quality of the encoded jpeg (0-100).", + title="JPEG quality", + description="JPEG encode quality for saved snapshots (0-100).", ge=0, le=100, ) diff --git a/frigate/config/camera/timestamp.py b/frigate/config/camera/timestamp.py index fcf352a9b..48ec8240b 100644 --- a/frigate/config/camera/timestamp.py +++ b/frigate/config/camera/timestamp.py @@ -27,9 +27,27 @@ class TimestampPositionEnum(str, Enum): class ColorConfig(FrigateBaseModel): - red: int = Field(default=255, ge=0, le=255, title="Red") - green: int = Field(default=255, ge=0, le=255, title="Green") - blue: int = Field(default=255, ge=0, le=255, title="Blue") + red: int = Field( + default=255, + ge=0, + le=255, + title="Red", + description="Red component (0-255) for timestamp color.", + ) + green: int = Field( + default=255, + ge=0, + le=255, + title="Green", + description="Green component (0-255) for timestamp color.", + ) + blue: int = Field( + default=255, + ge=0, + le=255, + title="Blue", + description="Blue component (0-255) for timestamp color.", + ) class TimestampEffectEnum(str, Enum): @@ -39,11 +57,27 @@ class TimestampEffectEnum(str, Enum): class TimestampStyleConfig(FrigateBaseModel): position: TimestampPositionEnum = Field( - default=TimestampPositionEnum.tl, title="Timestamp position." 
+ default=TimestampPositionEnum.tl, + title="Timestamp position", + description="Position of the timestamp on the image (tl/tr/bl/br).", + ) + format: str = Field( + default=DEFAULT_TIME_FORMAT, + title="Timestamp format", + description="Datetime format string used for timestamps (Python datetime format codes).", + ) + color: ColorConfig = Field( + default_factory=ColorConfig, + title="Timestamp color", + description="RGB color values for the timestamp text (all values 0-255).", + ) + thickness: int = Field( + default=2, + title="Timestamp thickness", + description="Line thickness of the timestamp text.", ) - format: str = Field(default=DEFAULT_TIME_FORMAT, title="Timestamp format.") - color: ColorConfig = Field(default_factory=ColorConfig, title="Timestamp color.") - thickness: int = Field(default=2, title="Timestamp thickness.") effect: Optional[TimestampEffectEnum] = Field( - default=None, title="Timestamp effect." + default=None, + title="Timestamp effect", + description="Visual effect for the timestamp text (none, solid, shadow).", ) diff --git a/frigate/config/camera/ui.py b/frigate/config/camera/ui.py index b6b9c58ad..5e903b254 100644 --- a/frigate/config/camera/ui.py +++ b/frigate/config/camera/ui.py @@ -6,7 +6,13 @@ __all__ = ["CameraUiConfig"] class CameraUiConfig(FrigateBaseModel): - order: int = Field(default=0, title="Order of camera in UI.") - dashboard: bool = Field( - default=True, title="Show this camera in Frigate dashboard UI." + order: int = Field( + default=0, + title="UI order", + description="Numeric order used to sort the camera in the UI (default dashboard and lists); larger numbers appear later.", + ) + dashboard: bool = Field( + default=True, + title="Show in UI", + description="Toggle whether this camera is visible everywhere in the Frigate UI. 
Disabling this will require manually editing the config to view this camera in the UI again.", ) diff --git a/frigate/config/camera/zone.py b/frigate/config/camera/zone.py index 7df1a1f25..1ee25d4b6 100644 --- a/frigate/config/camera/zone.py +++ b/frigate/config/camera/zone.py @@ -14,36 +14,46 @@ logger = logging.getLogger(__name__) class ZoneConfig(BaseModel): friendly_name: Optional[str] = Field( - None, title="Zone friendly name used in the Frigate UI." + None, + title="Zone name", + description="A user-friendly name for the zone, displayed in the Frigate UI. If not set, a formatted version of the zone name will be used.", ) filters: dict[str, FilterConfig] = Field( - default_factory=dict, title="Zone filters." + default_factory=dict, + title="Zone filters", + description="Filters to apply to objects within this zone. Used to reduce false positives or restrict which objects are considered present in the zone.", ) coordinates: Union[str, list[str]] = Field( - title="Coordinates polygon for the defined zone." + title="Coordinates", + description="Polygon coordinates that define the zone area. Can be a comma-separated string or a list of coordinate strings. Coordinates should be relative (0-1) or absolute (legacy).", ) distances: Optional[Union[str, list[str]]] = Field( default_factory=list, - title="Real-world distances for the sides of quadrilateral for the defined zone.", + title="Real-world distances", + description="Optional real-world distances for each side of the zone quadrilateral, used for speed or distance calculations. Must have exactly 4 values if set.", ) inertia: int = Field( default=3, - title="Number of consecutive frames required for object to be considered present in the zone.", + title="Inertia frames", gt=0, + description="Number of consecutive frames an object must be detected in the zone before it is considered present. 
Helps filter out transient detections.", ) loitering_time: int = Field( default=0, ge=0, - title="Number of seconds that an object must loiter to be considered in the zone.", + title="Loitering seconds", + description="Number of seconds an object must remain in the zone to be considered as loitering. Set to 0 to disable loitering detection.", ) speed_threshold: Optional[float] = Field( default=None, ge=0.1, - title="Minimum speed value for an object to be considered in the zone.", + title="Minimum speed", + description="Minimum speed (in real-world units if distances are set) required for an object to be considered present in the zone. Used for speed-based zone triggers.", ) objects: Union[str, list[str]] = Field( default_factory=list, - title="List of objects that can trigger the zone.", + title="Trigger objects", + description="List of object types (from labelmap) that can trigger this zone. Can be a string or a list of strings. If empty, all objects are considered.", ) _color: Optional[tuple[int, int, int]] = PrivateAttr() _contour: np.ndarray = PrivateAttr() diff --git a/frigate/config/camera_group.py b/frigate/config/camera_group.py index 7449e86a1..65319001a 100644 --- a/frigate/config/camera_group.py +++ b/frigate/config/camera_group.py @@ -8,13 +8,21 @@ __all__ = ["CameraGroupConfig"] class CameraGroupConfig(FrigateBaseModel): - """Represents a group of cameras.""" - cameras: Union[str, list[str]] = Field( - default_factory=list, title="List of cameras in this group." 
+ default_factory=list, + title="Camera list", + description="Array of camera names included in this group.", + ) + icon: str = Field( + default="generic", + title="Group icon", + description="Icon used to represent the camera group in the UI.", + ) + order: int = Field( + default=0, + title="Sort order", + description="Numeric order used to sort camera groups in the UI; larger numbers appear later.", ) - icon: str = Field(default="generic", title="Icon that represents camera group.") - order: int = Field(default=0, title="Sort order for group.") @field_validator("cameras", mode="before") @classmethod diff --git a/frigate/config/classification.py b/frigate/config/classification.py index fb8e3de29..a1e7b89a5 100644 --- a/frigate/config/classification.py +++ b/frigate/config/classification.py @@ -43,28 +43,43 @@ class ObjectClassificationType(str, Enum): class AudioTranscriptionConfig(FrigateBaseModel): - enabled: bool = Field(default=False, title="Enable audio transcription.") + enabled: bool = Field( + default=False, + title="Enable audio transcription", + description="Enable or disable automatic audio transcription for all cameras; can be overridden per-camera.", + ) language: str = Field( default="en", - title="Language abbreviation to use for audio event transcription/translation.", + title="Transcription language", + description="Language code used for transcription/translation (for example 'en' for English). See https://whisper-api.com/docs/languages/ for supported language codes.", ) device: Optional[EnrichmentsDeviceEnum] = Field( default=EnrichmentsDeviceEnum.CPU, - title="The device used for audio transcription.", + title="Transcription device", + description="Device key (CPU/GPU) to run the transcription model on. Only NVIDIA CUDA GPUs are currently supported for transcription.", ) model_size: str = Field( - default="small", title="The size of the embeddings model used." 
+ default="small", + title="Model size", + description="Model size to use for offline audio event transcription.", ) live_enabled: Optional[bool] = Field( - default=False, title="Enable live transcriptions." + default=False, + title="Live transcription", + description="Enable streaming live transcription for audio as it is received.", ) class BirdClassificationConfig(FrigateBaseModel): - enabled: bool = Field(default=False, title="Enable bird classification.") + enabled: bool = Field( + default=False, + title="Bird classification", + description="Enable or disable bird classification.", + ) threshold: float = Field( default=0.9, - title="Minimum classification score required to be considered a match.", + title="Minimum score", + description="Minimum classification score required to accept a bird classification.", gt=0.0, le=1.0, ) @@ -72,42 +87,62 @@ class BirdClassificationConfig(FrigateBaseModel): class CustomClassificationStateCameraConfig(FrigateBaseModel): crop: list[float, float, float, float] = Field( - title="Crop of image frame on this camera to run classification on." + title="Classification crop", + description="Crop coordinates to use for running classification on this camera.", ) class CustomClassificationStateConfig(FrigateBaseModel): cameras: Dict[str, CustomClassificationStateCameraConfig] = Field( - title="Cameras to run classification on." 
+ title="Classification cameras", + description="Per-camera crop and settings for running state classification.", ) motion: bool = Field( default=False, - title="If classification should be run when motion is detected in the crop.", + title="Run on motion", + description="If true, run classification when motion is detected within the specified crop.", ) interval: int | None = Field( default=None, - title="Interval to run classification on in seconds.", + title="Classification interval", + description="Interval (seconds) between periodic classification runs for state classification.", gt=0, ) class CustomClassificationObjectConfig(FrigateBaseModel): - objects: list[str] = Field(title="Object types to classify.") + objects: list[str] = Field( + default_factory=list, + title="Classify objects", + description="List of object types to run object classification on.", + ) classification_type: ObjectClassificationType = Field( default=ObjectClassificationType.sub_label, - title="Type of classification that is applied.", + title="Classification type", + description="Classification type applied: 'sub_label' (adds sub_label) or other supported types.", ) class CustomClassificationConfig(FrigateBaseModel): - enabled: bool = Field(default=True, title="Enable running the model.") - name: str | None = Field(default=None, title="Name of classification model.") + enabled: bool = Field( + default=True, + title="Enable model", + description="Enable or disable the custom classification model.", + ) + name: str | None = Field( + default=None, + title="Model name", + description="Identifier for the custom classification model to use.", + ) threshold: float = Field( - default=0.8, title="Classification score threshold to change the state." + default=0.8, + title="Score threshold", + description="Score threshold used to change the classification state.", ) save_attempts: int | None = Field( default=None, - title="Number of classification attempts to save in the recent classifications tab. 
If not specified, defaults to 200 for object classification and 100 for state classification.", + title="Save attempts", + description="How many classification attempts to save for recent classifications UI.", ge=0, ) object_config: CustomClassificationObjectConfig | None = Field(default=None) @@ -116,47 +151,76 @@ class CustomClassificationConfig(FrigateBaseModel): class ClassificationConfig(FrigateBaseModel): bird: BirdClassificationConfig = Field( - default_factory=BirdClassificationConfig, title="Bird classification config." + default_factory=BirdClassificationConfig, + title="Bird classification config", + description="Settings specific to bird classification models.", ) custom: Dict[str, CustomClassificationConfig] = Field( - default={}, title="Custom Classification Model Configs." + default={}, + title="Custom Classification Models", + description="Configuration for custom classification models used for objects or state detection.", ) class SemanticSearchConfig(FrigateBaseModel): - enabled: bool = Field(default=False, title="Enable semantic search.") + enabled: bool = Field( + default=False, + title="Enable semantic search", + description="Enable or disable the semantic search feature.", + ) reindex: Optional[bool] = Field( - default=False, title="Reindex all tracked objects on startup." + default=False, + title="Reindex on startup", + description="Trigger a full reindex of historical tracked objects into the embeddings database.", ) model: Optional[SemanticSearchModelEnum] = Field( default=SemanticSearchModelEnum.jinav1, - title="The CLIP model to use for semantic search.", + title="Semantic search model", + description="The embeddings model to use for semantic search (for example 'jinav1').", ) model_size: str = Field( - default="small", title="The size of the embeddings model used." 
+ default="small", + title="Model size", + description="Select model size; 'small' runs on CPU and 'large' typically requires GPU.", ) device: Optional[str] = Field( default=None, - title="The device key to use for semantic search.", + title="Device", description="This is an override, to target a specific device. See https://onnxruntime.ai/docs/execution-providers/ for more information", ) class TriggerConfig(FrigateBaseModel): friendly_name: Optional[str] = Field( - None, title="Trigger friendly name used in the Frigate UI." + None, + title="Friendly name", + description="Optional friendly name displayed in the UI for this trigger.", + ) + enabled: bool = Field( + default=True, + title="Enable this trigger", + description="Enable or disable this semantic search trigger.", + ) + type: TriggerType = Field( + default=TriggerType.DESCRIPTION, + title="Trigger type", + description="Type of trigger: 'thumbnail' (match against image) or 'description' (match against text).", + ) + data: str = Field( + title="Trigger content", + description="Text phrase or thumbnail ID to match against tracked objects.", ) - enabled: bool = Field(default=True, title="Enable this trigger") - type: TriggerType = Field(default=TriggerType.DESCRIPTION, title="Type of trigger") - data: str = Field(title="Trigger content (text phrase or image ID)") threshold: float = Field( - title="Confidence score required to run the trigger", + title="Trigger threshold", + description="Minimum similarity score (0-1) required to activate this trigger.", default=0.8, gt=0.0, le=1.0, ) actions: List[TriggerAction] = Field( - default=[], title="Actions to perform when trigger is matched" + default=[], + title="Trigger actions", + description="List of actions to execute when trigger matches (notification, sub_label, attribute).", ) model_config = ConfigDict(extra="forbid", protected_namespaces=()) @@ -165,147 +229,191 @@ class TriggerConfig(FrigateBaseModel): class CameraSemanticSearchConfig(FrigateBaseModel): 
triggers: Dict[str, TriggerConfig] = Field( default={}, - title="Trigger actions on tracked objects that match existing thumbnails or descriptions", + title="Triggers", + description="Actions and matching criteria for camera-specific semantic search triggers.", ) model_config = ConfigDict(extra="forbid", protected_namespaces=()) class FaceRecognitionConfig(FrigateBaseModel): - enabled: bool = Field(default=False, title="Enable face recognition.") + enabled: bool = Field( + default=False, + title="Enable face recognition", + description="Enable or disable face recognition for all cameras; can be overridden per-camera.", + ) model_size: str = Field( - default="small", title="The size of the embeddings model used." + default="small", + title="Model size", + description="Model size to use for face embeddings (small/large); larger may require GPU.", ) unknown_score: float = Field( - title="Minimum face distance score required to be marked as a potential match.", + title="Unknown score threshold", + description="Distance threshold below which a face is considered a potential match (higher = stricter).", default=0.8, gt=0.0, le=1.0, ) detection_threshold: float = Field( default=0.7, - title="Minimum face detection score required to be considered a face.", + title="Detection threshold", + description="Minimum detection confidence required to consider a face detection valid.", gt=0.0, le=1.0, ) recognition_threshold: float = Field( default=0.9, - title="Minimum face distance score required to be considered a match.", + title="Recognition threshold", + description="Face embedding distance threshold to consider two faces a match.", gt=0.0, le=1.0, ) min_area: int = Field( - default=750, title="Min area of face box to consider running face recognition." 
+ default=750, + title="Minimum face area", + description="Minimum area (pixels) of a detected face box required to attempt recognition.", ) min_faces: int = Field( default=1, gt=0, le=6, - title="Min face recognitions for the sub label to be applied to the person object.", + title="Minimum faces", + description="Minimum number of face recognitions required before applying a recognized sub-label to a person.", ) save_attempts: int = Field( default=200, ge=0, - title="Number of face attempts to save in the recent recognitions tab.", + title="Save attempts", + description="Number of face recognition attempts to retain for recent recognition UI.", ) blur_confidence_filter: bool = Field( - default=True, title="Apply blur quality filter to face confidence." + default=True, + title="Blur confidence filter", + description="Adjust confidence scores based on image blur to reduce false positives for poor quality faces.", ) device: Optional[str] = Field( default=None, - title="The device key to use for face recognition.", + title="Device", description="This is an override, to target a specific device. See https://onnxruntime.ai/docs/execution-providers/ for more information", ) class CameraFaceRecognitionConfig(FrigateBaseModel): - enabled: bool = Field(default=False, title="Enable face recognition.") + enabled: bool = Field( + default=False, + title="Enable face recognition", + description="Enable or disable face recognition.", + ) min_area: int = Field( - default=750, title="Min area of face box to consider running face recognition." + default=750, + title="Minimum face area", + description="Minimum area (pixels) of a detected face box required to attempt recognition.", ) model_config = ConfigDict(extra="forbid", protected_namespaces=()) class ReplaceRule(FrigateBaseModel): - pattern: str = Field(..., title="Regex pattern to match.") - replacement: str = Field( - ..., title="Replacement string (supports backrefs like '\\1')." 
- ) + pattern: str = Field(..., title="Regex pattern") + replacement: str = Field(..., title="Replacement string") class LicensePlateRecognitionConfig(FrigateBaseModel): - enabled: bool = Field(default=False, title="Enable license plate recognition.") + enabled: bool = Field( + default=False, + title="Enable LPR", + description="Enable or disable license plate recognition for all cameras; can be overridden per-camera.", + ) model_size: str = Field( - default="small", title="The size of the embeddings model used." + default="small", + title="Model size", + description="Model size used for text detection/recognition. Most users should use 'small'.", ) detection_threshold: float = Field( default=0.7, - title="License plate object confidence score required to begin running recognition.", + title="Detection threshold", + description="Detection confidence threshold to begin running OCR on a suspected plate.", gt=0.0, le=1.0, ) min_area: int = Field( default=1000, - title="Minimum area of license plate to begin running recognition.", + title="Minimum plate area", + description="Minimum plate area (pixels) required to attempt recognition.", ) recognition_threshold: float = Field( default=0.9, - title="Recognition confidence score required to add the plate to the object as a sub label.", + title="Recognition threshold", + description="Confidence threshold required for recognized plate text to be attached as a sub-label.", gt=0.0, le=1.0, ) min_plate_length: int = Field( default=4, - title="Minimum number of characters a license plate must have to be added to the object as a sub label.", + title="Min plate length", + description="Minimum number of characters a recognized plate must contain to be considered valid.", ) format: Optional[str] = Field( default=None, - title="Regular expression for the expected format of license plate.", + title="Plate format regex", + description="Optional regex to validate recognized plate strings against an expected format.", ) match_distance: 
int = Field( default=1, - title="Allow this number of missing/incorrect characters to still cause a detected plate to match a known plate.", + title="Match distance", + description="Number of character mismatches allowed when comparing detected plates to known plates.", ge=0, ) known_plates: Optional[Dict[str, List[str]]] = Field( - default={}, title="Known plates to track (strings or regular expressions)." + default={}, + title="Known plates", + description="List of plates or regexes to specially track or alert on.", ) enhancement: int = Field( default=0, - title="Amount of contrast adjustment and denoising to apply to license plate images before recognition.", + title="Enhancement level", + description="Enhancement level (0-10) to apply to plate crops prior to OCR; higher values may not always improve results, levels above 5 may only work with night time plates and should be used with caution.", ge=0, le=10, ) debug_save_plates: bool = Field( default=False, - title="Save plates captured for LPR for debugging purposes.", + title="Save debug plates", + description="Save plate crop images for debugging LPR performance.", ) device: Optional[str] = Field( default=None, - title="The device key to use for LPR.", + title="Device", description="This is an override, to target a specific device. See https://onnxruntime.ai/docs/execution-providers/ for more information", ) replace_rules: List[ReplaceRule] = Field( default_factory=list, - title="List of regex replacement rules for normalizing detected plates. 
Each rule has 'pattern' and 'replacement'.", + title="Replacement rules", + description="Regex replacement rules used to normalize detected plate strings before matching.", ) class CameraLicensePlateRecognitionConfig(FrigateBaseModel): - enabled: bool = Field(default=False, title="Enable license plate recognition.") + enabled: bool = Field( + default=False, + title="Enable LPR", + description="Enable or disable LPR on this camera.", + ) expire_time: int = Field( default=3, - title="Expire plates not seen after number of seconds (for dedicated LPR cameras only).", + title="Expire seconds", + description="Time in seconds after which an unseen plate is expired from the tracker (for dedicated LPR cameras only).", gt=0, ) min_area: int = Field( default=1000, - title="Minimum area of license plate to begin running recognition.", + title="Minimum plate area", + description="Minimum plate area (pixels) required to attempt recognition.", ) enhancement: int = Field( default=0, - title="Amount of contrast adjustment and denoising to apply to license plate images before recognition.", + title="Enhancement level", + description="Enhancement level (0-10) to apply to plate crops prior to OCR; higher values may not always improve results, levels above 5 may only work with night time plates and should be used with caution.", ge=0, le=10, ) @@ -314,12 +422,18 @@ class CameraLicensePlateRecognitionConfig(FrigateBaseModel): class CameraAudioTranscriptionConfig(FrigateBaseModel): - enabled: bool = Field(default=False, title="Enable audio transcription.") + enabled: bool = Field( + default=False, + title="Enable transcription", + description="Enable or disable manually triggered audio event transcription.", + ) enabled_in_config: Optional[bool] = Field( - default=None, title="Keep track of original state of audio transcription." + default=None, title="Original transcription state" ) live_enabled: Optional[bool] = Field( - default=False, title="Enable live transcriptions." 
+ default=False, + title="Live transcription", + description="Enable streaming live transcription for audio as it is received.", ) model_config = ConfigDict(extra="forbid", protected_namespaces=()) diff --git a/frigate/config/config.py b/frigate/config/config.py index e31e3d8c8..3934976d3 100644 --- a/frigate/config/config.py +++ b/frigate/config/config.py @@ -299,116 +299,189 @@ def verify_lpr_and_face( class FrigateConfig(FrigateBaseModel): - version: Optional[str] = Field(default=None, title="Current config version.") + version: Optional[str] = Field( + default=None, + title="Current config version", + description="Numeric or string version of the active configuration to help detect migrations or format changes.", + ) safe_mode: bool = Field( - default=False, title="If Frigate should be started in safe mode." + default=False, + title="Safe mode", + description="When enabled, start Frigate in safe mode with reduced features for troubleshooting.", ) # Fields that install global state should be defined first, so that their validators run first. environment_vars: EnvVars = Field( - default_factory=dict, title="Frigate environment variables." + default_factory=dict, + title="Environment variables", + description="Key/value pairs of environment variables to set for the Frigate process in Home Assistant OS. 
Non-HAOS users must use Docker environment variable configuration instead.", ) logger: LoggerConfig = Field( default_factory=LoggerConfig, - title="Logging configuration.", + title="Logging", + description="Controls default log verbosity and per-component log level overrides.", validate_default=True, ) # Global config - auth: AuthConfig = Field(default_factory=AuthConfig, title="Auth configuration.") + auth: AuthConfig = Field( + default_factory=AuthConfig, + title="Authentication", + description="Authentication and session-related settings including cookie and rate limit options.", + ) database: DatabaseConfig = Field( - default_factory=DatabaseConfig, title="Database configuration." + default_factory=DatabaseConfig, + title="Database", + description="Settings for the SQLite database used by Frigate to store tracked object and recording metadata.", ) go2rtc: RestreamConfig = Field( - default_factory=RestreamConfig, title="Global restream configuration." + default_factory=RestreamConfig, + title="go2rtc", + description="Settings for the integrated go2rtc restreaming service used for live stream relaying and translation.", + ) + mqtt: MqttConfig = Field( + title="MQTT", + description="Settings for connecting and publishing telemetry, snapshots, and event details to an MQTT broker.", ) - mqtt: MqttConfig = Field(title="MQTT configuration.") notifications: NotificationConfig = Field( - default_factory=NotificationConfig, title="Global notification configuration." 
+ default_factory=NotificationConfig, + title="Notifications", + description="Settings to enable and control notifications for all cameras; can be overridden per-camera.", ) networking: NetworkingConfig = Field( - default_factory=NetworkingConfig, title="Networking configuration" + default_factory=NetworkingConfig, + title="Networking", + description="Network-related settings such as IPv6 enablement for Frigate endpoints.", ) proxy: ProxyConfig = Field( - default_factory=ProxyConfig, title="Proxy configuration." + default_factory=ProxyConfig, + title="Proxy", + description="Settings for integrating Frigate behind a reverse proxy that passes authenticated user headers.", ) telemetry: TelemetryConfig = Field( - default_factory=TelemetryConfig, title="Telemetry configuration." + default_factory=TelemetryConfig, + title="Telemetry", + description="System telemetry and stats options including GPU and network bandwidth monitoring.", + ) + tls: TlsConfig = Field( + default_factory=TlsConfig, + title="TLS", + description="TLS settings for Frigate's web endpoints (port 8971).", + ) + ui: UIConfig = Field( + default_factory=UIConfig, + title="UI", + description="User interface preferences such as timezone, time/date formatting, and units.", ) - tls: TlsConfig = Field(default_factory=TlsConfig, title="TLS configuration.") - ui: UIConfig = Field(default_factory=UIConfig, title="UI configuration.") # Detector config detectors: Dict[str, BaseDetectorConfig] = Field( default=DEFAULT_DETECTORS, - title="Detector hardware configuration.", + title="Detector hardware", + description="Configuration for object detectors (CPU, GPU, ONNX backends) and any detector-specific model settings.", ) model: ModelConfig = Field( - default_factory=ModelConfig, title="Detection model configuration." 
+ default_factory=ModelConfig, + title="Detection model", + description="Settings to configure a custom object detection model and its input shape.", ) # GenAI config (named provider configs: name -> GenAIConfig) genai: Dict[str, GenAIConfig] = Field( - default_factory=dict, title="Generative AI configuration (named providers)." + default_factory=dict, + title="Generative AI configuration (named providers).", + description="Settings for integrated generative AI providers used to generate object descriptions and review summaries.", ) # Camera config - cameras: Dict[str, CameraConfig] = Field(title="Camera configuration.") + cameras: Dict[str, CameraConfig] = Field(title="Cameras", description="Cameras") audio: AudioConfig = Field( - default_factory=AudioConfig, title="Global Audio events configuration." + default_factory=AudioConfig, + title="Audio events", + description="Settings for audio-based event detection for all cameras; can be overridden per-camera.", ) birdseye: BirdseyeConfig = Field( - default_factory=BirdseyeConfig, title="Birdseye configuration." + default_factory=BirdseyeConfig, + title="Birdseye", + description="Settings for the Birdseye composite view that composes multiple camera feeds into a single layout.", ) detect: DetectConfig = Field( - default_factory=DetectConfig, title="Global object tracking configuration." + default_factory=DetectConfig, + title="Object Detection", + description="Settings for the detection/detect role used to run object detection and initialize trackers.", ) ffmpeg: FfmpegConfig = Field( - default_factory=FfmpegConfig, title="Global FFmpeg configuration." + default_factory=FfmpegConfig, + title="FFmpeg", + description="FFmpeg settings including binary path, args, hwaccel options, and per-role output args.", ) live: CameraLiveConfig = Field( - default_factory=CameraLiveConfig, title="Live playback settings." 
+ default_factory=CameraLiveConfig, + title="Live playback", + description="Settings used by the Web UI to control live stream resolution and quality.", ) motion: Optional[MotionConfig] = Field( - default=None, title="Global motion detection configuration." + default=None, + title="Motion detection", + description="Default motion detection settings applied to cameras unless overridden per-camera.", ) objects: ObjectConfig = Field( - default_factory=ObjectConfig, title="Global object configuration." + default_factory=ObjectConfig, + title="Objects", + description="Object tracking defaults including which labels to track and per-object filters.", ) record: RecordConfig = Field( - default_factory=RecordConfig, title="Global record configuration." + default_factory=RecordConfig, + title="Recording", + description="Recording and retention settings applied to cameras unless overridden per-camera.", ) review: ReviewConfig = Field( - default_factory=ReviewConfig, title="Review configuration." + default_factory=ReviewConfig, + title="Review", + description="Settings that control alerts, detections, and GenAI review summaries used by the UI and storage.", ) snapshots: SnapshotsConfig = Field( - default_factory=SnapshotsConfig, title="Global snapshots configuration." + default_factory=SnapshotsConfig, + title="Snapshots", + description="Settings for saved JPEG snapshots of tracked objects for all cameras; can be overridden per-camera.", ) timestamp_style: TimestampStyleConfig = Field( default_factory=TimestampStyleConfig, - title="Global timestamp style configuration.", + title="Timestamp style", + description="Styling options for in-feed timestamps applied to debug view and snapshots.", ) # Classification Config audio_transcription: AudioTranscriptionConfig = Field( - default_factory=AudioTranscriptionConfig, title="Audio transcription config." 
+ default_factory=AudioTranscriptionConfig, + title="Audio transcription", + description="Settings for live and speech audio transcription used for events and live captions.", ) classification: ClassificationConfig = Field( - default_factory=ClassificationConfig, title="Object classification config." + default_factory=ClassificationConfig, + title="Object classification", + description="Settings for classification models used to refine object labels or state classification.", ) semantic_search: SemanticSearchConfig = Field( - default_factory=SemanticSearchConfig, title="Semantic search configuration." + default_factory=SemanticSearchConfig, + title="Semantic Search", + description="Settings for Semantic Search which builds and queries object embeddings to find similar items.", ) face_recognition: FaceRecognitionConfig = Field( - default_factory=FaceRecognitionConfig, title="Face recognition config." + default_factory=FaceRecognitionConfig, + title="Face recognition", + description="Settings for face detection and recognition for all cameras; can be overridden per-camera.", ) lpr: LicensePlateRecognitionConfig = Field( default_factory=LicensePlateRecognitionConfig, - title="License Plate recognition config.", + title="License Plate Recognition", + description="License plate recognition settings including detection thresholds, formatting, and known plates.", ) camera_groups: Dict[str, CameraGroupConfig] = Field( - default_factory=dict, title="Camera group configuration" + default_factory=dict, + title="Camera groups", + description="Configuration for named camera groups used to organize cameras in the UI.", ) _plus_api: PlusApi @@ -487,6 +560,9 @@ class FrigateConfig(FrigateBaseModel): # users should not set model themselves if detector_config.model: + logger.warning( + "The model key should be specified at the root level of the config, not under detectors. The nested model key will be ignored." 
+ ) detector_config.model = None model_config = self.model.model_dump(exclude_unset=True, warnings="none") diff --git a/frigate/config/database.py b/frigate/config/database.py index 8daca0d49..8064561f1 100644 --- a/frigate/config/database.py +++ b/frigate/config/database.py @@ -8,4 +8,8 @@ __all__ = ["DatabaseConfig"] class DatabaseConfig(FrigateBaseModel): - path: str = Field(default=DEFAULT_DB_PATH, title="Database path.") # noqa: F821 + path: str = Field( + default=DEFAULT_DB_PATH, + title="Database path", + description="Filesystem path where the Frigate SQLite database file will be stored.", + ) # noqa: F821 diff --git a/frigate/config/logger.py b/frigate/config/logger.py index 0ba3e6972..c8920a198 100644 --- a/frigate/config/logger.py +++ b/frigate/config/logger.py @@ -9,9 +9,15 @@ __all__ = ["LoggerConfig"] class LoggerConfig(FrigateBaseModel): - default: LogLevel = Field(default=LogLevel.info, title="Default logging level.") + default: LogLevel = Field( + default=LogLevel.info, + title="Logging level", + description="Default global log verbosity (debug, info, warning, error).", + ) logs: dict[str, LogLevel] = Field( - default_factory=dict, title="Log level for specified processes." 
+ default_factory=dict, + title="Per-process log level", + description="Per-component log level overrides to increase or decrease verbosity for specific modules.", ) @model_validator(mode="after") diff --git a/frigate/config/mqtt.py b/frigate/config/mqtt.py index a760d0a1f..abd5c74b2 100644 --- a/frigate/config/mqtt.py +++ b/frigate/config/mqtt.py @@ -12,25 +12,73 @@ __all__ = ["MqttConfig"] class MqttConfig(FrigateBaseModel): - enabled: bool = Field(default=True, title="Enable MQTT Communication.") - host: str = Field(default="", title="MQTT Host") - port: int = Field(default=1883, title="MQTT Port") - topic_prefix: str = Field(default="frigate", title="MQTT Topic Prefix") - client_id: str = Field(default="frigate", title="MQTT Client ID") + enabled: bool = Field( + default=True, + title="Enable MQTT", + description="Enable or disable MQTT integration for state, events, and snapshots.", + ) + host: str = Field( + default="", + title="MQTT host", + description="Hostname or IP address of the MQTT broker.", + ) + port: int = Field( + default=1883, + title="MQTT port", + description="Port of the MQTT broker (usually 1883 for plain MQTT).", + ) + topic_prefix: str = Field( + default="frigate", + title="Topic prefix", + description="MQTT topic prefix for all Frigate topics; must be unique if running multiple instances.", + ) + client_id: str = Field( + default="frigate", + title="Client ID", + description="Client identifier used when connecting to the MQTT broker; should be unique per instance.", + ) stats_interval: int = Field( - default=60, ge=FREQUENCY_STATS_POINTS, title="MQTT Camera Stats Interval" + default=60, + ge=FREQUENCY_STATS_POINTS, + title="Stats interval", + description="Interval in seconds for publishing system and camera stats to MQTT.", + ) + user: Optional[EnvString] = Field( + default=None, + title="MQTT username", + description="Optional MQTT username; can be provided via environment variables or secrets.", ) - user: Optional[EnvString] = 
Field(default=None, title="MQTT Username") password: Optional[EnvString] = Field( - default=None, title="MQTT Password", validate_default=True + default=None, + title="MQTT password", + description="Optional MQTT password; can be provided via environment variables or secrets.", + validate_default=True, + ) + tls_ca_certs: Optional[str] = Field( + default=None, + title="TLS CA certs", + description="Path to CA certificate for TLS connections to the broker (for self-signed certs).", ) - tls_ca_certs: Optional[str] = Field(default=None, title="MQTT TLS CA Certificates") tls_client_cert: Optional[str] = Field( - default=None, title="MQTT TLS Client Certificate" + default=None, + title="Client cert", + description="Client certificate path for TLS mutual authentication; do not set user/password when using client certs.", + ) + tls_client_key: Optional[str] = Field( + default=None, + title="Client key", + description="Private key path for the client certificate.", + ) + tls_insecure: Optional[bool] = Field( + default=None, + title="TLS insecure", + description="Allow insecure TLS connections by skipping hostname verification (not recommended).", + ) + qos: int = Field( + default=0, + title="MQTT QoS", + description="Quality of Service level for MQTT publishes/subscriptions (0, 1, or 2).", ) - tls_client_key: Optional[str] = Field(default=None, title="MQTT TLS Client Key") - tls_insecure: Optional[bool] = Field(default=None, title="MQTT TLS Insecure") - qos: int = Field(default=0, title="MQTT QoS") @model_validator(mode="after") def user_requires_pass(self, info: ValidationInfo) -> Self: diff --git a/frigate/config/network.py b/frigate/config/network.py index ab4e5b83e..f537c73b9 100644 --- a/frigate/config/network.py +++ b/frigate/config/network.py @@ -8,20 +8,34 @@ __all__ = ["IPv6Config", "ListenConfig", "NetworkingConfig"] class IPv6Config(FrigateBaseModel): - enabled: bool = Field(default=False, title="Enable IPv6 for port 5000 and/or 8971") + enabled: bool = Field( + 
default=False, + title="Enable IPv6", + description="Enable IPv6 support for Frigate services (API and UI) where applicable.", + ) class ListenConfig(FrigateBaseModel): internal: Union[int, str] = Field( - default=5000, title="Internal listening port for Frigate" + default=5000, + title="Internal port", + description="Internal listening port for Frigate (default 5000).", ) external: Union[int, str] = Field( - default=8971, title="External listening port for Frigate" + default=8971, + title="External port", + description="External listening port for Frigate (default 8971).", ) class NetworkingConfig(FrigateBaseModel): - ipv6: IPv6Config = Field(default_factory=IPv6Config, title="IPv6 configuration") - listen: ListenConfig = Field( - default_factory=ListenConfig, title="Listening ports configuration" + ipv6: IPv6Config = Field( + default_factory=IPv6Config, + title="IPv6 configuration", + description="IPv6-specific settings for Frigate network services.", + ) + listen: ListenConfig = Field( + default_factory=ListenConfig, + title="Listening ports configuration", + description="Configuration for internal and external listening ports. This is for advanced users. For the majority of use cases it's recommended to change the ports section of your Docker compose file.", ) diff --git a/frigate/config/proxy.py b/frigate/config/proxy.py index a46b7b897..2426fcf10 100644 --- a/frigate/config/proxy.py +++ b/frigate/config/proxy.py @@ -10,36 +10,47 @@ __all__ = ["ProxyConfig", "HeaderMappingConfig"] class HeaderMappingConfig(FrigateBaseModel): user: str = Field( - default=None, title="Header name from upstream proxy to identify user." 
+ default=None, + title="User header", + description="Header containing the authenticated username provided by the upstream proxy.", ) role: str = Field( default=None, - title="Header name from upstream proxy to identify user role.", + title="Role header", + description="Header containing the authenticated user's role or groups from the upstream proxy.", ) role_map: Optional[dict[str, list[str]]] = Field( default_factory=dict, - title=("Mapping of Frigate roles to upstream group values. "), + title=("Role mapping"), + description="Map upstream group values to Frigate roles (for example map admin groups to the admin role).", ) class ProxyConfig(FrigateBaseModel): header_map: HeaderMappingConfig = Field( default_factory=HeaderMappingConfig, - title="Header mapping definitions for proxy user passing.", + title="Header mapping", + description="Map incoming proxy headers to Frigate user and role fields for proxy-based auth.", ) logout_url: Optional[str] = Field( - default=None, title="Redirect url for logging out with proxy." + default=None, + title="Logout URL", + description="URL to redirect users to when logging out via the proxy.", ) auth_secret: Optional[EnvString] = Field( default=None, - title="Secret value for proxy authentication.", + title="Proxy secret", + description="Optional secret checked against the X-Proxy-Secret header to verify trusted proxies.", ) default_role: Optional[str] = Field( - default="viewer", title="Default role for proxy users." 
+ default="viewer", + title="Default role", + description="Default role assigned to proxy-authenticated users when no role mapping applies (admin or viewer).", ) separator: Optional[str] = Field( default=",", - title="The character used to separate values in a mapped header.", + title="Separator character", + description="Character used to split multiple values provided in proxy headers.", ) @field_validator("separator", mode="before") diff --git a/frigate/config/telemetry.py b/frigate/config/telemetry.py index ab18831e1..41c3f7bbc 100644 --- a/frigate/config/telemetry.py +++ b/frigate/config/telemetry.py @@ -8,22 +8,41 @@ __all__ = ["TelemetryConfig", "StatsConfig"] class StatsConfig(FrigateBaseModel): - amd_gpu_stats: bool = Field(default=True, title="Enable AMD GPU stats.") - intel_gpu_stats: bool = Field(default=True, title="Enable Intel GPU stats.") + amd_gpu_stats: bool = Field( + default=True, + title="AMD GPU stats", + description="Enable collection of AMD GPU statistics if an AMD GPU is present.", + ) + intel_gpu_stats: bool = Field( + default=True, + title="Intel GPU stats", + description="Enable collection of Intel GPU statistics if an Intel GPU is present.", + ) network_bandwidth: bool = Field( - default=False, title="Enable network bandwidth for ffmpeg processes." + default=False, + title="Network bandwidth", + description="Enable per-process network bandwidth monitoring for camera ffmpeg processes and detectors (requires capabilities).", ) intel_gpu_device: Optional[str] = Field( - default=None, title="Define the device to use when gathering SR-IOV stats." 
+ default=None, + title="SR-IOV device", + description="Device identifier used when treating Intel GPUs as SR-IOV to fix GPU stats.", ) class TelemetryConfig(FrigateBaseModel): network_interfaces: list[str] = Field( default=[], - title="Enabled network interfaces for bandwidth calculation.", + title="Network interfaces", + description="List of network interface name prefixes to monitor for bandwidth statistics.", ) stats: StatsConfig = Field( - default_factory=StatsConfig, title="System Stats Configuration" + default_factory=StatsConfig, + title="System stats", + description="Options to enable/disable collection of various system and GPU statistics.", + ) + version_check: bool = Field( + default=True, + title="Version check", + description="Enable an outbound check to detect if a newer Frigate version is available.", ) - version_check: bool = Field(default=True, title="Enable latest version check.") diff --git a/frigate/config/tls.py b/frigate/config/tls.py index 673e105e9..cada11087 100644 --- a/frigate/config/tls.py +++ b/frigate/config/tls.py @@ -6,4 +6,8 @@ __all__ = ["TlsConfig"] class TlsConfig(FrigateBaseModel): - enabled: bool = Field(default=True, title="Enable TLS for port 8971") + enabled: bool = Field( + default=True, + title="Enable TLS", + description="Enable TLS for Frigate's web UI and API on the configured TLS port.", + ) diff --git a/frigate/config/ui.py b/frigate/config/ui.py index 8e0d4d77d..2c3104bbc 100644 --- a/frigate/config/ui.py +++ b/frigate/config/ui.py @@ -27,16 +27,28 @@ class UnitSystemEnum(str, Enum): class UIConfig(FrigateBaseModel): - timezone: Optional[str] = Field(default=None, title="Override UI timezone.") + timezone: Optional[str] = Field( + default=None, + title="Timezone", + description="Optional timezone to display across the UI (defaults to browser local time if unset).", + ) time_format: TimeFormatEnum = Field( - default=TimeFormatEnum.browser, title="Override UI time format." 
+ default=TimeFormatEnum.browser, + title="Time format", + description="Time format to use in the UI (browser, 12hour, or 24hour).", ) date_style: DateTimeStyleEnum = Field( - default=DateTimeStyleEnum.short, title="Override UI dateStyle." + default=DateTimeStyleEnum.short, + title="Date style", + description="Date style to use in the UI (full, long, medium, short).", ) time_style: DateTimeStyleEnum = Field( - default=DateTimeStyleEnum.medium, title="Override UI timeStyle." + default=DateTimeStyleEnum.medium, + title="Time style", + description="Time style to use in the UI (full, long, medium, short).", ) unit_system: UnitSystemEnum = Field( - default=UnitSystemEnum.metric, title="The unit system to use for measurements." + default=UnitSystemEnum.metric, + title="Unit system", + description="Unit system for display (metric or imperial) used in the UI and MQTT.", ) diff --git a/frigate/detectors/detector_config.py b/frigate/detectors/detector_config.py index aa92f28f4..22623c7d7 100644 --- a/frigate/detectors/detector_config.py +++ b/frigate/detectors/detector_config.py @@ -45,30 +45,55 @@ class ModelTypeEnum(str, Enum): class ModelConfig(BaseModel): - path: Optional[str] = Field(None, title="Custom Object detection model path.") - labelmap_path: Optional[str] = Field( - None, title="Label map for custom object detector." 
+ path: Optional[str] = Field( + None, + title="Custom Object detection model path", + description="Path to a custom detection model file (or plus:// for Frigate+ models).", + ) + labelmap_path: Optional[str] = Field( + None, + title="Label map for custom object detector", + description="Path to a labelmap file that maps numeric classes to string labels for the detector.", + ) + width: int = Field( + default=320, + title="Object detection model input width", + description="Width of the model input tensor in pixels.", + ) + height: int = Field( + default=320, + title="Object detection model input height", + description="Height of the model input tensor in pixels.", ) - width: int = Field(default=320, title="Object detection model input width.") - height: int = Field(default=320, title="Object detection model input height.") labelmap: Dict[int, str] = Field( - default_factory=dict, title="Labelmap customization." + default_factory=dict, + title="Labelmap customization", + description="Overrides or remapping entries to merge into the standard labelmap.", ) attributes_map: Dict[str, list[str]] = Field( default=DEFAULT_ATTRIBUTE_LABEL_MAP, - title="Map of object labels to their attribute labels.", + title="Map of object labels to their attribute labels", + description="Mapping from object labels to attribute labels used to attach metadata (for example 'car' -> ['license_plate']).", ) input_tensor: InputTensorEnum = Field( - default=InputTensorEnum.nhwc, title="Model Input Tensor Shape" + default=InputTensorEnum.nhwc, + title="Model Input Tensor Shape", + description="Tensor format expected by the model: 'nhwc' or 'nchw'.", ) input_pixel_format: PixelFormatEnum = Field( - default=PixelFormatEnum.rgb, title="Model Input Pixel Color Format" + default=PixelFormatEnum.rgb, + title="Model Input Pixel Color Format", + description="Pixel colorspace expected by the model: 'rgb', 'bgr', or 'yuv'.", ) input_dtype: InputDTypeEnum = Field( - default=InputDTypeEnum.int, title="Model 
Input D Type" + default=InputDTypeEnum.int, + title="Model Input D Type", + description="Data type of the model input tensor (for example 'float32').", ) model_type: ModelTypeEnum = Field( - default=ModelTypeEnum.ssd, title="Object Detection Model Type" + default=ModelTypeEnum.ssd, + title="Object Detection Model Type", + description="Detector model architecture type (ssd, yolox, yolonas) used by some detectors for optimization.", ) _merged_labelmap: Optional[Dict[int, str]] = PrivateAttr() _colormap: Dict[int, Tuple[int, int, int]] = PrivateAttr() @@ -210,12 +235,20 @@ class ModelConfig(BaseModel): class BaseDetectorConfig(BaseModel): # the type field must be defined in all subclasses - type: str = Field(default="cpu", title="Detector Type") + type: str = Field( + default="cpu", + title="Detector Type", + description="Type of detector to use for object detection (for example 'cpu', 'edgetpu', 'openvino').", + ) model: Optional[ModelConfig] = Field( - default=None, title="Detector specific model configuration." + default=None, + title="Detector specific model configuration", + description="Detector-specific model configuration options (path, input size, etc.).", ) model_path: Optional[str] = Field( - default=None, title="Detector specific model path." 
+ default=None, + title="Detector specific model path", + description="File path to the detector model binary if required by the chosen detector.", ) model_config = ConfigDict( extra="allow", arbitrary_types_allowed=True, protected_namespaces=() diff --git a/frigate/detectors/plugins/cpu_tfl.py b/frigate/detectors/plugins/cpu_tfl.py index 6d336bb6b..2224a2bda 100644 --- a/frigate/detectors/plugins/cpu_tfl.py +++ b/frigate/detectors/plugins/cpu_tfl.py @@ -1,6 +1,6 @@ import logging -from pydantic import Field +from pydantic import ConfigDict, Field from typing_extensions import Literal from frigate.detectors.detection_api import DetectionApi @@ -21,8 +21,18 @@ DETECTOR_KEY = "cpu" class CpuDetectorConfig(BaseDetectorConfig): + """CPU TFLite detector that runs TensorFlow Lite models on the host CPU without hardware acceleration. Not recommended.""" + + model_config = ConfigDict( + title="CPU", + ) + type: Literal[DETECTOR_KEY] - num_threads: int = Field(default=3, title="Number of detection threads") + num_threads: int = Field( + default=3, + title="Number of detection threads", + description="The number of threads used for CPU-based inference.", + ) class CpuTfl(DetectionApi): diff --git a/frigate/detectors/plugins/deepstack.py b/frigate/detectors/plugins/deepstack.py index e00a4e70d..9b5fcd5af 100644 --- a/frigate/detectors/plugins/deepstack.py +++ b/frigate/detectors/plugins/deepstack.py @@ -4,7 +4,7 @@ import logging import numpy as np import requests from PIL import Image -from pydantic import Field +from pydantic import ConfigDict, Field from typing_extensions import Literal from frigate.detectors.detection_api import DetectionApi @@ -16,12 +16,28 @@ DETECTOR_KEY = "deepstack" class DeepstackDetectorConfig(BaseDetectorConfig): + """DeepStack/CodeProject.AI detector that sends images to a remote DeepStack HTTP API for inference. 
Not recommended.""" + + model_config = ConfigDict( + title="DeepStack", + ) + type: Literal[DETECTOR_KEY] api_url: str = Field( - default="http://localhost:80/v1/vision/detection", title="DeepStack API URL" + default="http://localhost:80/v1/vision/detection", + title="DeepStack API URL", + description="The URL of the DeepStack API.", + ) + api_timeout: float = Field( + default=0.1, + title="DeepStack API timeout (in seconds)", + description="Maximum time allowed for a DeepStack API request.", + ) + api_key: str = Field( + default="", + title="DeepStack API key (if required)", + description="Optional API key for authenticated DeepStack services.", ) - api_timeout: float = Field(default=0.1, title="DeepStack API timeout (in seconds)") - api_key: str = Field(default="", title="DeepStack API key (if required)") class DeepStack(DetectionApi): diff --git a/frigate/detectors/plugins/degirum.py b/frigate/detectors/plugins/degirum.py index 28a13389f..5afb32a3a 100644 --- a/frigate/detectors/plugins/degirum.py +++ b/frigate/detectors/plugins/degirum.py @@ -2,7 +2,7 @@ import logging import queue import numpy as np -from pydantic import Field +from pydantic import ConfigDict, Field from typing_extensions import Literal from frigate.detectors.detection_api import DetectionApi @@ -14,10 +14,28 @@ DETECTOR_KEY = "degirum" ### DETECTOR CONFIG ### class DGDetectorConfig(BaseDetectorConfig): + """DeGirum detector for running models via DeGirum cloud or local inference services.""" + + model_config = ConfigDict( + title="DeGirum", + ) + type: Literal[DETECTOR_KEY] - location: str = Field(default=None, title="Inference Location") - zoo: str = Field(default=None, title="Model Zoo") - token: str = Field(default=None, title="DeGirum Cloud Token") + location: str = Field( + default=None, + title="Inference Location", + description="Location of the DeGirim inference engine (e.g. 
'@cloud', '127.0.0.1').", + ) + zoo: str = Field( + default=None, + title="Model Zoo", + description="Path or URL to the DeGirum model zoo.", + ) + token: str = Field( + default=None, + title="DeGirum Cloud Token", + description="Token for DeGirum Cloud access.", + ) ### ACTUAL DETECTOR ### diff --git a/frigate/detectors/plugins/edgetpu_tfl.py b/frigate/detectors/plugins/edgetpu_tfl.py index 36c769b4b..02bd9f5ec 100644 --- a/frigate/detectors/plugins/edgetpu_tfl.py +++ b/frigate/detectors/plugins/edgetpu_tfl.py @@ -4,7 +4,7 @@ import os import cv2 import numpy as np -from pydantic import Field +from pydantic import ConfigDict, Field from typing_extensions import Literal from frigate.detectors.detection_api import DetectionApi @@ -21,8 +21,18 @@ DETECTOR_KEY = "edgetpu" class EdgeTpuDetectorConfig(BaseDetectorConfig): + """EdgeTPU detector that runs TensorFlow Lite models compiled for Coral EdgeTPU using the EdgeTPU delegate.""" + + model_config = ConfigDict( + title="EdgeTPU", + ) + type: Literal[DETECTOR_KEY] - device: str = Field(default=None, title="Device Type") + device: str = Field( + default=None, + title="Device Type", + description="The device to use for EdgeTPU inference (e.g. 
'usb', 'pci').", + ) class EdgeTpuTfl(DetectionApi): diff --git a/frigate/detectors/plugins/hailo8l.py b/frigate/detectors/plugins/hailo8l.py index cafc809c9..bbe84d52f 100755 --- a/frigate/detectors/plugins/hailo8l.py +++ b/frigate/detectors/plugins/hailo8l.py @@ -8,7 +8,7 @@ from typing import Dict, List, Optional, Tuple import cv2 import numpy as np -from pydantic import Field +from pydantic import ConfigDict, Field from typing_extensions import Literal from frigate.const import MODEL_CACHE_DIR @@ -410,5 +410,15 @@ class HailoDetector(DetectionApi): # ----------------- HailoDetectorConfig Class ----------------- # class HailoDetectorConfig(BaseDetectorConfig): + """Hailo-8/Hailo-8L detector using HEF models and the HailoRT SDK for inference on Hailo hardware.""" + + model_config = ConfigDict( + title="Hailo-8/Hailo-8L", + ) + type: Literal[DETECTOR_KEY] - device: str = Field(default="PCIe", title="Device Type") + device: str = Field( + default="PCIe", + title="Device Type", + description="The device to use for Hailo inference (e.g. 'PCIe', 'M.2').", + ) diff --git a/frigate/detectors/plugins/memryx.py b/frigate/detectors/plugins/memryx.py index a93888f8a..e0ad401cb 100644 --- a/frigate/detectors/plugins/memryx.py +++ b/frigate/detectors/plugins/memryx.py @@ -8,7 +8,7 @@ from queue import Queue import cv2 import numpy as np -from pydantic import BaseModel, Field +from pydantic import BaseModel, ConfigDict, Field from typing_extensions import Literal from frigate.detectors.detection_api import DetectionApi @@ -30,8 +30,18 @@ class ModelConfig(BaseModel): class MemryXDetectorConfig(BaseDetectorConfig): + """MemryX MX3 detector that runs compiled DFP models on MemryX accelerators.""" + + model_config = ConfigDict( + title="MemryX", + ) + type: Literal[DETECTOR_KEY] - device: str = Field(default="PCIe", title="Device Path") + device: str = Field( + default="PCIe", + title="Device Path", + description="The device to use for MemryX inference (e.g. 
'PCIe').", + ) class MemryXDetector(DetectionApi): diff --git a/frigate/detectors/plugins/onnx.py b/frigate/detectors/plugins/onnx.py index 6c9e510ce..c52480642 100644 --- a/frigate/detectors/plugins/onnx.py +++ b/frigate/detectors/plugins/onnx.py @@ -1,7 +1,7 @@ import logging import numpy as np -from pydantic import Field +from pydantic import ConfigDict, Field from typing_extensions import Literal from frigate.detectors.detection_api import DetectionApi @@ -23,8 +23,18 @@ DETECTOR_KEY = "onnx" class ONNXDetectorConfig(BaseDetectorConfig): + """ONNX detector for running ONNX models; will use available acceleration backends (CUDA/ROCm/OpenVINO) when available.""" + + model_config = ConfigDict( + title="ONNX", + ) + type: Literal[DETECTOR_KEY] - device: str = Field(default="AUTO", title="Device Type") + device: str = Field( + default="AUTO", + title="Device Type", + description="The device to use for ONNX inference (e.g. 'AUTO', 'CPU', 'GPU').", + ) class ONNXDetector(DetectionApi): diff --git a/frigate/detectors/plugins/openvino.py b/frigate/detectors/plugins/openvino.py index bda5c8871..f73b7cb0c 100644 --- a/frigate/detectors/plugins/openvino.py +++ b/frigate/detectors/plugins/openvino.py @@ -2,7 +2,7 @@ import logging import numpy as np import openvino as ov -from pydantic import Field +from pydantic import ConfigDict, Field from typing_extensions import Literal from frigate.detectors.detection_api import DetectionApi @@ -20,8 +20,18 @@ DETECTOR_KEY = "openvino" class OvDetectorConfig(BaseDetectorConfig): + """OpenVINO detector for AMD and Intel CPUs, Intel GPUs and Intel VPU hardware.""" + + model_config = ConfigDict( + title="OpenVINO", + ) + type: Literal[DETECTOR_KEY] - device: str = Field(default=None, title="Device Type") + device: str = Field( + default=None, + title="Device Type", + description="The device to use for OpenVINO inference (e.g. 
'CPU', 'GPU', 'NPU').", + ) class OvDetector(DetectionApi): diff --git a/frigate/detectors/plugins/rknn.py b/frigate/detectors/plugins/rknn.py index c16df507e..15ab93dcb 100644 --- a/frigate/detectors/plugins/rknn.py +++ b/frigate/detectors/plugins/rknn.py @@ -6,7 +6,7 @@ from typing import Literal import cv2 import numpy as np -from pydantic import Field +from pydantic import ConfigDict, Field from frigate.const import MODEL_CACHE_DIR, SUPPORTED_RK_SOCS from frigate.detectors.detection_api import DetectionApi @@ -29,8 +29,20 @@ model_cache_dir = os.path.join(MODEL_CACHE_DIR, "rknn_cache/") class RknnDetectorConfig(BaseDetectorConfig): + """RKNN detector for Rockchip NPUs; runs compiled RKNN models on Rockchip hardware.""" + + model_config = ConfigDict( + title="RKNN", + ) + type: Literal[DETECTOR_KEY] - num_cores: int = Field(default=0, ge=0, le=3, title="Number of NPU cores to use.") + num_cores: int = Field( + default=0, + ge=0, + le=3, + title="Number of NPU cores to use.", + description="The number of NPU cores to use (0 for auto).", + ) class Rknn(DetectionApi): diff --git a/frigate/detectors/plugins/synaptics.py b/frigate/detectors/plugins/synaptics.py index 6181b16d7..e6983a29c 100644 --- a/frigate/detectors/plugins/synaptics.py +++ b/frigate/detectors/plugins/synaptics.py @@ -2,6 +2,7 @@ import logging import os import numpy as np +from pydantic import ConfigDict from typing_extensions import Literal from frigate.detectors.detection_api import DetectionApi @@ -27,6 +28,12 @@ DETECTOR_KEY = "synaptics" class SynapDetectorConfig(BaseDetectorConfig): + """Synaptics NPU detector for models in .synap format using the Synap SDK on Synaptics hardware.""" + + model_config = ConfigDict( + title="Synaptics", + ) + type: Literal[DETECTOR_KEY] diff --git a/frigate/detectors/plugins/teflon_tfl.py b/frigate/detectors/plugins/teflon_tfl.py index 7e29d6630..370d08817 100644 --- a/frigate/detectors/plugins/teflon_tfl.py +++ b/frigate/detectors/plugins/teflon_tfl.py @@ -1,5 
+1,6 @@ import logging +from pydantic import ConfigDict from typing_extensions import Literal from frigate.detectors.detection_api import DetectionApi @@ -18,6 +19,12 @@ DETECTOR_KEY = "teflon_tfl" class TeflonDetectorConfig(BaseDetectorConfig): + """Teflon delegate detector for TFLite using Mesa Teflon delegate library to accelerate inference on supported GPUs.""" + + model_config = ConfigDict( + title="Teflon", + ) + type: Literal[DETECTOR_KEY] diff --git a/frigate/detectors/plugins/tensorrt.py b/frigate/detectors/plugins/tensorrt.py index bf0eb6fa8..087331a2d 100644 --- a/frigate/detectors/plugins/tensorrt.py +++ b/frigate/detectors/plugins/tensorrt.py @@ -14,7 +14,7 @@ try: except ModuleNotFoundError: TRT_SUPPORT = False -from pydantic import Field +from pydantic import ConfigDict, Field from typing_extensions import Literal from frigate.detectors.detection_api import DetectionApi @@ -46,8 +46,16 @@ if TRT_SUPPORT: class TensorRTDetectorConfig(BaseDetectorConfig): + """TensorRT detector for Nvidia Jetson devices using serialized TensorRT engines for accelerated inference.""" + + model_config = ConfigDict( + title="TensorRT", + ) + type: Literal[DETECTOR_KEY] - device: int = Field(default=0, title="GPU Device Index") + device: int = Field( + default=0, title="GPU Device Index", description="The GPU device index to use." 
+ ) class HostDeviceMem(object): diff --git a/frigate/detectors/plugins/zmq_ipc.py b/frigate/detectors/plugins/zmq_ipc.py index cd397aefa..b0e568eff 100644 --- a/frigate/detectors/plugins/zmq_ipc.py +++ b/frigate/detectors/plugins/zmq_ipc.py @@ -5,7 +5,7 @@ from typing import Any, List import numpy as np import zmq -from pydantic import Field +from pydantic import ConfigDict, Field from typing_extensions import Literal from frigate.detectors.detection_api import DetectionApi @@ -17,14 +17,28 @@ DETECTOR_KEY = "zmq" class ZmqDetectorConfig(BaseDetectorConfig): + """ZMQ IPC detector that offloads inference to an external process via a ZeroMQ IPC endpoint.""" + + model_config = ConfigDict( + title="ZMQ IPC", + ) + type: Literal[DETECTOR_KEY] endpoint: str = Field( - default="ipc:///tmp/cache/zmq_detector", title="ZMQ IPC endpoint" + default="ipc:///tmp/cache/zmq_detector", + title="ZMQ IPC endpoint", + description="The ZMQ endpoint to connect to.", ) request_timeout_ms: int = Field( - default=200, title="ZMQ request timeout in milliseconds" + default=200, + title="ZMQ request timeout in milliseconds", + description="Timeout for ZMQ requests in milliseconds.", + ) + linger_ms: int = Field( + default=0, + title="ZMQ socket linger in milliseconds", + description="Socket linger period in milliseconds.", ) - linger_ms: int = Field(default=0, title="ZMQ socket linger in milliseconds") class ZmqIpcDetector(DetectionApi): diff --git a/frigate/util/builtin.py b/frigate/util/builtin.py index 867d2533d..bcdc2feda 100644 --- a/frigate/util/builtin.py +++ b/frigate/util/builtin.py @@ -195,7 +195,8 @@ def flatten_config_data( ) -> Dict[str, Any]: items = [] for key, value in config_data.items(): - new_key = f"{parent_key}.{key}" if parent_key else key + escaped_key = escape_config_key_segment(str(key)) + new_key = f"{parent_key}.{escaped_key}" if parent_key else escaped_key if isinstance(value, dict): items.extend(flatten_config_data(value, new_key).items()) else: @@ -203,6 +204,41 
@@ def flatten_config_data( return dict(items) +def escape_config_key_segment(segment: str) -> str: + """Escape dots and backslashes so they can be treated as literal key chars.""" + return segment.replace("\\", "\\\\").replace(".", "\\.") + + +def split_config_key_path(key_path_str: str) -> list[str]: + """Split a dotted config path, honoring \\. as a literal dot in a key.""" + parts: list[str] = [] + current: list[str] = [] + escaped = False + + for char in key_path_str: + if escaped: + current.append(char) + escaped = False + continue + + if char == "\\": + escaped = True + continue + + if char == ".": + parts.append("".join(current)) + current = [] + continue + + current.append(char) + + if escaped: + current.append("\\") + + parts.append("".join(current)) + return parts + + def update_yaml_file_bulk(file_path: str, updates: Dict[str, Any]): yaml = YAML() yaml.indent(mapping=2, sequence=4, offset=2) @@ -218,7 +254,7 @@ def update_yaml_file_bulk(file_path: str, updates: Dict[str, Any]): # Apply all updates for key_path_str, new_value in updates.items(): - key_path = key_path_str.split(".") + key_path = split_config_key_path(key_path_str) for i in range(len(key_path)): try: index = int(key_path[i]) diff --git a/frigate/util/schema.py b/frigate/util/schema.py new file mode 100644 index 000000000..5ba1bc061 --- /dev/null +++ b/frigate/util/schema.py @@ -0,0 +1,46 @@ +"""JSON schema utilities for Frigate.""" + +from typing import Any, Dict, Type + +from pydantic import BaseModel, TypeAdapter + + +def get_config_schema(config_class: Type[BaseModel]) -> Dict[str, Any]: + """ + Returns the JSON schema for FrigateConfig with polymorphic detectors. + + This utility patches the FrigateConfig schema to include the full polymorphic + definitions for detectors. By default, Pydantic's schema for Dict[str, BaseDetectorConfig] + only includes the base class fields. 
This function replaces it with a reference + to the DetectorConfig union, which includes all available detector subclasses. + """ + # Import here to ensure all detector plugins are loaded through the detectors module + from frigate.detectors import DetectorConfig + + # Get the base schema for FrigateConfig + schema = config_class.model_json_schema() + + # Get the schema for the polymorphic DetectorConfig union + detector_adapter: TypeAdapter = TypeAdapter(DetectorConfig) + detector_schema = detector_adapter.json_schema() + + # Ensure $defs exists in FrigateConfig schema + if "$defs" not in schema: + schema["$defs"] = {} + + # Merge $defs from DetectorConfig into FrigateConfig schema + # This includes the specific schemas for each detector plugin (OvDetectorConfig, etc.) + if "$defs" in detector_schema: + schema["$defs"].update(detector_schema["$defs"]) + + # Extract the union schema (oneOf/discriminator) and add it as a definition + detector_union_schema = {k: v for k, v in detector_schema.items() if k != "$defs"} + schema["$defs"]["DetectorConfig"] = detector_union_schema + + # Update the 'detectors' property to use the polymorphic DetectorConfig definition + if "detectors" in schema.get("properties", {}): + schema["properties"]["detectors"]["additionalProperties"] = { + "$ref": "#/$defs/DetectorConfig" + } + + return schema diff --git a/generate_config_translations.py b/generate_config_translations.py index c19578f1a..f41957561 100644 --- a/generate_config_translations.py +++ b/generate_config_translations.py @@ -8,20 +8,18 @@ and generates JSON translation files with titles and descriptions for the web UI import json import logging -import shutil +import sys from pathlib import Path -from typing import Any, Dict, Optional, get_args, get_origin - -from pydantic import BaseModel -from pydantic.fields import FieldInfo +from typing import Any, Dict, get_args, get_origin from frigate.config.config import FrigateConfig +from frigate.util.schema import 
get_config_schema logging.basicConfig(level=logging.INFO) logger = logging.getLogger(__name__) -def get_field_translations(field_info: FieldInfo) -> Dict[str, str]: +def get_field_translations(field_info) -> Dict[str, str]: """Extract title and description from a Pydantic field.""" translations = {} @@ -34,50 +32,147 @@ def get_field_translations(field_info: FieldInfo) -> Dict[str, str]: return translations -def process_model_fields(model: type[BaseModel]) -> Dict[str, Any]: +def extract_translations_from_schema( + schema: Dict[str, Any], defs: Dict[str, Any] = None +) -> Dict[str, Any]: """ - Recursively process a Pydantic model to extract translations. + Recursively extract translations (titles and descriptions) from a JSON schema. - Returns a nested dictionary structure matching the config schema, - with title and description for each field. + Returns a dictionary structure with label and description for each field, + and nested fields directly under their parent keys. """ + if defs is None: + defs = schema.get("$defs", {}) + translations = {} - model_fields = model.model_fields + # Add top-level title and description if present + if "title" in schema: + translations["label"] = schema["title"] + if "description" in schema: + translations["description"] = schema["description"] - for field_name, field_info in model_fields.items(): - field_translations = get_field_translations(field_info) + # Process nested properties + properties = schema.get("properties", {}) + for field_name, field_schema in properties.items(): + field_translations = {} - # Get the field's type annotation - field_type = field_info.annotation + # Handle $ref references + if "$ref" in field_schema: + ref_path = field_schema["$ref"] + if ref_path.startswith("#/$defs/"): + ref_name = ref_path.split("/")[-1] + if ref_name in defs: + ref_schema = defs[ref_name] + # Extract from the referenced schema + ref_translations = extract_translations_from_schema( + ref_schema, defs=defs + ) + # Use the $ref 
field's own title/description if present + if "title" in field_schema: + field_translations["label"] = field_schema["title"] + elif "label" in ref_translations: + field_translations["label"] = ref_translations["label"] + if "description" in field_schema: + field_translations["description"] = field_schema["description"] + elif "description" in ref_translations: + field_translations["description"] = ref_translations[ + "description" + ] + # Add nested properties from referenced schema + nested_without_root = { + k: v + for k, v in ref_translations.items() + if k not in ("label", "description") + } + field_translations.update(nested_without_root) + # Handle additionalProperties with $ref (for dict types) + elif "additionalProperties" in field_schema: + additional_props = field_schema["additionalProperties"] + # Extract title and description from the field itself + if "title" in field_schema: + field_translations["label"] = field_schema["title"] + if "description" in field_schema: + field_translations["description"] = field_schema["description"] - # Handle Optional types - origin = get_origin(field_type) + # If additionalProperties contains a $ref, extract nested translations + if "$ref" in additional_props: + ref_path = additional_props["$ref"] + if ref_path.startswith("#/$defs/"): + ref_name = ref_path.split("/")[-1] + if ref_name in defs: + ref_schema = defs[ref_name] + nested = extract_translations_from_schema(ref_schema, defs=defs) + nested_without_root = { + k: v + for k, v in nested.items() + if k not in ("label", "description") + } + field_translations.update(nested_without_root) + # Handle items with $ref (for array types) + elif "items" in field_schema: + items = field_schema["items"] + # Extract title and description from the field itself + if "title" in field_schema: + field_translations["label"] = field_schema["title"] + if "description" in field_schema: + field_translations["description"] = field_schema["description"] - if origin is Optional or ( - 
hasattr(origin, "__name__") and origin.__name__ == "UnionType" - ): - args = get_args(field_type) - field_type = next( - (arg for arg in args if arg is not type(None)), field_type - ) + # If items contains a $ref, extract nested translations + if "$ref" in items: + ref_path = items["$ref"] + if ref_path.startswith("#/$defs/"): + ref_name = ref_path.split("/")[-1] + if ref_name in defs: + ref_schema = defs[ref_name] + nested = extract_translations_from_schema(ref_schema, defs=defs) + nested_without_root = { + k: v + for k, v in nested.items() + if k not in ("label", "description") + } + field_translations.update(nested_without_root) + else: + # Extract title and description + if "title" in field_schema: + field_translations["label"] = field_schema["title"] + if "description" in field_schema: + field_translations["description"] = field_schema["description"] - # Handle Dict types (like Dict[str, CameraConfig]) - if get_origin(field_type) is dict: - dict_args = get_args(field_type) - - if len(dict_args) >= 2: - value_type = dict_args[1] - - if isinstance(value_type, type) and issubclass(value_type, BaseModel): - nested_translations = process_model_fields(value_type) - - if nested_translations: - field_translations["properties"] = nested_translations - elif isinstance(field_type, type) and issubclass(field_type, BaseModel): - nested_translations = process_model_fields(field_type) - if nested_translations: - field_translations["properties"] = nested_translations + # Recursively process nested properties + if "properties" in field_schema: + nested = extract_translations_from_schema(field_schema, defs=defs) + # Merge nested translations + nested_without_root = { + k: v for k, v in nested.items() if k not in ("label", "description") + } + field_translations.update(nested_without_root) + # Handle anyOf cases + elif "anyOf" in field_schema: + for item in field_schema["anyOf"]: + if "properties" in item: + nested = extract_translations_from_schema(item, defs=defs) + 
nested_without_root = { + k: v + for k, v in nested.items() + if k not in ("label", "description") + } + field_translations.update(nested_without_root) + elif "$ref" in item: + ref_path = item["$ref"] + if ref_path.startswith("#/$defs/"): + ref_name = ref_path.split("/")[-1] + if ref_name in defs: + ref_schema = defs[ref_name] + nested = extract_translations_from_schema( + ref_schema, defs=defs + ) + nested_without_root = { + k: v + for k, v in nested.items() + if k not in ("label", "description") + } + field_translations.update(nested_without_root) if field_translations: translations[field_name] = field_translations @@ -85,76 +180,350 @@ def process_model_fields(model: type[BaseModel]) -> Dict[str, Any]: return translations -def generate_section_translation( - section_name: str, field_info: FieldInfo -) -> Dict[str, Any]: +def generate_section_translation(config_class: type) -> Dict[str, Any]: """ - Generate translation structure for a top-level config section. + Generate translation structure for a config section using its JSON schema. 
""" - section_translations = get_field_translations(field_info) - field_type = field_info.annotation - origin = get_origin(field_type) + schema = config_class.model_json_schema() + return extract_translations_from_schema(schema) - if origin is Optional or ( - hasattr(origin, "__name__") and origin.__name__ == "UnionType" - ): - args = get_args(field_type) - field_type = next((arg for arg in args if arg is not type(None)), field_type) - # Handle Dict types (like detectors, cameras, camera_groups) - if get_origin(field_type) is dict: - dict_args = get_args(field_type) - if len(dict_args) >= 2: - value_type = dict_args[1] - if isinstance(value_type, type) and issubclass(value_type, BaseModel): - nested = process_model_fields(value_type) - if nested: - section_translations["properties"] = nested +def get_detector_translations( + config_schema: Dict[str, Any], +) -> tuple[Dict[str, Any], set[str]]: + """Build detector type translations with nested fields based on schema definitions.""" + defs = config_schema.get("$defs", {}) + detector_schema = defs.get("DetectorConfig", {}) + discriminator = detector_schema.get("discriminator", {}) + mapping = discriminator.get("mapping", {}) - # If the field itself is a BaseModel, process it - elif isinstance(field_type, type) and issubclass(field_type, BaseModel): - nested = process_model_fields(field_type) - if nested: - section_translations["properties"] = nested + type_translations: Dict[str, Any] = {} + nested_field_keys: set[str] = set() + for detector_type, ref in mapping.items(): + if not isinstance(ref, str): + continue - return section_translations + if not ref.startswith("#/$defs/"): + continue + + ref_name = ref.split("/")[-1] + ref_schema = defs.get(ref_name, {}) + if not ref_schema: + continue + + type_entry: Dict[str, str] = {} + title = ref_schema.get("title") + description = ref_schema.get("description") + if title: + type_entry["label"] = title + if description: + type_entry["description"] = description + + nested = 
extract_translations_from_schema(ref_schema, defs=defs) + nested_without_root = { + k: v for k, v in nested.items() if k not in ("label", "description") + } + if nested_without_root: + type_entry.update(nested_without_root) + nested_field_keys.update(nested_without_root.keys()) + + if type_entry: + type_translations[detector_type] = type_entry + + return type_translations, nested_field_keys def main(): """Main function to generate config translations.""" # Define output directory - output_dir = Path(__file__).parent / "web" / "public" / "locales" / "en" / "config" + if len(sys.argv) > 1: + output_dir = Path(sys.argv[1]) + else: + output_dir = ( + Path(__file__).parent / "web" / "public" / "locales" / "en" / "config" + ) logger.info(f"Output directory: {output_dir}") - # Clean and recreate the output directory - if output_dir.exists(): - logger.info(f"Removing existing directory: {output_dir}") - shutil.rmtree(output_dir) - - logger.info(f"Creating directory: {output_dir}") + # Ensure the output directory exists; do not delete existing files. 
output_dir.mkdir(parents=True, exist_ok=True) + logger.info( + f"Using output directory (existing files will be overwritten): {output_dir}" + ) config_fields = FrigateConfig.model_fields + config_schema = get_config_schema(FrigateConfig) logger.info(f"Found {len(config_fields)} top-level config sections") + global_translations = {} + for field_name, field_info in config_fields.items(): if field_name.startswith("_"): continue logger.info(f"Processing section: {field_name}") - section_data = generate_section_translation(field_name, field_info) + + # Get the field's type + field_type = field_info.annotation + from typing import Optional, Union + + origin = get_origin(field_type) + if ( + origin is Optional + or origin is Union + or ( + hasattr(origin, "__name__") + and origin.__name__ in ("UnionType", "Union") + ) + ): + args = get_args(field_type) + field_type = next( + (arg for arg in args if arg is not type(None)), field_type + ) + + # Handle Dict[str, SomeModel] - extract the value type + if origin is dict: + args = get_args(field_type) + if args and len(args) > 1: + field_type = args[1] # Get value type from Dict[key, value] + + # Start with field's top-level metadata (label, description) + section_data = get_field_translations(field_info) + + # Generate nested translations from the field type's schema + if hasattr(field_type, "model_json_schema"): + schema = field_type.model_json_schema() + # Extract nested properties from schema + nested = extract_translations_from_schema(schema) + # Remove top-level label/description from nested since we got those from field_info + nested_without_root = { + k: v for k, v in nested.items() if k not in ("label", "description") + } + section_data.update(nested_without_root) + + if field_name == "detectors": + detector_types, detector_field_keys = get_detector_translations( + config_schema + ) + section_data.update(detector_types) + for key in detector_field_keys: + if key == "type": + continue + section_data.pop(key, None) if not 
section_data: logger.warning(f"No translations found for section: {field_name}") continue - output_file = output_dir / f"{field_name}.json" - with open(output_file, "w", encoding="utf-8") as f: - json.dump(section_data, f, indent=2, ensure_ascii=False) + # Add camera-level fields to global config documentation if applicable + CAMERA_LEVEL_FIELDS = { + "birdseye": ( + "frigate.config.camera.birdseye", + "BirdseyeCameraConfig", + ["order"], + ), + "ffmpeg": ( + "frigate.config.camera.ffmpeg", + "CameraFfmpegConfig", + ["inputs"], + ), + "lpr": ( + "frigate.config.classification", + "CameraLicensePlateRecognitionConfig", + ["expire_time"], + ), + "semantic_search": ( + "frigate.config.classification", + "CameraSemanticSearchConfig", + ["triggers"], + ), + } - logger.info(f"Generated: {output_file}") + if field_name in CAMERA_LEVEL_FIELDS: + module_path, class_name, field_names = CAMERA_LEVEL_FIELDS[field_name] + try: + import importlib + + module = importlib.import_module(module_path) + camera_class = getattr(module, class_name) + schema = camera_class.model_json_schema() + camera_fields = schema.get("properties", {}) + defs = schema.get("$defs", {}) + + for fname in field_names: + if fname in camera_fields: + field_schema = camera_fields[fname] + field_trans = {} + if "title" in field_schema: + field_trans["label"] = field_schema["title"] + if "description" in field_schema: + field_trans["description"] = field_schema["description"] + + # Extract nested properties based on schema type + nested_to_extract = None + + # Handle direct $ref + if "$ref" in field_schema: + ref_path = field_schema["$ref"] + if ref_path.startswith("#/$defs/"): + ref_name = ref_path.split("/")[-1] + if ref_name in defs: + nested_to_extract = defs[ref_name] + + # Handle additionalProperties with $ref (for dict types) + elif "additionalProperties" in field_schema: + additional_props = field_schema["additionalProperties"] + if "$ref" in additional_props: + ref_path = additional_props["$ref"] + if 
ref_path.startswith("#/$defs/"): + ref_name = ref_path.split("/")[-1] + if ref_name in defs: + nested_to_extract = defs[ref_name] + + # Handle items with $ref (for array types) + elif "items" in field_schema: + items = field_schema["items"] + if "$ref" in items: + ref_path = items["$ref"] + if ref_path.startswith("#/$defs/"): + ref_name = ref_path.split("/")[-1] + if ref_name in defs: + nested_to_extract = defs[ref_name] + + # Extract nested properties if we found a schema to use + if nested_to_extract: + nested = extract_translations_from_schema( + nested_to_extract, defs=defs + ) + nested_without_root = { + k: v + for k, v in nested.items() + if k not in ("label", "description") + } + field_trans.update(nested_without_root) + + if field_trans: + section_data[fname] = field_trans + except Exception as e: + logger.warning( + f"Could not add camera-level fields for {field_name}: {e}" + ) + + # Add to global translations instead of writing separate files + global_translations[field_name] = section_data + + logger.info(f"Added section to global translations: {field_name}") + + # Handle camera-level configs that aren't top-level FrigateConfig fields + # These are defined as fields in CameraConfig, so we extract title/description from there + camera_level_configs = { + "camera_mqtt": ("frigate.config.camera.mqtt", "CameraMqttConfig", "mqtt"), + "camera_ui": ("frigate.config.camera.ui", "CameraUiConfig", "ui"), + "onvif": ("frigate.config.camera.onvif", "OnvifConfig", "onvif"), + } + + # Import CameraConfig to extract field metadata + from frigate.config.camera.camera import CameraConfig + + camera_config_schema = CameraConfig.model_json_schema() + camera_properties = camera_config_schema.get("properties", {}) + + for config_name, ( + module_path, + class_name, + camera_field_name, + ) in camera_level_configs.items(): + try: + logger.info(f"Processing camera-level section: {config_name}") + import importlib + + module = importlib.import_module(module_path) + config_class 
= getattr(module, class_name) + + section_data = {} + + # Extract top-level label and description from CameraConfig field definition + if camera_field_name in camera_properties: + field_schema = camera_properties[camera_field_name] + if "title" in field_schema: + section_data["label"] = field_schema["title"] + if "description" in field_schema: + section_data["description"] = field_schema["description"] + + # Process model fields from schema + schema = config_class.model_json_schema() + nested = extract_translations_from_schema(schema) + # Remove top-level label/description since we got those from CameraConfig + nested_without_root = { + k: v for k, v in nested.items() if k not in ("label", "description") + } + section_data.update(nested_without_root) + + # Add camera-level section into global translations (do not write separate file) + global_translations[config_name] = section_data + logger.info( + f"Added camera-level section to global translations: {config_name}" + ) + except Exception as e: + logger.error(f"Failed to generate {config_name}: {e}") + + # Remove top-level 'cameras' field if present so it remains a separate file + if "cameras" in global_translations: + logger.info( + "Removing top-level 'cameras' from global translations to keep it as a separate cameras.json" + ) + del global_translations["cameras"] + + # Write consolidated global.json with per-section keys + global_file = output_dir / "global.json" + with open(global_file, "w", encoding="utf-8") as f: + json.dump(global_translations, f, indent=2, ensure_ascii=False) + f.write("\n") + + logger.info(f"Generated consolidated translations: {global_file}") + + if not global_translations: + logger.warning("No global translations were generated!") + else: + logger.info(f"Global contains {len(global_translations)} sections") + + # Generate cameras.json from CameraConfig schema + cameras_file = output_dir / "cameras.json" + logger.info(f"Generating cameras.json: {cameras_file}") + try: + if 
"camera_config_schema" in locals(): + camera_schema = camera_config_schema + else: + from frigate.config.camera.camera import CameraConfig + + camera_schema = CameraConfig.model_json_schema() + + camera_translations = extract_translations_from_schema(camera_schema) + + # Change descriptions to use 'for this camera' for fields that are global + def sanitize_camera_descriptions(obj): + if isinstance(obj, dict): + for k, v in list(obj.items()): + if k == "description" and isinstance(v, str): + obj[k] = v.replace( + "for all cameras; can be overridden per-camera", + "for this camera", + ) + else: + sanitize_camera_descriptions(v) + elif isinstance(obj, list): + for item in obj: + sanitize_camera_descriptions(item) + + sanitize_camera_descriptions(camera_translations) + + with open(cameras_file, "w", encoding="utf-8") as f: + json.dump(camera_translations, f, indent=2, ensure_ascii=False) + f.write("\n") + logger.info(f"Generated cameras.json: {cameras_file}") + except Exception as e: + logger.error(f"Failed to generate cameras.json: {e}") logger.info("Translation generation complete!") diff --git a/web/package-lock.json b/web/package-lock.json index cfd5aa2c6..e0e36bc8a 100644 --- a/web/package-lock.json +++ b/web/package-lock.json @@ -32,6 +32,10 @@ "@radix-ui/react-toggle": "^1.1.2", "@radix-ui/react-toggle-group": "^1.1.2", "@radix-ui/react-tooltip": "^1.2.8", + "@rjsf/core": "^6.3.1", + "@rjsf/shadcn": "^6.3.1", + "@rjsf/utils": "^6.3.1", + "@rjsf/validator-ajv8": "^6.3.1", "apexcharts": "^3.52.0", "axios": "^1.7.7", "class-variance-authority": "^0.7.1", @@ -2085,6 +2089,15 @@ } } }, + "node_modules/@radix-ui/react-icons": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-icons/-/react-icons-1.3.2.tgz", + "integrity": "sha512-fyQIhGDhzfc9pK2kH6Pl9c4BDJGfMkPqkyIgYDthyNYoNg3wVhoJMMh19WS4Up/1KMPFVpNsT2q3WmXn2N1m6g==", + "license": "MIT", + "peerDependencies": { + "react": "^16.x || ^17.x || ^18.x || ^19.0.0 || ^19.0.0-rc" + } + }, 
"node_modules/@radix-ui/react-id": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/@radix-ui/react-id/-/react-id-1.1.0.tgz", @@ -3292,6 +3305,15 @@ "integrity": "sha512-A9+lCBZoaMJlVKcRBz2YByCG+Cp2t6nAnMnNba+XiWxnj6r4JUFqfsgwocMBZU9LPtdxC6wB56ySYpc7LQIoJg==", "license": "MIT" }, + "node_modules/@react-icons/all-files": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/@react-icons/all-files/-/all-files-4.1.0.tgz", + "integrity": "sha512-hxBI2UOuVaI3O/BhQfhtb4kcGn9ft12RWAFVMUeNjqqhLsHvFtzIkFaptBJpFDANTKoDfdVoHTKZDlwKCACbMQ==", + "license": "MIT", + "peerDependencies": { + "react": "*" + } + }, "node_modules/@remix-run/router": { "version": "1.23.2", "resolved": "https://registry.npmjs.org/@remix-run/router/-/router-1.23.2.tgz", @@ -3301,6 +3323,1256 @@ "node": ">=14.0.0" } }, + "node_modules/@rjsf/core": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/@rjsf/core/-/core-6.3.1.tgz", + "integrity": "sha512-LTjFz5Fk3FlbgFPJ+OJi1JdWJyiap9dSpx8W6u7JHNB7K5VbwzJe8gIU45XWLHzWFGDHKPm89VrUzjOs07TPtg==", + "license": "Apache-2.0", + "peer": true, + "dependencies": { + "lodash": "^4.17.23", + "lodash-es": "^4.17.23", + "markdown-to-jsx": "^8.0.0", + "prop-types": "^15.8.1" + }, + "engines": { + "node": ">=20" + }, + "peerDependencies": { + "@rjsf/utils": "^6.3.x", + "react": ">=18" + } + }, + "node_modules/@rjsf/shadcn": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/@rjsf/shadcn/-/shadcn-6.3.1.tgz", + "integrity": "sha512-9v+BZ5ip2fdlYRYMPlkNzrhHhZmyrdConPLbHjLN+wVDTeIPZW8IjeV5C/diNqFpS3wm223vW5zOOE5eWuhi/g==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-checkbox": "^1.3.3", + "@radix-ui/react-dialog": "^1.1.15", + "@radix-ui/react-icons": "^1.3.2", + "@radix-ui/react-label": "^2.1.8", + "@radix-ui/react-popover": "^1.1.15", + "@radix-ui/react-radio-group": "^1.3.8", + "@radix-ui/react-select": "^2.2.6", + "@radix-ui/react-separator": "^1.1.8", + "@radix-ui/react-slider": "^1.3.6", + 
"@radix-ui/react-slot": "^1.2.0", + "@react-icons/all-files": "^4.1.0", + "class-variance-authority": "^0.7.1", + "clsx": "^2.1.1", + "cmdk": "^1.1.1", + "lodash": "^4.17.23", + "lodash-es": "^4.17.23", + "lucide-react": "^0.548.0", + "tailwind-merge": "^3.4.0", + "tailwindcss-animate": "^1.0.7", + "uuid": "^13.0.0" + }, + "engines": { + "node": ">=20" + }, + "peerDependencies": { + "@rjsf/core": "^6.3.x", + "@rjsf/utils": "^6.3.x", + "react": ">=18" + } + }, + "node_modules/@rjsf/shadcn/node_modules/@radix-ui/number": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/number/-/number-1.1.1.tgz", + "integrity": "sha512-MkKCwxlXTgz6CFoJx3pCwn07GKp36+aZyu/u2Ln2VrA5DcdyCZkASEDBTd8x5whTQQL5CiYf4prXKLcgQdv29g==", + "license": "MIT" + }, + "node_modules/@rjsf/shadcn/node_modules/@radix-ui/primitive": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/primitive/-/primitive-1.1.3.tgz", + "integrity": "sha512-JTF99U/6XIjCBo0wqkU5sK10glYe27MRRsfwoiq5zzOEZLHU3A3KCMa5X/azekYRCJ0HlwI0crAXS/5dEHTzDg==", + "license": "MIT" + }, + "node_modules/@rjsf/shadcn/node_modules/@radix-ui/react-checkbox": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-checkbox/-/react-checkbox-1.3.3.tgz", + "integrity": "sha512-wBbpv+NQftHDdG86Qc0pIyXk5IR3tM8Vd0nWLKDcX8nNn4nXFOFwsKuqw2okA/1D/mpaAkmuyndrPJTYDNZtFw==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-presence": "1.1.5", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-controllable-state": "1.2.2", + "@radix-ui/react-use-previous": "1.1.1", + "@radix-ui/react-use-size": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + 
"@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@rjsf/shadcn/node_modules/@radix-ui/react-checkbox/node_modules/@radix-ui/react-presence": { + "version": "1.1.5", + "resolved": "https://registry.npmjs.org/@radix-ui/react-presence/-/react-presence-1.1.5.tgz", + "integrity": "sha512-/jfEwNDdQVBCNvjkGit4h6pMOzq8bHkopq458dPt2lMjx+eBQUohZNG9A7DtO/O5ukSbxuaNGXMjHicgwy6rQQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-use-layout-effect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@rjsf/shadcn/node_modules/@radix-ui/react-checkbox/node_modules/@radix-ui/react-primitive": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz", + "integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-slot": "1.2.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@rjsf/shadcn/node_modules/@radix-ui/react-compose-refs": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-compose-refs/-/react-compose-refs-1.1.2.tgz", + "integrity": "sha512-z4eqJvfiNnFMHIIvXP3CY57y2WJs5g2v3X0zm9mEJkrkNv4rDxu+sg9Jh8EkXyeqBkB7SOcboo9dMVqhyrACIg==", + "license": "MIT", + 
"peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@rjsf/shadcn/node_modules/@radix-ui/react-context": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-context/-/react-context-1.1.2.tgz", + "integrity": "sha512-jCi/QKUM2r1Ju5a3J64TH2A5SpKAgh0LpknyqdQ4m6DCV0xJ2HG1xARRwNGPQfi1SLdLWZ1OJz6F4OMBBNiGJA==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@rjsf/shadcn/node_modules/@radix-ui/react-direction": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-direction/-/react-direction-1.1.1.tgz", + "integrity": "sha512-1UEWRX6jnOA2y4H5WczZ44gOOjTEmlqv1uNW4GAJEO5+bauCBhv8snY65Iw5/VOS/ghKN9gr2KjnLKxrsvoMVw==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@rjsf/shadcn/node_modules/@radix-ui/react-focus-guards": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-focus-guards/-/react-focus-guards-1.1.3.tgz", + "integrity": "sha512-0rFg/Rj2Q62NCm62jZw0QX7a3sz6QCQU0LpZdNrJX8byRGaGVTqbrW9jAoIAHyMQqsNpeZ81YgSizOt5WXq0Pw==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@rjsf/shadcn/node_modules/@radix-ui/react-id": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-id/-/react-id-1.1.1.tgz", + "integrity": 
"sha512-kGkGegYIdQsOb4XjsfM97rXsiHaBwco+hFI66oO4s9LU+PLAC5oJ7khdOVFxkhsmlbpUqDAvXw11CluXP+jkHg==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-use-layout-effect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@rjsf/shadcn/node_modules/@radix-ui/react-label": { + "version": "2.1.8", + "resolved": "https://registry.npmjs.org/@radix-ui/react-label/-/react-label-2.1.8.tgz", + "integrity": "sha512-FmXs37I6hSBVDlO4y764TNz1rLgKwjJMQ0EGte6F3Cb3f4bIuHB/iLa/8I9VKkmOy+gNHq8rql3j686ACVV21A==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-primitive": "2.1.4" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@rjsf/shadcn/node_modules/@radix-ui/react-label/node_modules/@radix-ui/react-primitive": { + "version": "2.1.4", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.4.tgz", + "integrity": "sha512-9hQc4+GNVtJAIEPEqlYqW5RiYdrr8ea5XQ0ZOnD6fgru+83kqT15mq2OCcbe8KnjRZl5vF3ks69AKz3kh1jrhg==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-slot": "1.2.4" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@rjsf/shadcn/node_modules/@radix-ui/react-label/node_modules/@radix-ui/react-slot": { + "version": "1.2.4", + "resolved": 
"https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.4.tgz", + "integrity": "sha512-Jl+bCv8HxKnlTLVrcDE8zTMJ09R9/ukw4qBs/oZClOfoQk/cOTbDn+NceXfV7j09YPVQUryJPHurafcSg6EVKA==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@rjsf/shadcn/node_modules/@radix-ui/react-popover": { + "version": "1.1.15", + "resolved": "https://registry.npmjs.org/@radix-ui/react-popover/-/react-popover-1.1.15.tgz", + "integrity": "sha512-kr0X2+6Yy/vJzLYJUPCZEc8SfQcf+1COFoAqauJm74umQhta9M7lNJHP7QQS3vkvcGLQUbWpMzwrXYwrYztHKA==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-dismissable-layer": "1.1.11", + "@radix-ui/react-focus-guards": "1.1.3", + "@radix-ui/react-focus-scope": "1.1.7", + "@radix-ui/react-id": "1.1.1", + "@radix-ui/react-popper": "1.2.8", + "@radix-ui/react-portal": "1.1.9", + "@radix-ui/react-presence": "1.1.5", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-slot": "1.2.3", + "@radix-ui/react-use-controllable-state": "1.2.2", + "aria-hidden": "^1.2.4", + "react-remove-scroll": "^2.6.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@rjsf/shadcn/node_modules/@radix-ui/react-popover/node_modules/@radix-ui/react-dismissable-layer": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/@radix-ui/react-dismissable-layer/-/react-dismissable-layer-1.1.11.tgz", + "integrity": 
"sha512-Nqcp+t5cTB8BinFkZgXiMJniQH0PsUt2k51FUhbdfeKvc4ACcG2uQniY/8+h1Yv6Kza4Q7lD7PQV0z0oicE0Mg==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-callback-ref": "1.1.1", + "@radix-ui/react-use-escape-keydown": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@rjsf/shadcn/node_modules/@radix-ui/react-popover/node_modules/@radix-ui/react-focus-scope": { + "version": "1.1.7", + "resolved": "https://registry.npmjs.org/@radix-ui/react-focus-scope/-/react-focus-scope-1.1.7.tgz", + "integrity": "sha512-t2ODlkXBQyn7jkl6TNaw/MtVEVvIGelJDCG41Okq/KwUsJBwQ4XVZsHAVUkK4mBv3ewiAS3PGuUWuY2BoK4ZUw==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-callback-ref": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@rjsf/shadcn/node_modules/@radix-ui/react-popover/node_modules/@radix-ui/react-popper": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/@radix-ui/react-popper/-/react-popper-1.2.8.tgz", + "integrity": "sha512-0NJQ4LFFUuWkE7Oxf0htBKS6zLkkjBH+hM1uk7Ng705ReR8m/uelduy1DBo0PyBXPKVnBA6YBlU94MBGXrSBCw==", + "license": "MIT", + "dependencies": { + "@floating-ui/react-dom": "^2.0.0", + "@radix-ui/react-arrow": "1.1.7", + 
"@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-callback-ref": "1.1.1", + "@radix-ui/react-use-layout-effect": "1.1.1", + "@radix-ui/react-use-rect": "1.1.1", + "@radix-ui/react-use-size": "1.1.1", + "@radix-ui/rect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@rjsf/shadcn/node_modules/@radix-ui/react-popover/node_modules/@radix-ui/react-popper/node_modules/@radix-ui/react-arrow": { + "version": "1.1.7", + "resolved": "https://registry.npmjs.org/@radix-ui/react-arrow/-/react-arrow-1.1.7.tgz", + "integrity": "sha512-F+M1tLhO+mlQaOWspE8Wstg+z6PwxwRd8oQ8IXceWz92kfAmalTRf0EjrouQeo7QssEPfCn05B4Ihs1K9WQ/7w==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-primitive": "2.1.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@rjsf/shadcn/node_modules/@radix-ui/react-popover/node_modules/@radix-ui/react-portal": { + "version": "1.1.9", + "resolved": "https://registry.npmjs.org/@radix-ui/react-portal/-/react-portal-1.1.9.tgz", + "integrity": "sha512-bpIxvq03if6UNwXZ+HTK71JLh4APvnXntDc6XOX8UVq4XQOVl7lwok0AvIl+b8zgCw3fSaVTZMpAPPagXbKmHQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-layout-effect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 
|| ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@rjsf/shadcn/node_modules/@radix-ui/react-popover/node_modules/@radix-ui/react-presence": { + "version": "1.1.5", + "resolved": "https://registry.npmjs.org/@radix-ui/react-presence/-/react-presence-1.1.5.tgz", + "integrity": "sha512-/jfEwNDdQVBCNvjkGit4h6pMOzq8bHkopq458dPt2lMjx+eBQUohZNG9A7DtO/O5ukSbxuaNGXMjHicgwy6rQQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-use-layout-effect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@rjsf/shadcn/node_modules/@radix-ui/react-popover/node_modules/@radix-ui/react-primitive": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz", + "integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-slot": "1.2.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@rjsf/shadcn/node_modules/@radix-ui/react-radio-group": { + "version": "1.3.8", + "resolved": "https://registry.npmjs.org/@radix-ui/react-radio-group/-/react-radio-group-1.3.8.tgz", 
+ "integrity": "sha512-VBKYIYImA5zsxACdisNQ3BjCBfmbGH3kQlnFVqlWU4tXwjy7cGX8ta80BcrO+WJXIn5iBylEH3K6ZTlee//lgQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-direction": "1.1.1", + "@radix-ui/react-presence": "1.1.5", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-roving-focus": "1.1.11", + "@radix-ui/react-use-controllable-state": "1.2.2", + "@radix-ui/react-use-previous": "1.1.1", + "@radix-ui/react-use-size": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@rjsf/shadcn/node_modules/@radix-ui/react-radio-group/node_modules/@radix-ui/react-presence": { + "version": "1.1.5", + "resolved": "https://registry.npmjs.org/@radix-ui/react-presence/-/react-presence-1.1.5.tgz", + "integrity": "sha512-/jfEwNDdQVBCNvjkGit4h6pMOzq8bHkopq458dPt2lMjx+eBQUohZNG9A7DtO/O5ukSbxuaNGXMjHicgwy6rQQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-use-layout-effect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@rjsf/shadcn/node_modules/@radix-ui/react-radio-group/node_modules/@radix-ui/react-primitive": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz", + "integrity": 
"sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-slot": "1.2.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@rjsf/shadcn/node_modules/@radix-ui/react-radio-group/node_modules/@radix-ui/react-roving-focus": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/@radix-ui/react-roving-focus/-/react-roving-focus-1.1.11.tgz", + "integrity": "sha512-7A6S9jSgm/S+7MdtNDSb+IU859vQqJ/QAtcYQcfFC6W8RS4IxIZDldLR0xqCFZ6DCyrQLjLPsxtTNch5jVA4lA==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-collection": "1.1.7", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-direction": "1.1.1", + "@radix-ui/react-id": "1.1.1", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-callback-ref": "1.1.1", + "@radix-ui/react-use-controllable-state": "1.2.2" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@rjsf/shadcn/node_modules/@radix-ui/react-radio-group/node_modules/@radix-ui/react-roving-focus/node_modules/@radix-ui/react-collection": { + "version": "1.1.7", + "resolved": "https://registry.npmjs.org/@radix-ui/react-collection/-/react-collection-1.1.7.tgz", + "integrity": 
"sha512-Fh9rGN0MoI4ZFUNyfFVNU4y9LUz93u9/0K+yLgA2bwRojxM8JU1DyvvMBabnZPBgMWREAJvU2jjVzq+LrFUglw==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-slot": "1.2.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@rjsf/shadcn/node_modules/@radix-ui/react-select": { + "version": "2.2.6", + "resolved": "https://registry.npmjs.org/@radix-ui/react-select/-/react-select-2.2.6.tgz", + "integrity": "sha512-I30RydO+bnn2PQztvo25tswPH+wFBjehVGtmagkU78yMdwTwVf12wnAOF+AeP8S2N8xD+5UPbGhkUfPyvT+mwQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/number": "1.1.1", + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-collection": "1.1.7", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-direction": "1.1.1", + "@radix-ui/react-dismissable-layer": "1.1.11", + "@radix-ui/react-focus-guards": "1.1.3", + "@radix-ui/react-focus-scope": "1.1.7", + "@radix-ui/react-id": "1.1.1", + "@radix-ui/react-popper": "1.2.8", + "@radix-ui/react-portal": "1.1.9", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-slot": "1.2.3", + "@radix-ui/react-use-callback-ref": "1.1.1", + "@radix-ui/react-use-controllable-state": "1.2.2", + "@radix-ui/react-use-layout-effect": "1.1.1", + "@radix-ui/react-use-previous": "1.1.1", + "@radix-ui/react-visually-hidden": "1.2.3", + "aria-hidden": "^1.2.4", + "react-remove-scroll": "^2.6.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 
|| ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@rjsf/shadcn/node_modules/@radix-ui/react-select/node_modules/@radix-ui/react-collection": { + "version": "1.1.7", + "resolved": "https://registry.npmjs.org/@radix-ui/react-collection/-/react-collection-1.1.7.tgz", + "integrity": "sha512-Fh9rGN0MoI4ZFUNyfFVNU4y9LUz93u9/0K+yLgA2bwRojxM8JU1DyvvMBabnZPBgMWREAJvU2jjVzq+LrFUglw==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-slot": "1.2.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@rjsf/shadcn/node_modules/@radix-ui/react-select/node_modules/@radix-ui/react-dismissable-layer": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/@radix-ui/react-dismissable-layer/-/react-dismissable-layer-1.1.11.tgz", + "integrity": "sha512-Nqcp+t5cTB8BinFkZgXiMJniQH0PsUt2k51FUhbdfeKvc4ACcG2uQniY/8+h1Yv6Kza4Q7lD7PQV0z0oicE0Mg==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-callback-ref": "1.1.1", + "@radix-ui/react-use-escape-keydown": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + 
"node_modules/@rjsf/shadcn/node_modules/@radix-ui/react-select/node_modules/@radix-ui/react-focus-scope": { + "version": "1.1.7", + "resolved": "https://registry.npmjs.org/@radix-ui/react-focus-scope/-/react-focus-scope-1.1.7.tgz", + "integrity": "sha512-t2ODlkXBQyn7jkl6TNaw/MtVEVvIGelJDCG41Okq/KwUsJBwQ4XVZsHAVUkK4mBv3ewiAS3PGuUWuY2BoK4ZUw==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-callback-ref": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@rjsf/shadcn/node_modules/@radix-ui/react-select/node_modules/@radix-ui/react-popper": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/@radix-ui/react-popper/-/react-popper-1.2.8.tgz", + "integrity": "sha512-0NJQ4LFFUuWkE7Oxf0htBKS6zLkkjBH+hM1uk7Ng705ReR8m/uelduy1DBo0PyBXPKVnBA6YBlU94MBGXrSBCw==", + "license": "MIT", + "dependencies": { + "@floating-ui/react-dom": "^2.0.0", + "@radix-ui/react-arrow": "1.1.7", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-callback-ref": "1.1.1", + "@radix-ui/react-use-layout-effect": "1.1.1", + "@radix-ui/react-use-rect": "1.1.1", + "@radix-ui/react-use-size": "1.1.1", + "@radix-ui/rect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + 
"node_modules/@rjsf/shadcn/node_modules/@radix-ui/react-select/node_modules/@radix-ui/react-popper/node_modules/@radix-ui/react-arrow": { + "version": "1.1.7", + "resolved": "https://registry.npmjs.org/@radix-ui/react-arrow/-/react-arrow-1.1.7.tgz", + "integrity": "sha512-F+M1tLhO+mlQaOWspE8Wstg+z6PwxwRd8oQ8IXceWz92kfAmalTRf0EjrouQeo7QssEPfCn05B4Ihs1K9WQ/7w==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-primitive": "2.1.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@rjsf/shadcn/node_modules/@radix-ui/react-select/node_modules/@radix-ui/react-portal": { + "version": "1.1.9", + "resolved": "https://registry.npmjs.org/@radix-ui/react-portal/-/react-portal-1.1.9.tgz", + "integrity": "sha512-bpIxvq03if6UNwXZ+HTK71JLh4APvnXntDc6XOX8UVq4XQOVl7lwok0AvIl+b8zgCw3fSaVTZMpAPPagXbKmHQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-layout-effect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@rjsf/shadcn/node_modules/@radix-ui/react-select/node_modules/@radix-ui/react-primitive": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz", + "integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-slot": "1.2.3" + }, + 
"peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@rjsf/shadcn/node_modules/@radix-ui/react-select/node_modules/@radix-ui/react-visually-hidden": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-visually-hidden/-/react-visually-hidden-1.2.3.tgz", + "integrity": "sha512-pzJq12tEaaIhqjbzpCuv/OypJY/BPavOofm+dbab+MHLajy277+1lLm6JFcGgF5eskJ6mquGirhXY2GD/8u8Ug==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-primitive": "2.1.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@rjsf/shadcn/node_modules/@radix-ui/react-separator": { + "version": "1.1.8", + "resolved": "https://registry.npmjs.org/@radix-ui/react-separator/-/react-separator-1.1.8.tgz", + "integrity": "sha512-sDvqVY4itsKwwSMEe0jtKgfTh+72Sy3gPmQpjqcQneqQ4PFmr/1I0YA+2/puilhggCe2gJcx5EBAYFkWkdpa5g==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-primitive": "2.1.4" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@rjsf/shadcn/node_modules/@radix-ui/react-separator/node_modules/@radix-ui/react-primitive": { + "version": "2.1.4", + "resolved": 
"https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.4.tgz", + "integrity": "sha512-9hQc4+GNVtJAIEPEqlYqW5RiYdrr8ea5XQ0ZOnD6fgru+83kqT15mq2OCcbe8KnjRZl5vF3ks69AKz3kh1jrhg==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-slot": "1.2.4" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@rjsf/shadcn/node_modules/@radix-ui/react-separator/node_modules/@radix-ui/react-slot": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.4.tgz", + "integrity": "sha512-Jl+bCv8HxKnlTLVrcDE8zTMJ09R9/ukw4qBs/oZClOfoQk/cOTbDn+NceXfV7j09YPVQUryJPHurafcSg6EVKA==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@rjsf/shadcn/node_modules/@radix-ui/react-slider": { + "version": "1.3.6", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slider/-/react-slider-1.3.6.tgz", + "integrity": "sha512-JPYb1GuM1bxfjMRlNLE+BcmBC8onfCi60Blk7OBqi2MLTFdS+8401U4uFjnwkOr49BLmXxLC6JHkvAsx5OJvHw==", + "license": "MIT", + "dependencies": { + "@radix-ui/number": "1.1.1", + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-collection": "1.1.7", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-direction": "1.1.1", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-controllable-state": "1.2.2", + "@radix-ui/react-use-layout-effect": "1.1.1", + "@radix-ui/react-use-previous": "1.1.1", + "@radix-ui/react-use-size": 
"1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@rjsf/shadcn/node_modules/@radix-ui/react-slider/node_modules/@radix-ui/react-collection": { + "version": "1.1.7", + "resolved": "https://registry.npmjs.org/@radix-ui/react-collection/-/react-collection-1.1.7.tgz", + "integrity": "sha512-Fh9rGN0MoI4ZFUNyfFVNU4y9LUz93u9/0K+yLgA2bwRojxM8JU1DyvvMBabnZPBgMWREAJvU2jjVzq+LrFUglw==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-slot": "1.2.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@rjsf/shadcn/node_modules/@radix-ui/react-slider/node_modules/@radix-ui/react-primitive": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz", + "integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-slot": "1.2.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + 
"node_modules/@rjsf/shadcn/node_modules/@radix-ui/react-use-callback-ref": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-callback-ref/-/react-use-callback-ref-1.1.1.tgz", + "integrity": "sha512-FkBMwD+qbGQeMu1cOHnuGB6x4yzPjho8ap5WtbEJ26umhgqVXbhekKUQO+hZEL1vU92a3wHwdp0HAcqAUF5iDg==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@rjsf/shadcn/node_modules/@radix-ui/react-use-controllable-state": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-controllable-state/-/react-use-controllable-state-1.2.2.tgz", + "integrity": "sha512-BjasUjixPFdS+NKkypcyyN5Pmg83Olst0+c6vGov0diwTEo6mgdqVR6hxcEgFuh4QrAs7Rc+9KuGJ9TVCj0Zzg==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-use-effect-event": "0.0.2", + "@radix-ui/react-use-layout-effect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@rjsf/shadcn/node_modules/@radix-ui/react-use-escape-keydown": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-escape-keydown/-/react-use-escape-keydown-1.1.1.tgz", + "integrity": "sha512-Il0+boE7w/XebUHyBjroE+DbByORGR9KKmITzbR7MyQ4akpORYP/ZmbhAr0DG7RmmBqoOnZdy2QlvajJ2QA59g==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-use-callback-ref": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@rjsf/shadcn/node_modules/@radix-ui/react-use-layout-effect": { + "version": "1.1.1", + "resolved": 
"https://registry.npmjs.org/@radix-ui/react-use-layout-effect/-/react-use-layout-effect-1.1.1.tgz", + "integrity": "sha512-RbJRS4UWQFkzHTTwVymMTUv8EqYhOp8dOOviLj2ugtTiXRaRQS7GLGxZTLL1jWhMeoSCf5zmcZkqTl9IiYfXcQ==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@rjsf/shadcn/node_modules/@radix-ui/react-use-previous": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-previous/-/react-use-previous-1.1.1.tgz", + "integrity": "sha512-2dHfToCj/pzca2Ck724OZ5L0EVrr3eHRNsG/b3xQJLA2hZpVCS99bLAX+hm1IHXDEnzU6by5z/5MIY794/a8NQ==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@rjsf/shadcn/node_modules/@radix-ui/react-use-rect": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-rect/-/react-use-rect-1.1.1.tgz", + "integrity": "sha512-QTYuDesS0VtuHNNvMh+CjlKJ4LJickCMUAqjlE3+j8w+RlRpwyX3apEQKGFzbZGdo7XNG1tXa+bQqIE7HIXT2w==", + "license": "MIT", + "dependencies": { + "@radix-ui/rect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@rjsf/shadcn/node_modules/@radix-ui/react-use-size": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-size/-/react-use-size-1.1.1.tgz", + "integrity": "sha512-ewrXRDTAqAXlkl6t/fkXWNAhFX9I+CkKlw6zjEwk86RSPKwZr3xpBRso655aqYafwtnbpHLj6toFzmd6xdVptQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-use-layout-effect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 
|| ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@rjsf/shadcn/node_modules/@radix-ui/rect": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/rect/-/rect-1.1.1.tgz", + "integrity": "sha512-HPwpGIzkl28mWyZqG52jiqDJ12waP11Pa1lGoiyUkIEuMLBP0oeK/C89esbXrxsky5we7dfd8U58nm0SgAWpVw==", + "license": "MIT" + }, + "node_modules/@rjsf/shadcn/node_modules/cmdk": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/cmdk/-/cmdk-1.1.1.tgz", + "integrity": "sha512-Vsv7kFaXm+ptHDMZ7izaRsP70GgrW9NBNGswt9OZaVBLlE0SNpDq8eu/VGXyF9r7M0azK3Wy7OlYXsuyYLFzHg==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "^1.1.1", + "@radix-ui/react-dialog": "^1.1.6", + "@radix-ui/react-id": "^1.1.0", + "@radix-ui/react-primitive": "^2.0.2" + }, + "peerDependencies": { + "react": "^18 || ^19 || ^19.0.0-rc", + "react-dom": "^18 || ^19 || ^19.0.0-rc" + } + }, + "node_modules/@rjsf/shadcn/node_modules/lucide-react": { + "version": "0.548.0", + "resolved": "https://registry.npmjs.org/lucide-react/-/lucide-react-0.548.0.tgz", + "integrity": "sha512-63b16z63jM9yc1MwxajHeuu0FRZFsDtljtDjYm26Kd86UQ5HQzu9ksEtoUUw4RBuewodw/tGFmvipePvRsKeDA==", + "license": "ISC", + "peerDependencies": { + "react": "^16.5.1 || ^17.0.0 || ^18.0.0 || ^19.0.0" + } + }, + "node_modules/@rjsf/shadcn/node_modules/tailwind-merge": { + "version": "3.4.0", + "resolved": "https://registry.npmjs.org/tailwind-merge/-/tailwind-merge-3.4.0.tgz", + "integrity": "sha512-uSaO4gnW+b3Y2aWoWfFpX62vn2sR3skfhbjsEnaBI81WD1wBLlHZe5sWf0AqjksNdYTbGBEd0UasQMT3SNV15g==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/dcastil" + } + }, + "node_modules/@rjsf/utils": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/@rjsf/utils/-/utils-6.3.1.tgz", + "integrity": "sha512-ve2KHl1ITYG8QIonnuK83/T1k/5NuxP4D1egVqP9Hz2ub28kgl0rNMwmRSxXs3WIbCcMW9g3ox+daVrbSNc4Mw==", + 
"license": "Apache-2.0", + "peer": true, + "dependencies": { + "@x0k/json-schema-merge": "^1.0.2", + "fast-uri": "^3.1.0", + "jsonpointer": "^5.0.1", + "lodash": "^4.17.23", + "lodash-es": "^4.17.23", + "react-is": "^18.3.1" + }, + "engines": { + "node": ">=20" + }, + "peerDependencies": { + "react": ">=18" + } + }, + "node_modules/@rjsf/validator-ajv8": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/@rjsf/validator-ajv8/-/validator-ajv8-6.3.1.tgz", + "integrity": "sha512-2RHDxBc0gBplPniau5UZj7aznpTelSBm1b3DNybok8L0NuIfmndbp9kNXgFuRvlyfsQSyYmZSBjbzeYqr0Hpcw==", + "license": "Apache-2.0", + "dependencies": { + "ajv": "^8.17.1", + "ajv-formats": "^2.1.1", + "lodash": "^4.17.23", + "lodash-es": "^4.17.23" + }, + "engines": { + "node": ">=20" + }, + "peerDependencies": { + "@rjsf/utils": "^6.3.x" + } + }, + "node_modules/@rjsf/validator-ajv8/node_modules/ajv": { + "version": "8.17.1", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.17.1.tgz", + "integrity": "sha512-B/gBuNg5SiMTrPkC+A2+cW0RszwxYmn6VYxB/inlBStS5nx6xHIt/ehKRhIMhqusl7a8LjQoZnjCs5vhwxOQ1g==", + "license": "MIT", + "dependencies": { + "fast-deep-equal": "^3.1.3", + "fast-uri": "^3.0.1", + "json-schema-traverse": "^1.0.0", + "require-from-string": "^2.0.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/@rjsf/validator-ajv8/node_modules/json-schema-traverse": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", + "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==", + "license": "MIT" + }, "node_modules/@rollup/rollup-android-arm-eabi": { "version": "4.34.9", "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.34.9.tgz", @@ -3859,6 +5131,12 @@ "dev": true, "license": "MIT" }, + "node_modules/@types/json-schema": { + "version": "7.0.15", + 
"resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.15.tgz", + "integrity": "sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==", + "license": "MIT" + }, "node_modules/@types/lodash": { "version": "4.17.12", "resolved": "https://registry.npmjs.org/@types/lodash/-/lodash-4.17.12.tgz", @@ -3872,6 +5150,7 @@ "integrity": "sha512-MdiXf+nDuMvY0gJKxyfZ7/6UFsETO7mGKF54MVD/ekJS6HdFtpZFBgrh6Pseu64XTb2MLyFPlbW6hj8HYRQNOQ==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "undici-types": "~5.26.4" } @@ -3886,6 +5165,7 @@ "resolved": "https://registry.npmjs.org/@types/react/-/react-18.3.3.tgz", "integrity": "sha512-hti/R0pS0q1/xx+TsI73XIqk26eBsISZ2R0wUijXIngRK9R/e7Xw/cXVxQK7R5JjW+SV4zGcn5hXjudkN/pLIw==", "license": "MIT", + "peer": true, "dependencies": { "@types/prop-types": "*", "csstype": "^3.0.2" @@ -3896,6 +5176,7 @@ "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-18.3.0.tgz", "integrity": "sha512-EhwApuTmMBmXuFOikhQLIBUn6uFg81SwLMOAUgodJF14SOBOCMdU04gDoYi0WOJJHD144TL32z4yDqCW3dnkQg==", "devOptional": true, + "peer": true, "dependencies": { "@types/react": "*" } @@ -4046,6 +5327,7 @@ "integrity": "sha512-dm/J2UDY3oV3TKius2OUZIFHsomQmpHtsV0FTh1WO8EKgHLQ1QCADUqscPgTpU+ih1e21FQSRjXckHn3txn6kQ==", "dev": true, "license": "BSD-2-Clause", + "peer": true, "dependencies": { "@typescript-eslint/scope-manager": "7.12.0", "@typescript-eslint/types": "7.12.0", @@ -4313,6 +5595,15 @@ "url": "https://opencollective.com/vitest" } }, + "node_modules/@x0k/json-schema-merge": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/@x0k/json-schema-merge/-/json-schema-merge-1.0.2.tgz", + "integrity": "sha512-1734qiJHNX3+cJGDMMw2yz7R+7kpbAtl5NdPs1c/0gO5kYT6s4dMbLXiIfpZNsOYhGZI3aH7FWrj4Zxz7epXNg==", + "license": "MIT", + "dependencies": { + "@types/json-schema": "^7.0.15" + } + }, "node_modules/@yr/monotone-cubic-spline": { "version": "1.0.3", "resolved": 
"https://registry.npmjs.org/@yr/monotone-cubic-spline/-/monotone-cubic-spline-1.0.3.tgz", @@ -4323,6 +5614,7 @@ "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.11.3.tgz", "integrity": "sha512-Y9rRfJG5jcKOE0CLisYbojUjIrIEE7AGMzA/Sm4BslANhbS+cDMpgBdcPT91oJ7OuJ9hYJBx59RjbhxVnrF8Xg==", "dev": true, + "peer": true, "bin": { "acorn": "bin/acorn" }, @@ -4370,6 +5662,45 @@ "url": "https://github.com/sponsors/epoberezkin" } }, + "node_modules/ajv-formats": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/ajv-formats/-/ajv-formats-2.1.1.tgz", + "integrity": "sha512-Wx0Kx52hxE7C18hkMEggYlEifqWZtYaRgouJor+WMdPnQyEK13vgEWyVNup7SoeeoLMsr4kf5h6dOW11I15MUA==", + "license": "MIT", + "dependencies": { + "ajv": "^8.0.0" + }, + "peerDependencies": { + "ajv": "^8.0.0" + }, + "peerDependenciesMeta": { + "ajv": { + "optional": true + } + } + }, + "node_modules/ajv-formats/node_modules/ajv": { + "version": "8.17.1", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.17.1.tgz", + "integrity": "sha512-B/gBuNg5SiMTrPkC+A2+cW0RszwxYmn6VYxB/inlBStS5nx6xHIt/ehKRhIMhqusl7a8LjQoZnjCs5vhwxOQ1g==", + "license": "MIT", + "dependencies": { + "fast-deep-equal": "^3.1.3", + "fast-uri": "^3.0.1", + "json-schema-traverse": "^1.0.0", + "require-from-string": "^2.0.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/ajv-formats/node_modules/json-schema-traverse": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", + "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==", + "license": "MIT" + }, "node_modules/ansi-escapes": { "version": "4.3.2", "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-4.3.2.tgz", @@ -4445,6 +5776,7 @@ "resolved": "https://registry.npmjs.org/apexcharts/-/apexcharts-3.52.0.tgz", "integrity": 
"sha512-7dg0ADKs8AA89iYMZMe2sFDG0XK5PfqllKV9N+i3hKHm3vEtdhwz8AlXGm+/b0nJ6jKiaXsqci5LfVxNhtB+dA==", "license": "MIT", + "peer": true, "dependencies": { "@yr/monotone-cubic-spline": "^1.0.3", "svg.draggable.js": "^2.2.2", @@ -4645,6 +5977,7 @@ } ], "license": "MIT", + "peer": true, "dependencies": { "caniuse-lite": "^1.0.30001646", "electron-to-chromium": "^1.5.4", @@ -5394,6 +6727,7 @@ "resolved": "https://registry.npmjs.org/date-fns/-/date-fns-3.6.0.tgz", "integrity": "sha512-fRHTG8g/Gif+kSh50gaGEdToemgfj74aRX3swtiouboip5JDLAyDE9F11nHMIcvOaXeOC6D7SpNhi7uFyB7Uww==", "license": "MIT", + "peer": true, "funding": { "type": "github", "url": "https://github.com/sponsors/kossnocorp" @@ -5664,7 +6998,8 @@ "version": "8.2.0", "resolved": "https://registry.npmjs.org/embla-carousel/-/embla-carousel-8.2.0.tgz", "integrity": "sha512-rf2GIX8rab9E6ZZN0Uhz05746qu2KrDje9IfFyHzjwxLwhvGjUt6y9+uaY1Sf+B0OPSa3sgas7BE2hWZCtopTA==", - "license": "MIT" + "license": "MIT", + "peer": true }, "node_modules/embla-carousel-react": { "version": "8.2.0", @@ -5827,6 +7162,7 @@ "integrity": "sha512-dZ6+mexnaTIbSBZWgou51U6OmzIhYM2VcNdtiTtI7qPNZm35Akpr0f6vtw3w1Kmn5PYo+tZVfh13WrhpS6oLqQ==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@eslint-community/eslint-utils": "^4.2.0", "@eslint-community/regexpp": "^4.6.1", @@ -5882,6 +7218,7 @@ "resolved": "https://registry.npmjs.org/eslint-config-prettier/-/eslint-config-prettier-9.1.0.tgz", "integrity": "sha512-NSWl5BFQWEPi1j4TjVNItzYV7dZXZ+wP6I6ZhrBGpChQhZRUaElihE9uRRkcbRnNb76UMKDF3r+WTmNcGPKsqw==", "dev": true, + "peer": true, "bin": { "eslint-config-prettier": "bin/cli.js" }, @@ -6121,7 +7458,6 @@ "version": "3.1.3", "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==", - "dev": true, "license": "MIT" }, "node_modules/fast-diff": { @@ -6175,6 +7511,22 @@ "integrity": 
"sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==", "dev": true }, + "node_modules/fast-uri": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/fast-uri/-/fast-uri-3.1.0.tgz", + "integrity": "sha512-iPeeDKJSWf4IEOasVVrknXpaBV0IApz/gp7S2bb7Z4Lljbl2MGJRqInZiUrQwV16cpzw/D3S5j5Julj/gT52AA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/fastify" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/fastify" + } + ], + "license": "BSD-3-Clause" + }, "node_modules/fastq": { "version": "1.15.0", "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.15.0.tgz", @@ -6672,6 +8024,7 @@ } ], "license": "MIT", + "peer": true, "dependencies": { "@babel/runtime": "^7.23.2" }, @@ -7138,6 +8491,15 @@ "resolved": "https://registry.npmjs.org/jsonc-parser/-/jsonc-parser-3.2.0.tgz", "integrity": "sha512-gfFQZrcTc8CnKXp6Y4/CBT3fTc0OVuDofpre4aEeEpSBPV5X5v4+Vmx+8snU7RLPrNHPKSgLxGo9YuQzz20o+w==" }, + "node_modules/jsonpointer": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/jsonpointer/-/jsonpointer-5.0.1.tgz", + "integrity": "sha512-p/nXbhSEcu3pZRdkW1OfJhpsVtW1gd4Wa1fnQc9YLiTfAjn0312eMKimbdIQzuZl9aa9xUGaRlP9T/CJE/ditQ==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/keyv": { "version": "4.5.4", "resolved": "https://registry.npmjs.org/keyv/-/keyv-4.5.4.tgz", @@ -7166,7 +8528,8 @@ "url": "https://github.com/sponsors/lavrton" } ], - "license": "MIT" + "license": "MIT", + "peer": true }, "node_modules/levn": { "version": "0.4.1", @@ -7215,6 +8578,12 @@ "integrity": "sha512-LgVTMpQtIopCi79SJeDiP0TfWi5CNEc/L/aRdTh3yIvmZXTnheWpKjSZhnvMl8iXbC1tFg9gdHHDMLoV7CnG+w==", "license": "MIT" }, + "node_modules/lodash-es": { + "version": "4.17.23", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.23.tgz", + "integrity": "sha512-LgVTMpQtIopCi79SJeDiP0TfWi5CNEc/L/aRdTh3yIvmZXTnheWpKjSZhnvMl8iXbC1tFg9gdHHDMLoV7CnG+w==", + 
"license": "MIT" + }, "node_modules/lodash.merge": { "version": "4.6.2", "resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz", @@ -8002,6 +9371,7 @@ } ], "license": "MIT", + "peer": true, "dependencies": { "nanoid": "^3.3.8", "picocolors": "^1.1.1", @@ -8136,6 +9506,7 @@ "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.3.3.tgz", "integrity": "sha512-i2tDNA0O5IrMO757lfrdQZCc2jPNDVntV0m/+4whiDfWaTKfMNgR7Qz0NAeGz/nRqF4m5/6CLzbP4/liHt12Ew==", "license": "MIT", + "peer": true, "bin": { "prettier": "bin/prettier.cjs" }, @@ -8330,6 +9701,7 @@ "version": "18.3.1", "resolved": "https://registry.npmjs.org/react/-/react-18.3.1.tgz", "integrity": "sha512-wS+hAgJShR0KhEvPJArfuPVN1+Hz1t0Y6n5jLrGQbkb4urgPE/0Rve+1kMB1v/oWgHgm4WIcV+i7F2pTVj+2iQ==", + "peer": true, "dependencies": { "loose-envify": "^1.1.0" }, @@ -8396,6 +9768,7 @@ "version": "18.3.1", "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-18.3.1.tgz", "integrity": "sha512-5m4nQKp+rZRb09LNH59GM4BxTh9251/ylbKIbpe7TpGxfJ+9kv6BLkLBXIjjspbgbnIBNqlI23tRnTWT0snUIw==", + "peer": true, "dependencies": { "loose-envify": "^1.1.0", "scheduler": "^0.23.2" @@ -8465,6 +9838,7 @@ "resolved": "https://registry.npmjs.org/react-hook-form/-/react-hook-form-7.52.1.tgz", "integrity": "sha512-uNKIhaoICJ5KQALYZ4TOaOLElyM+xipord+Ha3crEFhTntdLvWZqVY49Wqd/0GiVCA/f9NjemLeiNPjG7Hpurg==", "license": "MIT", + "peer": true, "engines": { "node": ">=12.22.0" }, @@ -8508,10 +9882,10 @@ } }, "node_modules/react-is": { - "version": "18.2.0", - "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.2.0.tgz", - "integrity": "sha512-xWGDIW6x921xtzPkhiULtthJHoJvBbF3q26fzloPCK0hsvxtPVelvftw3zjbHWSkR2km9Z+4uxbDDK/6Zw9B8w==", - "dev": true + "version": "18.3.1", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.3.1.tgz", + "integrity": "sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg==", + "license": "MIT" }, "node_modules/react-konva": { 
"version": "18.2.10", @@ -8806,6 +10180,15 @@ "node": ">=0.10.0" } }, + "node_modules/require-from-string": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/require-from-string/-/require-from-string-2.0.2.tgz", + "integrity": "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/requires-port": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/requires-port/-/requires-port-1.0.0.tgz", @@ -9082,6 +10465,7 @@ "version": "0.23.2", "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.23.2.tgz", "integrity": "sha512-UOShsPwz7NrMUqhR6t0hWjFduvOzbtv7toDH1/hIrfRNIDBnnBWd0CwJTGvTpngVlmwGCdP9/Zl/tVrDqcuYzQ==", + "peer": true, "dependencies": { "loose-envify": "^1.1.0" } @@ -9523,6 +10907,7 @@ "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-3.4.9.tgz", "integrity": "sha512-1SEOvRr6sSdV5IDf9iC+NU4dhwdqzF4zKKq3sAbasUWHEM6lsMhX+eNN5gkPx1BvLFEnZQEUFbXnGj8Qlp83Pg==", "license": "MIT", + "peer": true, "dependencies": { "@alloc/quick-lru": "^5.2.0", "arg": "^5.0.2", @@ -9853,6 +11238,7 @@ "integrity": "sha512-aJn6wq13/afZp/jT9QZmwEjDqqvSGp1VT5GVg+f/t6/oVyrgXM6BY1h9BRh/O5p3PlUPAe+WuiEZOmb/49RqoQ==", "devOptional": true, "license": "Apache-2.0", + "peer": true, "bin": { "tsc": "bin/tsc", "tsserver": "bin/tsserver" @@ -10035,6 +11421,19 @@ "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==" }, + "node_modules/uuid": { + "version": "13.0.0", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-13.0.0.tgz", + "integrity": "sha512-XQegIaBTVUjSHliKqcnFqYypAd4S+WCYt5NIeRs6w/UAry7z8Y9j5ZwRRL4kzq9U3sD6v+85er9FvkEaBpji2w==", + "funding": [ + "https://github.com/sponsors/broofa", + "https://github.com/sponsors/ctavan" + ], + "license": "MIT", + "bin": { + "uuid": 
"dist-node/bin/uuid" + } + }, "node_modules/vaul": { "version": "0.9.1", "resolved": "https://registry.npmjs.org/vaul/-/vaul-0.9.1.tgz", @@ -10083,6 +11482,7 @@ "integrity": "sha512-+Oxm7q9hDoLMyJOYfUYBuHQo+dkAloi33apOPP56pzj+vsdJDzr+j1NISE5pyaAuKL4A3UD34qd0lx5+kfKp2g==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "esbuild": "^0.25.0", "fdir": "^6.4.4", @@ -10220,6 +11620,7 @@ "integrity": "sha512-IP7gPK3LS3Fvn44x30X1dM9vtawm0aesAa2yBIZ9vQf+qB69NXC5776+Qmcr7ohUXIQuLhk7xQR0aSUIDPqavg==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@vitest/expect": "3.0.7", "@vitest/mocker": "3.0.7", diff --git a/web/package.json b/web/package.json index 46d667058..b412bb695 100644 --- a/web/package.json +++ b/web/package.json @@ -38,6 +38,10 @@ "@radix-ui/react-toggle": "^1.1.2", "@radix-ui/react-toggle-group": "^1.1.2", "@radix-ui/react-tooltip": "^1.2.8", + "@rjsf/core": "^6.3.1", + "@rjsf/shadcn": "^6.3.1", + "@rjsf/utils": "^6.3.1", + "@rjsf/validator-ajv8": "^6.3.1", "apexcharts": "^3.52.0", "axios": "^1.7.7", "class-variance-authority": "^0.7.1", diff --git a/web/public/locales/en/common.json b/web/public/locales/en/common.json index 300f74ddb..9dec7b048 100644 --- a/web/public/locales/en/common.json +++ b/web/public/locales/en/common.json @@ -115,8 +115,10 @@ "internalID": "The Internal ID Frigate uses in the configuration and database" }, "button": { + "add": "Add", "apply": "Apply", "reset": "Reset", + "undo": "Undo", "done": "Done", "enabled": "Enabled", "enable": "Enable", @@ -150,7 +152,14 @@ "export": "Export", "deleteNow": "Delete Now", "next": "Next", - "continue": "Continue" + "continue": "Continue", + "modified": "Modified", + "overridden": "Overridden", + "resetToGlobal": "Reset to Global", + "resetToDefault": "Reset to Default", + "saveAll": "Save All", + "savingAll": "Saving All…", + "undoAll": "Undo All" }, "menu": { "system": "System", diff --git a/web/public/locales/en/config/audio.json 
b/web/public/locales/en/config/audio.json deleted file mode 100644 index f9aaffa6b..000000000 --- a/web/public/locales/en/config/audio.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "label": "Global Audio events configuration.", - "properties": { - "enabled": { - "label": "Enable audio events." - }, - "max_not_heard": { - "label": "Seconds of not hearing the type of audio to end the event." - }, - "min_volume": { - "label": "Min volume required to run audio detection." - }, - "listen": { - "label": "Audio to listen for." - }, - "filters": { - "label": "Audio filters." - }, - "enabled_in_config": { - "label": "Keep track of original state of audio detection." - }, - "num_threads": { - "label": "Number of detection threads" - } - } -} \ No newline at end of file diff --git a/web/public/locales/en/config/audio_transcription.json b/web/public/locales/en/config/audio_transcription.json deleted file mode 100644 index 6922b9d80..000000000 --- a/web/public/locales/en/config/audio_transcription.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "label": "Audio transcription config.", - "properties": { - "enabled": { - "label": "Enable audio transcription." - }, - "language": { - "label": "Language abbreviation to use for audio event transcription/translation." - }, - "device": { - "label": "The device used for license plate recognition." - }, - "model_size": { - "label": "The size of the embeddings model used." - }, - "enabled_in_config": { - "label": "Keep track of original state of camera." - }, - "live_enabled": { - "label": "Enable live transcriptions." 
- } - } -} \ No newline at end of file diff --git a/web/public/locales/en/config/auth.json b/web/public/locales/en/config/auth.json deleted file mode 100644 index a524d8d1b..000000000 --- a/web/public/locales/en/config/auth.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "label": "Auth configuration.", - "properties": { - "enabled": { - "label": "Enable authentication" - }, - "reset_admin_password": { - "label": "Reset the admin password on startup" - }, - "cookie_name": { - "label": "Name for jwt token cookie" - }, - "cookie_secure": { - "label": "Set secure flag on cookie" - }, - "session_length": { - "label": "Session length for jwt session tokens" - }, - "refresh_time": { - "label": "Refresh the session if it is going to expire in this many seconds" - }, - "failed_login_rate_limit": { - "label": "Rate limits for failed login attempts." - }, - "trusted_proxies": { - "label": "Trusted proxies for determining IP address to rate limit" - }, - "hash_iterations": { - "label": "Password hash iterations" - }, - "roles": { - "label": "Role to camera mappings. Empty list grants access to all cameras." - } - } -} \ No newline at end of file diff --git a/web/public/locales/en/config/birdseye.json b/web/public/locales/en/config/birdseye.json deleted file mode 100644 index f122f314c..000000000 --- a/web/public/locales/en/config/birdseye.json +++ /dev/null @@ -1,37 +0,0 @@ -{ - "label": "Birdseye configuration.", - "properties": { - "enabled": { - "label": "Enable birdseye view." - }, - "mode": { - "label": "Tracking mode." - }, - "restream": { - "label": "Restream birdseye via RTSP." - }, - "width": { - "label": "Birdseye width." - }, - "height": { - "label": "Birdseye height." - }, - "quality": { - "label": "Encoding quality." 
- }, - "inactivity_threshold": { - "label": "Birdseye Inactivity Threshold" - }, - "layout": { - "label": "Birdseye Layout Config", - "properties": { - "scaling_factor": { - "label": "Birdseye Scaling Factor" - }, - "max_cameras": { - "label": "Max cameras" - } - } - } - } -} \ No newline at end of file diff --git a/web/public/locales/en/config/camera_groups.json b/web/public/locales/en/config/camera_groups.json deleted file mode 100644 index 2900e9c67..000000000 --- a/web/public/locales/en/config/camera_groups.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "label": "Camera group configuration", - "properties": { - "cameras": { - "label": "List of cameras in this group." - }, - "icon": { - "label": "Icon that represents camera group." - }, - "order": { - "label": "Sort order for group." - } - } -} \ No newline at end of file diff --git a/web/public/locales/en/config/cameras.json b/web/public/locales/en/config/cameras.json index d2c74dc54..b2b34c8fb 100644 --- a/web/public/locales/en/config/cameras.json +++ b/web/public/locales/en/config/cameras.json @@ -1,758 +1,927 @@ { - "label": "Camera configuration.", - "properties": { - "name": { - "label": "Camera name." - }, - "friendly_name": { - "label": "Camera friendly name used in the Frigate UI." - }, + "label": "CameraConfig", + "name": { + "label": "Camera name", + "description": "Camera name is required" + }, + "friendly_name": { + "label": "Friendly name", + "description": "Camera friendly name used in the Frigate UI" + }, + "enabled": { + "label": "Enabled", + "description": "Enabled" + }, + "audio": { + "label": "Audio events", + "description": "Settings for audio-based event detection for this camera.", "enabled": { - "label": "Enable camera." + "label": "Enable audio detection", + "description": "Enable or disable audio event detection for this camera." }, - "audio": { - "label": "Audio events configuration.", - "properties": { - "enabled": { - "label": "Enable audio events." 
+ "max_not_heard": { + "label": "End timeout", + "description": "Amount of seconds without the configured audio type before the audio event is ended." + }, + "min_volume": { + "label": "Minimum volume", + "description": "Minimum RMS volume threshold required to run audio detection; lower values increase sensitivity (e.g., 200 high, 500 medium, 1000 low)." + }, + "listen": { + "label": "Listen types", + "description": "List of audio event types to detect (for example: bark, fire_alarm, scream, speech, yell)." + }, + "filters": { + "label": "Audio filters", + "description": "Per-audio-type filter settings such as confidence thresholds used to reduce false positives." + }, + "enabled_in_config": { + "label": "Original audio state", + "description": "Indicates whether audio detection was originally enabled in the static config file." + }, + "num_threads": { + "label": "Detection threads", + "description": "Number of threads to use for audio detection processing." + } + }, + "audio_transcription": { + "label": "Audio transcription", + "description": "Settings for live and speech audio transcription used for events and live captions.", + "enabled": { + "label": "Enable transcription", + "description": "Enable or disable manually triggered audio event transcription." + }, + "enabled_in_config": { + "label": "Original transcription state" + }, + "live_enabled": { + "label": "Live transcription", + "description": "Enable streaming live transcription for audio as it is received." + } + }, + "birdseye": { + "label": "Birdseye", + "description": "Settings for the Birdseye composite view that composes multiple camera feeds into a single layout.", + "enabled": { + "label": "Enable Birdseye", + "description": "Enable or disable the Birdseye view feature." + }, + "mode": { + "label": "Tracking mode", + "description": "Mode for including cameras in Birdseye: 'objects', 'motion', or 'continuous'." 
+ }, + "order": { + "label": "Position", + "description": "Numeric position controlling the camera's ordering in the Birdseye layout." + } + }, + "detect": { + "label": "Object Detection", + "description": "Settings for the detection/detect role used to run object detection and initialize trackers.", + "enabled": { + "label": "Detection enabled", + "description": "Enable or disable object detection for this camera. Detection must be enabled for object tracking to run." + }, + "height": { + "label": "Detect height", + "description": "Height (pixels) of frames used for the detect stream; leave empty to use the native stream resolution." + }, + "width": { + "label": "Detect width", + "description": "Width (pixels) of frames used for the detect stream; leave empty to use the native stream resolution." + }, + "fps": { + "label": "Detect FPS", + "description": "Desired frames per second to run detection on; lower values reduce CPU usage (recommended value is 5, only set higher - at most 10 - if tracking extremely fast moving objects)." + }, + "min_initialized": { + "label": "Minimum initialization frames", + "description": "Number of consecutive detection hits required before creating a tracked object. Increase to reduce false initializations. Default value is fps divided by 2." + }, + "max_disappeared": { + "label": "Maximum disappeared frames", + "description": "Number of frames without a detection before a tracked object is considered gone." + }, + "stationary": { + "label": "Stationary objects config", + "description": "Settings to detect and manage objects that remain stationary for a period of time.", + "interval": { + "label": "Stationary interval", + "description": "How often (in frames) to run a detection check to confirm a stationary object." + }, + "threshold": { + "label": "Stationary threshold", + "description": "Number of frames with no position change required to mark an object as stationary." 
+ }, + "max_frames": { + "label": "Max frames", + "description": "Limits how long stationary objects are tracked before being discarded.", + "default": { + "label": "Default max frames", + "description": "Default maximum frames to track a stationary object before stopping." }, - "max_not_heard": { - "label": "Seconds of not hearing the type of audio to end the event." - }, - "min_volume": { - "label": "Min volume required to run audio detection." - }, - "listen": { - "label": "Audio to listen for." - }, - "filters": { - "label": "Audio filters." - }, - "enabled_in_config": { - "label": "Keep track of original state of audio detection." - }, - "num_threads": { - "label": "Number of detection threads" + "objects": { + "label": "Object max frames", + "description": "Per-object overrides for maximum frames to track stationary objects." } + }, + "classifier": { + "label": "Enable visual classifier", + "description": "Use a visual classifier to detect truly stationary objects even when bounding boxes jitter." } }, - "audio_transcription": { - "label": "Audio transcription config.", - "properties": { - "enabled": { - "label": "Enable audio transcription." - }, - "language": { - "label": "Language abbreviation to use for audio event transcription/translation." - }, - "device": { - "label": "The device used for license plate recognition." - }, - "model_size": { - "label": "The size of the embeddings model used." - }, - "enabled_in_config": { - "label": "Keep track of original state of camera." - }, - "live_enabled": { - "label": "Enable live transcriptions." - } + "annotation_offset": { + "label": "Annotation offset", + "description": "Milliseconds to shift detect annotations to better align timeline bounding boxes with recordings; can be positive or negative." 
+ } + }, + "face_recognition": { + "label": "Face recognition", + "description": "Settings for face detection and recognition for this camera.", + "enabled": { + "label": "Enable face recognition", + "description": "Enable or disable face recognition." + }, + "min_area": { + "label": "Minimum face area", + "description": "Minimum area (pixels) of a detected face box required to attempt recognition." + } + }, + "ffmpeg": { + "label": "FFmpeg", + "description": "FFmpeg settings including binary path, args, hwaccel options, and per-role output args.", + "path": { + "label": "FFmpeg path", + "description": "Path to the FFmpeg binary to use or a version alias (\"5.0\" or \"7.0\")." + }, + "global_args": { + "label": "FFmpeg global arguments", + "description": "Global arguments passed to FFmpeg processes." + }, + "hwaccel_args": { + "label": "Hardware acceleration arguments", + "description": "Hardware acceleration arguments for FFmpeg. Provider-specific presets are recommended." + }, + "input_args": { + "label": "Input arguments", + "description": "Input arguments applied to FFmpeg input streams." + }, + "output_args": { + "label": "Output arguments", + "description": "Default output arguments used for different FFmpeg roles such as detect and record.", + "detect": { + "label": "Detect output arguments", + "description": "Default output arguments for detect role streams." + }, + "record": { + "label": "Record output arguments", + "description": "Default output arguments for record role streams." } }, - "birdseye": { - "label": "Birdseye camera configuration.", - "properties": { - "enabled": { - "label": "Enable birdseye view for camera." - }, - "mode": { - "label": "Tracking mode for camera." - }, - "order": { - "label": "Position of the camera in the birdseye view." - } + "retry_interval": { + "label": "FFmpeg retry time", + "description": "Seconds to wait before attempting to reconnect a camera stream after failure. Default is 10." 
+ }, + "apple_compatibility": { + "label": "Apple compatibility", + "description": "Enable HEVC tagging for better Apple player compatibility when recording H.265." + }, + "gpu": { + "label": "GPU index", + "description": "Default GPU index used for hardware acceleration if available." + }, + "inputs": { + "label": "Camera inputs", + "description": "List of input stream definitions (paths and roles) for this camera.", + "path": { + "label": "Input path", + "description": "Camera input stream URL or path." + }, + "roles": { + "label": "Input roles", + "description": "Roles for this input stream." + }, + "global_args": { + "label": "FFmpeg global arguments", + "description": "FFmpeg global arguments for this input stream." + }, + "hwaccel_args": { + "label": "Hardware acceleration arguments", + "description": "Hardware acceleration arguments for this input stream." + }, + "input_args": { + "label": "Input arguments", + "description": "Input arguments specific to this stream." + } + } + }, + "live": { + "label": "Live playback", + "description": "Settings used by the Web UI to control live stream selection, resolution and quality.", + "streams": { + "label": "Live stream names", + "description": "Mapping of configured stream names to restream/go2rtc names used for live playback." + }, + "height": { + "label": "Live height", + "description": "Height (pixels) to render the jsmpeg live stream in the Web UI; must be <= detect stream height." + }, + "quality": { + "label": "Live quality", + "description": "Encoding quality for the jsmpeg stream (1 highest, 31 lowest)." + } + }, + "lpr": { + "label": "License Plate Recognition", + "description": "License plate recognition settings including detection thresholds, formatting, and known plates.", + "enabled": { + "label": "Enable LPR", + "description": "Enable or disable LPR on this camera." 
+ }, + "expire_time": { + "label": "Expire seconds", + "description": "Time in seconds after which an unseen plate is expired from the tracker (for dedicated LPR cameras only)." + }, + "min_area": { + "label": "Minimum plate area", + "description": "Minimum plate area (pixels) required to attempt recognition." + }, + "enhancement": { + "label": "Enhancement level", + "description": "Enhancement level (0-10) to apply to plate crops prior to OCR; higher values may not always improve results, levels above 5 may only work with night time plates and should be used with caution." + } + }, + "motion": { + "label": "Motion detection", + "description": "Default motion detection settings for this camera.", + "enabled": { + "label": "Enable motion detection", + "description": "Enable or disable motion detection for this camera." + }, + "threshold": { + "label": "Motion threshold", + "description": "Pixel difference threshold used by the motion detector; higher values reduce sensitivity (range 1-255)." + }, + "lightning_threshold": { + "label": "Lightning threshold", + "description": "Threshold to detect and ignore brief lighting spikes (lower is more sensitive, values between 0.3 and 1.0)." + }, + "improve_contrast": { + "label": "Improve contrast", + "description": "Apply contrast improvement to frames before motion analysis to help detection." + }, + "contour_area": { + "label": "Contour area", + "description": "Minimum contour area in pixels required for a motion contour to be counted." + }, + "delta_alpha": { + "label": "Delta alpha", + "description": "Alpha blending factor used in frame differencing for motion calculation." + }, + "frame_alpha": { + "label": "Frame alpha", + "description": "Alpha value used when blending frames for motion preprocessing." + }, + "frame_height": { + "label": "Frame height", + "description": "Height in pixels to scale frames to when computing motion." 
+ }, + "mask": { + "label": "Mask coordinates", + "description": "Ordered x,y coordinates defining the motion mask polygon used to include/exclude areas." + }, + "mqtt_off_delay": { + "label": "MQTT off delay", + "description": "Seconds to wait after last motion before publishing an MQTT 'off' state." + }, + "enabled_in_config": { + "label": "Original motion state", + "description": "Indicates whether motion detection was enabled in the original static configuration." + }, + "raw_mask": { + "label": "Raw Mask" + } + }, + "objects": { + "label": "Objects", + "description": "Object tracking defaults including which labels to track and per-object filters.", + "track": { + "label": "Objects to track", + "description": "List of object labels to track for this camera." + }, + "filters": { + "label": "Object filters", + "description": "Filters applied to detected objects to reduce false positives (area, ratio, confidence).", + "min_area": { + "label": "Minimum object area", + "description": "Minimum bounding box area (pixels or percentage) required for this object type. Can be pixels (int) or percentage (float between 0.000001 and 0.99)." + }, + "max_area": { + "label": "Maximum object area", + "description": "Maximum bounding box area (pixels or percentage) allowed for this object type. Can be pixels (int) or percentage (float between 0.000001 and 0.99)." + }, + "min_ratio": { + "label": "Minimum aspect ratio", + "description": "Minimum width/height ratio required for the bounding box to qualify." + }, + "max_ratio": { + "label": "Maximum aspect ratio", + "description": "Maximum width/height ratio allowed for the bounding box to qualify." + }, + "threshold": { + "label": "Confidence threshold", + "description": "Average detection confidence threshold required for the object to be considered a true positive." + }, + "min_score": { + "label": "Minimum confidence", + "description": "Minimum single-frame detection confidence required for the object to be counted." 
+ }, + "mask": { + "label": "Filter mask", + "description": "Polygon coordinates defining where this filter applies within the frame." + }, + "raw_mask": { + "label": "Raw Mask" } }, - "detect": { - "label": "Object detection configuration.", - "properties": { - "enabled": { - "label": "Detection Enabled." - }, - "height": { - "label": "Height of the stream for the detect role." - }, - "width": { - "label": "Width of the stream for the detect role." - }, - "fps": { - "label": "Number of frames per second to process through detection." - }, - "min_initialized": { - "label": "Minimum number of consecutive hits for an object to be initialized by the tracker." - }, - "max_disappeared": { - "label": "Maximum number of frames the object can disappear before detection ends." - }, - "stationary": { - "label": "Stationary objects config.", - "properties": { - "interval": { - "label": "Frame interval for checking stationary objects." - }, - "threshold": { - "label": "Number of frames without a position change for an object to be considered stationary" - }, - "max_frames": { - "label": "Max frames for stationary objects.", - "properties": { - "default": { - "label": "Default max frames." - }, - "objects": { - "label": "Object specific max frames." - } - } - }, - "classifier": { - "label": "Enable visual classifier for determing if objects with jittery bounding boxes are stationary." - } - } - }, - "annotation_offset": { - "label": "Milliseconds to offset detect annotations by." - } - } + "mask": { + "label": "Object mask", + "description": "Mask polygon used to prevent object detection in specified areas." }, - "face_recognition": { - "label": "Face recognition config.", - "properties": { - "enabled": { - "label": "Enable face recognition." 
+ "genai": { + "label": "GenAI object config", + "description": "GenAI options for describing tracked objects and sending frames for generation.", + "enabled": { + "label": "Enable GenAI", + "description": "Enable GenAI generation of descriptions for tracked objects by default." + }, + "use_snapshot": { + "label": "Use snapshots", + "description": "Use object snapshots instead of thumbnails for GenAI description generation." + }, + "prompt": { + "label": "Caption prompt", + "description": "Default prompt template used when generating descriptions with GenAI." + }, + "object_prompts": { + "label": "Object prompts", + "description": "Per-object prompts to customize GenAI outputs for specific labels." + }, + "objects": { + "label": "GenAI objects", + "description": "List of object labels to send to GenAI by default." + }, + "required_zones": { + "label": "Required zones", + "description": "Zones that must be entered for objects to qualify for GenAI description generation." + }, + "debug_save_thumbnails": { + "label": "Save thumbnails", + "description": "Save thumbnails sent to GenAI for debugging and review." + }, + "send_triggers": { + "label": "GenAI triggers", + "description": "Defines when frames should be sent to GenAI (on end, after updates, etc.).", + "tracked_object_end": { + "label": "Send on end", + "description": "Send a request to GenAI when the tracked object ends." }, - "min_area": { - "label": "Min area of face box to consider running face recognition." + "after_significant_updates": { + "label": "Early GenAI trigger", + "description": "Send a request to GenAI after a specified number of significant updates for the tracked object." } + }, + "enabled_in_config": { + "label": "Original GenAI state", + "description": "Indicates whether GenAI was enabled in the original static config." 
} + } + }, + "record": { + "label": "Recording", + "description": "Recording and retention settings for this camera.", + "enabled": { + "label": "Enable recording", + "description": "Enable or disable recording for this camera." }, - "ffmpeg": { - "label": "FFmpeg configuration for the camera.", - "properties": { - "path": { - "label": "FFmpeg path" - }, - "global_args": { - "label": "Global FFmpeg arguments." - }, - "hwaccel_args": { - "label": "FFmpeg hardware acceleration arguments." - }, - "input_args": { - "label": "FFmpeg input arguments." - }, - "output_args": { - "label": "FFmpeg output arguments per role.", - "properties": { - "detect": { - "label": "Detect role FFmpeg output arguments." - }, - "record": { - "label": "Record role FFmpeg output arguments." - } - } - }, - "retry_interval": { - "label": "Time in seconds to wait before FFmpeg retries connecting to the camera." - }, - "apple_compatibility": { - "label": "Set tag on HEVC (H.265) recording stream to improve compatibility with Apple players." - }, - "inputs": { - "label": "Camera inputs." - } - } + "expire_interval": { + "label": "Record cleanup interval", + "description": "Minutes between cleanup passes that remove expired recording segments." }, - "live": { - "label": "Live playback settings.", - "properties": { - "streams": { - "label": "Friendly names and restream names to use for live view." - }, - "height": { - "label": "Live camera view height" - }, - "quality": { - "label": "Live camera view quality" - } - } - }, - "lpr": { - "label": "LPR config.", - "properties": { - "enabled": { - "label": "Enable license plate recognition." - }, - "expire_time": { - "label": "Expire plates not seen after number of seconds (for dedicated LPR cameras only)." - }, - "min_area": { - "label": "Minimum area of license plate to begin running recognition." - }, - "enhancement": { - "label": "Amount of contrast adjustment and denoising to apply to license plate images before recognition." 
- } + "continuous": { + "label": "Continuous retention", + "description": "Number of days to retain recordings regardless of tracked objects or motion. Set to 0 if you only want to retain recordings of alerts and detections.", + "days": { + "label": "Retention days", + "description": "Days to retain recordings." } }, "motion": { - "label": "Motion detection configuration.", - "properties": { - "enabled": { - "label": "Enable motion on all cameras." + "label": "Motion retention", + "description": "Number of days to retain recordings triggered by motion regardless of tracked objects. Set to 0 if you only want to retain recordings of alerts and detections.", + "days": { + "label": "Retention days", + "description": "Days to retain recordings." + } + }, + "detections": { + "label": "Detection retention", + "description": "Recording retention settings for detection events including pre/post capture durations.", + "pre_capture": { + "label": "Pre-capture seconds", + "description": "Number of seconds before the detection event to include in the recording." + }, + "post_capture": { + "label": "Post-capture seconds", + "description": "Number of seconds after the detection event to include in the recording." + }, + "retain": { + "label": "Event retention", + "description": "Retention settings for recordings of detection events.", + "days": { + "label": "Retention days", + "description": "Number of days to retain recordings of detection events." }, - "threshold": { - "label": "Motion detection threshold (1-255)." - }, - "lightning_threshold": { - "label": "Lightning detection threshold (0.3-1.0)." - }, - "improve_contrast": { - "label": "Improve Contrast" - }, - "contour_area": { - "label": "Contour Area" - }, - "delta_alpha": { - "label": "Delta Alpha" - }, - "frame_alpha": { - "label": "Frame Alpha" - }, - "frame_height": { - "label": "Frame Height" - }, - "mask": { - "label": "Coordinates polygon for the motion mask." 
- },
- "mqtt_off_delay": {
- "label": "Delay for updating MQTT with no motion detected."
- },
- "enabled_in_config": {
- "label": "Keep track of original state of motion detection."
+ "mode": {
+ "label": "Retention mode",
+ "description": "Mode for retention: all (save all segments), motion (save segments with motion), or active_objects (save segments with active objects)."
}
}
},
- "objects": {
- "label": "Object configuration.",
- "properties": {
- "track": {
- "label": "Objects to track."
+ "alerts": {
+ "label": "Alert retention",
+ "description": "Recording retention settings for alert events including pre/post capture durations.",
+ "pre_capture": {
+ "label": "Pre-capture seconds",
+ "description": "Number of seconds before the alert event to include in the recording."
+ },
+ "post_capture": {
+ "label": "Post-capture seconds",
+ "description": "Number of seconds after the alert event to include in the recording."
+ },
+ "retain": {
+ "label": "Event retention",
+ "description": "Retention settings for recordings of alert events.",
+ "days": {
+ "label": "Retention days",
+ "description": "Number of days to retain recordings of alert events."
},
- "filters": {
- "label": "Object filters.",
- "properties": {
- "min_area": {
- "label": "Minimum area of bounding box for object to be counted. Can be pixels (int) or percentage (float between 0.000001 and 0.99)."
- },
- "max_area": {
- "label": "Maximum area of bounding box for object to be counted. Can be pixels (int) or percentage (float between 0.000001 and 0.99)."
- },
- "min_ratio": {
- "label": "Minimum ratio of bounding box's width/height for object to be counted."
- },
- "max_ratio": {
- "label": "Maximum ratio of bounding box's width/height for object to be counted."
- },
- "threshold": {
- "label": "Average detection confidence threshold for object to be counted."
- },
- "min_score": {
- "label": "Minimum detection confidence for object to be counted."
- }, - "mask": { - "label": "Detection area polygon mask for this filter configuration." - } - } - }, - "mask": { - "label": "Object mask." - }, - "genai": { - "label": "Config for using genai to analyze objects.", - "properties": { - "enabled": { - "label": "Enable GenAI for camera." - }, - "use_snapshot": { - "label": "Use snapshots for generating descriptions." - }, - "prompt": { - "label": "Default caption prompt." - }, - "object_prompts": { - "label": "Object specific prompts." - }, - "objects": { - "label": "List of objects to run generative AI for." - }, - "required_zones": { - "label": "List of required zones to be entered in order to run generative AI." - }, - "debug_save_thumbnails": { - "label": "Save thumbnails sent to generative AI for debugging purposes." - }, - "send_triggers": { - "label": "What triggers to use to send frames to generative AI for a tracked object.", - "properties": { - "tracked_object_end": { - "label": "Send once the object is no longer tracked." - }, - "after_significant_updates": { - "label": "Send an early request to generative AI when X frames accumulated." - } - } - }, - "enabled_in_config": { - "label": "Keep track of original state of generative AI." - } - } + "mode": { + "label": "Retention mode", + "description": "Mode for retention: all (save all segments), motion (save segments with motion), or active_objects (save segments with active objects)." } } }, - "record": { - "label": "Record configuration.", - "properties": { - "enabled": { - "label": "Enable record on all cameras." - }, - "expire_interval": { - "label": "Number of minutes to wait between cleanup runs." - }, - "continuous": { - "label": "Continuous recording retention settings.", - "properties": { - "days": { - "label": "Default retention period." - } - } - }, - "motion": { - "label": "Motion recording retention settings.", - "properties": { - "days": { - "label": "Default retention period." 
- } - } - }, - "detections": { - "label": "Detection specific retention settings.", - "properties": { - "pre_capture": { - "label": "Seconds to retain before event starts." - }, - "post_capture": { - "label": "Seconds to retain after event ends." - }, - "retain": { - "label": "Event retention settings.", - "properties": { - "days": { - "label": "Default retention period." - }, - "mode": { - "label": "Retain mode." - } - } - } - } - }, - "alerts": { - "label": "Alert specific retention settings.", - "properties": { - "pre_capture": { - "label": "Seconds to retain before event starts." - }, - "post_capture": { - "label": "Seconds to retain after event ends." - }, - "retain": { - "label": "Event retention settings.", - "properties": { - "days": { - "label": "Default retention period." - }, - "mode": { - "label": "Retain mode." - } - } - } - } - }, - "export": { - "label": "Recording Export Config", - "properties": { - "timelapse_args": { - "label": "Timelapse Args" - } - } - }, - "preview": { - "label": "Recording Preview Config", - "properties": { - "quality": { - "label": "Quality of recording preview." - } - } - }, - "enabled_in_config": { - "label": "Keep track of original state of recording." - } + "export": { + "label": "Export config", + "description": "Settings used when exporting recordings such as timelapse and hardware acceleration.", + "hwaccel_args": { + "label": "Export hwaccel args", + "description": "Hardware acceleration args to use for export/transcode operations." } }, - "review": { - "label": "Review configuration.", - "properties": { - "alerts": { - "label": "Review alerts config.", - "properties": { - "enabled": { - "label": "Enable alerts." - }, - "labels": { - "label": "Labels to create alerts for." - }, - "required_zones": { - "label": "List of required zones to be entered in order to save the event as an alert." - }, - "enabled_in_config": { - "label": "Keep track of original state of alerts." 
- }, - "cutoff_time": { - "label": "Time to cutoff alerts after no alert-causing activity has occurred." - } - } - }, - "detections": { - "label": "Review detections config.", - "properties": { - "enabled": { - "label": "Enable detections." - }, - "labels": { - "label": "Labels to create detections for." - }, - "required_zones": { - "label": "List of required zones to be entered in order to save the event as a detection." - }, - "cutoff_time": { - "label": "Time to cutoff detection after no detection-causing activity has occurred." - }, - "enabled_in_config": { - "label": "Keep track of original state of detections." - } - } - }, - "genai": { - "label": "Review description genai config.", - "properties": { - "enabled": { - "label": "Enable GenAI descriptions for review items." - }, - "alerts": { - "label": "Enable GenAI for alerts." - }, - "detections": { - "label": "Enable GenAI for detections." - }, - "additional_concerns": { - "label": "Additional concerns that GenAI should make note of on this camera." - }, - "debug_save_thumbnails": { - "label": "Save thumbnails sent to generative AI for debugging purposes." - }, - "enabled_in_config": { - "label": "Keep track of original state of generative AI." - }, - "preferred_language": { - "label": "Preferred language for GenAI Response" - }, - "activity_context_prompt": { - "label": "Custom activity context prompt defining normal activity patterns for this property." 
- } - } - } - } - }, - "semantic_search": { - "label": "Semantic search configuration.", - "properties": { - "triggers": { - "label": "Trigger actions on tracked objects that match existing thumbnails or descriptions", - "properties": { - "enabled": { - "label": "Enable this trigger" - }, - "type": { - "label": "Type of trigger" - }, - "data": { - "label": "Trigger content (text phrase or image ID)" - }, - "threshold": { - "label": "Confidence score required to run the trigger" - }, - "actions": { - "label": "Actions to perform when trigger is matched" - } - } - } - } - }, - "snapshots": { - "label": "Snapshot configuration.", - "properties": { - "enabled": { - "label": "Snapshots enabled." - }, - "clean_copy": { - "label": "Create a clean copy of the snapshot image." - }, - "timestamp": { - "label": "Add a timestamp overlay on the snapshot." - }, - "bounding_box": { - "label": "Add a bounding box overlay on the snapshot." - }, - "crop": { - "label": "Crop the snapshot to the detected object." - }, - "required_zones": { - "label": "List of required zones to be entered in order to save a snapshot." - }, - "height": { - "label": "Snapshot image height." - }, - "retain": { - "label": "Snapshot retention.", - "properties": { - "default": { - "label": "Default retention period." - }, - "mode": { - "label": "Retain mode." - }, - "objects": { - "label": "Object retention period." - } - } - }, - "quality": { - "label": "Quality of the encoded jpeg (0-100)." - } - } - }, - "timestamp_style": { - "label": "Timestamp style configuration.", - "properties": { - "position": { - "label": "Timestamp position." - }, - "format": { - "label": "Timestamp format." - }, - "color": { - "label": "Timestamp color.", - "properties": { - "red": { - "label": "Red" - }, - "green": { - "label": "Green" - }, - "blue": { - "label": "Blue" - } - } - }, - "thickness": { - "label": "Timestamp thickness." - }, - "effect": { - "label": "Timestamp effect." 
- } - } - }, - "best_image_timeout": { - "label": "How long to wait for the image with the highest confidence score." - }, - "mqtt": { - "label": "MQTT configuration.", - "properties": { - "enabled": { - "label": "Send image over MQTT." - }, - "timestamp": { - "label": "Add timestamp to MQTT image." - }, - "bounding_box": { - "label": "Add bounding box to MQTT image." - }, - "crop": { - "label": "Crop MQTT image to detected object." - }, - "height": { - "label": "MQTT image height." - }, - "required_zones": { - "label": "List of required zones to be entered in order to send the image." - }, - "quality": { - "label": "Quality of the encoded jpeg (0-100)." - } - } - }, - "notifications": { - "label": "Notifications configuration.", - "properties": { - "enabled": { - "label": "Enable notifications" - }, - "email": { - "label": "Email required for push." - }, - "cooldown": { - "label": "Cooldown period for notifications (time in seconds)." - }, - "enabled_in_config": { - "label": "Keep track of original state of notifications." - } - } - }, - "onvif": { - "label": "Camera Onvif Configuration.", - "properties": { - "host": { - "label": "Onvif Host" - }, - "port": { - "label": "Onvif Port" - }, - "user": { - "label": "Onvif Username" - }, - "password": { - "label": "Onvif Password" - }, - "tls_insecure": { - "label": "Onvif Disable TLS verification" - }, - "autotracking": { - "label": "PTZ auto tracking config.", - "properties": { - "enabled": { - "label": "Enable PTZ object autotracking." - }, - "calibrate_on_startup": { - "label": "Perform a camera calibration when Frigate starts." - }, - "zooming": { - "label": "Autotracker zooming mode." - }, - "zoom_factor": { - "label": "Zooming factor (0.1-0.75)." - }, - "track": { - "label": "Objects to track." - }, - "required_zones": { - "label": "List of required zones to be entered in order to begin autotracking." - }, - "return_preset": { - "label": "Name of camera preset to return to when object tracking is over." 
- }, - "timeout": { - "label": "Seconds to delay before returning to preset." - }, - "movement_weights": { - "label": "Internal value used for PTZ movements based on the speed of your camera's motor." - }, - "enabled_in_config": { - "label": "Keep track of original state of autotracking." - } - } - }, - "ignore_time_mismatch": { - "label": "Onvif Ignore Time Synchronization Mismatch Between Camera and Server" - } - } - }, - "type": { - "label": "Camera Type" - }, - "ui": { - "label": "Camera UI Modifications.", - "properties": { - "order": { - "label": "Order of camera in UI." - }, - "dashboard": { - "label": "Show this camera in Frigate dashboard UI." - } - } - }, - "webui_url": { - "label": "URL to visit the camera directly from system page" - }, - "zones": { - "label": "Zone configuration.", - "properties": { - "filters": { - "label": "Zone filters.", - "properties": { - "min_area": { - "label": "Minimum area of bounding box for object to be counted. Can be pixels (int) or percentage (float between 0.000001 and 0.99)." - }, - "max_area": { - "label": "Maximum area of bounding box for object to be counted. Can be pixels (int) or percentage (float between 0.000001 and 0.99)." - }, - "min_ratio": { - "label": "Minimum ratio of bounding box's width/height for object to be counted." - }, - "max_ratio": { - "label": "Maximum ratio of bounding box's width/height for object to be counted." - }, - "threshold": { - "label": "Average detection confidence threshold for object to be counted." - }, - "min_score": { - "label": "Minimum detection confidence for object to be counted." - }, - "mask": { - "label": "Detection area polygon mask for this filter configuration." - } - } - }, - "coordinates": { - "label": "Coordinates polygon for the defined zone." - }, - "distances": { - "label": "Real-world distances for the sides of quadrilateral for the defined zone." 
- }, - "inertia": { - "label": "Number of consecutive frames required for object to be considered present in the zone." - }, - "loitering_time": { - "label": "Number of seconds that an object must loiter to be considered in the zone." - }, - "speed_threshold": { - "label": "Minimum speed value for an object to be considered in the zone." - }, - "objects": { - "label": "List of objects that can trigger the zone." - } + "preview": { + "label": "Preview config", + "description": "Settings controlling the quality of recording previews shown in the UI.", + "quality": { + "label": "Preview quality", + "description": "Preview quality level (very_low, low, medium, high, very_high)." } }, "enabled_in_config": { - "label": "Keep track of original state of camera." + "label": "Original recording state", + "description": "Indicates whether recording was enabled in the original static configuration." } + }, + "review": { + "label": "Review", + "description": "Settings that control alerts, detections, and GenAI review summaries used by the UI and storage for this camera.", + "alerts": { + "label": "Alerts config", + "description": "Settings for which tracked objects generate alerts and how alerts are retained.", + "enabled": { + "label": "Enable alerts", + "description": "Enable or disable alert generation for this camera." + }, + "labels": { + "label": "Alert labels", + "description": "List of object labels that qualify as alerts (for example: car, person)." + }, + "required_zones": { + "label": "Required zones", + "description": "Zones that an object must enter to be considered an alert; leave empty to allow any zone." + }, + "enabled_in_config": { + "label": "Original alerts state", + "description": "Tracks whether alerts were originally enabled in the static configuration." + }, + "cutoff_time": { + "label": "Alerts cutoff time", + "description": "Seconds to wait after no alert-causing activity before cutting off an alert." 
+ } + }, + "detections": { + "label": "Detections config", + "description": "Settings for creating detection events (non-alert) and how long to keep them.", + "enabled": { + "label": "Enable detections", + "description": "Enable or disable detection events for this camera." + }, + "labels": { + "label": "Detection labels", + "description": "List of object labels that qualify as detection events." + }, + "required_zones": { + "label": "Required zones", + "description": "Zones that an object must enter to be considered a detection; leave empty to allow any zone." + }, + "cutoff_time": { + "label": "Detections cutoff time", + "description": "Seconds to wait after no detection-causing activity before cutting off a detection." + }, + "enabled_in_config": { + "label": "Original detections state", + "description": "Tracks whether detections were originally enabled in the static configuration." + } + }, + "genai": { + "label": "GenAI config", + "description": "Controls use of generative AI for producing descriptions and summaries of review items.", + "enabled": { + "label": "Enable GenAI descriptions", + "description": "Enable or disable GenAI-generated descriptions and summaries for review items." + }, + "alerts": { + "label": "Enable GenAI for alerts", + "description": "Use GenAI to generate descriptions for alert items." + }, + "detections": { + "label": "Enable GenAI for detections", + "description": "Use GenAI to generate descriptions for detection items." + }, + "image_source": { + "label": "Review image source", + "description": "Source of images sent to GenAI ('preview' or 'recordings'); 'recordings' uses higher quality frames but more tokens." + }, + "additional_concerns": { + "label": "Additional concerns", + "description": "A list of additional concerns or notes the GenAI should consider when evaluating activity on this camera." 
+ }, + "debug_save_thumbnails": { + "label": "Save thumbnails", + "description": "Save thumbnails that are sent to the GenAI provider for debugging and review." + }, + "enabled_in_config": { + "label": "Original GenAI state", + "description": "Tracks whether GenAI review was originally enabled in the static configuration." + }, + "preferred_language": { + "label": "Preferred language", + "description": "Preferred language to request from the GenAI provider for generated responses." + }, + "activity_context_prompt": { + "label": "Activity context prompt", + "description": "Custom prompt describing what is and is not suspicious activity to provide context for GenAI summaries." + } + } + }, + "semantic_search": { + "label": "Semantic Search", + "description": "Settings for semantic search which builds and queries object embeddings to find similar items.", + "triggers": { + "label": "Triggers", + "description": "Actions and matching criteria for camera-specific semantic search triggers.", + "friendly_name": { + "label": "Friendly name", + "description": "Optional friendly name displayed in the UI for this trigger." + }, + "enabled": { + "label": "Enable this trigger", + "description": "Enable or disable this semantic search trigger." + }, + "type": { + "label": "Trigger type", + "description": "Type of trigger: 'thumbnail' (match against image) or 'description' (match against text)." + }, + "data": { + "label": "Trigger content", + "description": "Text phrase or thumbnail ID to match against tracked objects." + }, + "threshold": { + "label": "Trigger threshold", + "description": "Minimum similarity score (0-1) required to activate this trigger." + }, + "actions": { + "label": "Trigger actions", + "description": "List of actions to execute when trigger matches (notification, sub_label, attribute)." 
+ } + } + }, + "snapshots": { + "label": "Snapshots", + "description": "Settings for saved JPEG snapshots of tracked objects for this camera.", + "enabled": { + "label": "Snapshots enabled", + "description": "Enable or disable saving snapshots for this camera." + }, + "clean_copy": { + "label": "Save clean copy", + "description": "Save an unannotated clean copy of snapshots in addition to annotated ones." + }, + "timestamp": { + "label": "Timestamp overlay", + "description": "Overlay a timestamp on saved snapshots." + }, + "bounding_box": { + "label": "Bounding box overlay", + "description": "Draw bounding boxes for tracked objects on saved snapshots." + }, + "crop": { + "label": "Crop snapshot", + "description": "Crop saved snapshots to the detected object's bounding box." + }, + "required_zones": { + "label": "Required zones", + "description": "Zones an object must enter for a snapshot to be saved." + }, + "height": { + "label": "Snapshot height", + "description": "Height (pixels) to resize saved snapshots to; leave empty to preserve original size." + }, + "retain": { + "label": "Snapshot retention", + "description": "Retention settings for saved snapshots including default days and per-object overrides.", + "default": { + "label": "Default retention", + "description": "Default number of days to retain snapshots." + }, + "mode": { + "label": "Retention mode", + "description": "Mode for retention: all (save all segments), motion (save segments with motion), or active_objects (save segments with active objects)." + }, + "objects": { + "label": "Object retention", + "description": "Per-object overrides for snapshot retention days." + } + }, + "quality": { + "label": "JPEG quality", + "description": "JPEG encode quality for saved snapshots (0-100)." 
+ } + }, + "timestamp_style": { + "label": "Timestamp style", + "description": "Styling options for in-feed timestamps applied to recordings and snapshots.", + "position": { + "label": "Timestamp position", + "description": "Position of the timestamp on the image (tl/tr/bl/br)." + }, + "format": { + "label": "Timestamp format", + "description": "Datetime format string used for timestamps (Python datetime format codes)." + }, + "color": { + "label": "Timestamp color", + "description": "RGB color values for the timestamp text (all values 0-255).", + "red": { + "label": "Red", + "description": "Red component (0-255) for timestamp color." + }, + "green": { + "label": "Green", + "description": "Green component (0-255) for timestamp color." + }, + "blue": { + "label": "Blue", + "description": "Blue component (0-255) for timestamp color." + } + }, + "thickness": { + "label": "Timestamp thickness", + "description": "Line thickness of the timestamp text." + }, + "effect": { + "label": "Timestamp effect", + "description": "Visual effect for the timestamp text (none, solid, shadow)." + } + }, + "best_image_timeout": { + "label": "Best image timeout", + "description": "How long to wait for the image with the highest confidence score." + }, + "mqtt": { + "label": "MQTT", + "description": "MQTT image publishing settings.", + "enabled": { + "label": "Send image", + "description": "Enable publishing image snapshots for objects to MQTT topics for this camera." + }, + "timestamp": { + "label": "Add timestamp", + "description": "Overlay a timestamp on images published to MQTT." + }, + "bounding_box": { + "label": "Add bounding box", + "description": "Draw bounding boxes on images published over MQTT." + }, + "crop": { + "label": "Crop image", + "description": "Crop images published to MQTT to the detected object's bounding box." + }, + "height": { + "label": "Image height", + "description": "Height (pixels) to resize images published over MQTT." 
+ }, + "required_zones": { + "label": "Required zones", + "description": "Zones that an object must enter for an MQTT image to be published." + }, + "quality": { + "label": "JPEG quality", + "description": "JPEG quality for images published to MQTT (0-100)." + } + }, + "notifications": { + "label": "Notifications", + "description": "Settings to enable and control notifications for this camera.", + "enabled": { + "label": "Enable notifications", + "description": "Enable or disable notifications for this camera." + }, + "email": { + "label": "Notification email", + "description": "Email address used for push notifications or required by certain notification providers." + }, + "cooldown": { + "label": "Cooldown period", + "description": "Cooldown (seconds) between notifications to avoid spamming recipients." + }, + "enabled_in_config": { + "label": "Original notifications state", + "description": "Indicates whether notifications were enabled in the original static configuration." + } + }, + "onvif": { + "label": "ONVIF", + "description": "ONVIF connection and PTZ autotracking settings for this camera.", + "host": { + "label": "ONVIF host", + "description": "Host (and optional scheme) for the ONVIF service for this camera." + }, + "port": { + "label": "ONVIF port", + "description": "Port number for the ONVIF service." + }, + "user": { + "label": "ONVIF username", + "description": "Username for ONVIF authentication; some devices require admin user for ONVIF." + }, + "password": { + "label": "ONVIF password", + "description": "Password for ONVIF authentication." + }, + "tls_insecure": { + "label": "Disable TLS verify", + "description": "Skip TLS verification and disable digest auth for ONVIF (unsafe; use in safe networks only)." 
+ }, + "autotracking": { + "label": "Autotracking", + "description": "Automatically track moving objects and keep them centered in the frame using PTZ camera movements.", + "enabled": { + "label": "Enable Autotracking", + "description": "Enable or disable automatic PTZ camera tracking of detected objects." + }, + "calibrate_on_startup": { + "label": "Calibrate on start", + "description": "Measure PTZ motor speeds on startup to improve tracking accuracy. Frigate will update config with movement_weights after calibration." + }, + "zooming": { + "label": "Zoom mode", + "description": "Control zoom behavior: disabled (pan/tilt only), absolute (most compatible), or relative (concurrent pan/tilt/zoom)." + }, + "zoom_factor": { + "label": "Zoom factor", + "description": "Control zoom level on tracked objects. Lower values keep more scene in view; higher values zoom in closer but may lose tracking. Values between 0.1 and 0.75." + }, + "track": { + "label": "Tracked objects", + "description": "List of object types that should trigger autotracking." + }, + "required_zones": { + "label": "Required zones", + "description": "Objects must enter one of these zones before autotracking begins." + }, + "return_preset": { + "label": "Return preset", + "description": "ONVIF preset name configured in camera firmware to return to after tracking ends." + }, + "timeout": { + "label": "Return timeout", + "description": "Wait this many seconds after losing tracking before returning camera to preset position." + }, + "movement_weights": { + "label": "Movement weights", + "description": "Calibration values automatically generated by camera calibration. Do not modify manually." + }, + "enabled_in_config": { + "label": "Original autotrack state", + "description": "Internal field to track whether autotracking was enabled in configuration." 
+ } + }, + "ignore_time_mismatch": { + "label": "Ignore time mismatch", + "description": "Ignore time synchronization differences between camera and Frigate server for ONVIF communication." + } + }, + "type": { + "label": "Camera type", + "description": "Camera Type" + }, + "ui": { + "label": "Camera UI", + "description": "Display ordering and visibility for this camera in the UI. Ordering affects the default dashboard. For more granular control, use camera groups.", + "order": { + "label": "UI order", + "description": "Numeric order used to sort the camera in the UI (default dashboard and lists); larger numbers appear later." + }, + "dashboard": { + "label": "Show in UI", + "description": "Toggle whether this camera is visible everywhere in the Frigate UI. Disabling this will require manually editing the config to view this camera in the UI again." + } + }, + "webui_url": { + "label": "Camera URL", + "description": "URL to visit the camera directly from system page" + }, + "zones": { + "label": "Zones", + "description": "Zones allow you to define a specific area of the frame so you can determine whether or not an object is within a particular area.", + "friendly_name": { + "label": "Zone name", + "description": "A user-friendly name for the zone, displayed in the Frigate UI. If not set, a formatted version of the zone name will be used." + }, + "filters": { + "label": "Zone filters", + "description": "Filters to apply to objects within this zone. Used to reduce false positives or restrict which objects are considered present in the zone.", + "min_area": { + "label": "Minimum object area", + "description": "Minimum bounding box area (pixels or percentage) required for this object type. Can be pixels (int) or percentage (float between 0.000001 and 0.99)." + }, + "max_area": { + "label": "Maximum object area", + "description": "Maximum bounding box area (pixels or percentage) allowed for this object type. 
Can be pixels (int) or percentage (float between 0.000001 and 0.99)." + }, + "min_ratio": { + "label": "Minimum aspect ratio", + "description": "Minimum width/height ratio required for the bounding box to qualify." + }, + "max_ratio": { + "label": "Maximum aspect ratio", + "description": "Maximum width/height ratio allowed for the bounding box to qualify." + }, + "threshold": { + "label": "Confidence threshold", + "description": "Average detection confidence threshold required for the object to be considered a true positive." + }, + "min_score": { + "label": "Minimum confidence", + "description": "Minimum single-frame detection confidence required for the object to be counted." + }, + "mask": { + "label": "Filter mask", + "description": "Polygon coordinates defining where this filter applies within the frame." + }, + "raw_mask": { + "label": "Raw Mask" + } + }, + "coordinates": { + "label": "Coordinates", + "description": "Polygon coordinates that define the zone area. Can be a comma-separated string or a list of coordinate strings. Coordinates should be relative (0-1) or absolute (legacy)." + }, + "distances": { + "label": "Real-world distances", + "description": "Optional real-world distances for each side of the zone quadrilateral, used for speed or distance calculations. Must have exactly 4 values if set." + }, + "inertia": { + "label": "Inertia frames", + "description": "Number of consecutive frames an object must be detected in the zone before it is considered present. Helps filter out transient detections." + }, + "loitering_time": { + "label": "Loitering seconds", + "description": "Number of seconds an object must remain in the zone to be considered as loitering. Set to 0 to disable loitering detection." + }, + "speed_threshold": { + "label": "Minimum speed", + "description": "Minimum speed (in real-world units if distances are set) required for an object to be considered present in the zone. Used for speed-based zone triggers." 
+ }, + "objects": { + "label": "Trigger objects", + "description": "List of object types (from labelmap) that can trigger this zone. Can be a string or a list of strings. If empty, all objects are considered." + } + }, + "enabled_in_config": { + "label": "Original camera state", + "description": "Keep track of original state of camera." } } diff --git a/web/public/locales/en/config/classification.json b/web/public/locales/en/config/classification.json deleted file mode 100644 index e8014b2fa..000000000 --- a/web/public/locales/en/config/classification.json +++ /dev/null @@ -1,58 +0,0 @@ -{ - "label": "Object classification config.", - "properties": { - "bird": { - "label": "Bird classification config.", - "properties": { - "enabled": { - "label": "Enable bird classification." - }, - "threshold": { - "label": "Minimum classification score required to be considered a match." - } - } - }, - "custom": { - "label": "Custom Classification Model Configs.", - "properties": { - "enabled": { - "label": "Enable running the model." - }, - "name": { - "label": "Name of classification model." - }, - "threshold": { - "label": "Classification score threshold to change the state." - }, - "object_config": { - "properties": { - "objects": { - "label": "Object types to classify." - }, - "classification_type": { - "label": "Type of classification that is applied." - } - } - }, - "state_config": { - "properties": { - "cameras": { - "label": "Cameras to run classification on.", - "properties": { - "crop": { - "label": "Crop of image frame on this camera to run classification on." - } - } - }, - "motion": { - "label": "If classification should be run when motion is detected in the crop." - }, - "interval": { - "label": "Interval to run classification on in seconds." 
- } - } - } - } - } - } -} \ No newline at end of file diff --git a/web/public/locales/en/config/database.json b/web/public/locales/en/config/database.json deleted file mode 100644 index ece7ccbaa..000000000 --- a/web/public/locales/en/config/database.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "label": "Database configuration.", - "properties": { - "path": { - "label": "Database path." - } - } -} \ No newline at end of file diff --git a/web/public/locales/en/config/detect.json b/web/public/locales/en/config/detect.json deleted file mode 100644 index 9e1b59313..000000000 --- a/web/public/locales/en/config/detect.json +++ /dev/null @@ -1,51 +0,0 @@ -{ - "label": "Global object tracking configuration.", - "properties": { - "enabled": { - "label": "Detection Enabled." - }, - "height": { - "label": "Height of the stream for the detect role." - }, - "width": { - "label": "Width of the stream for the detect role." - }, - "fps": { - "label": "Number of frames per second to process through detection." - }, - "min_initialized": { - "label": "Minimum number of consecutive hits for an object to be initialized by the tracker." - }, - "max_disappeared": { - "label": "Maximum number of frames the object can disappear before detection ends." - }, - "stationary": { - "label": "Stationary objects config.", - "properties": { - "interval": { - "label": "Frame interval for checking stationary objects." - }, - "threshold": { - "label": "Number of frames without a position change for an object to be considered stationary" - }, - "max_frames": { - "label": "Max frames for stationary objects.", - "properties": { - "default": { - "label": "Default max frames." - }, - "objects": { - "label": "Object specific max frames." - } - } - }, - "classifier": { - "label": "Enable visual classifier for determing if objects with jittery bounding boxes are stationary." - } - } - }, - "annotation_offset": { - "label": "Milliseconds to offset detect annotations by." 
- } - } -} \ No newline at end of file diff --git a/web/public/locales/en/config/detectors.json b/web/public/locales/en/config/detectors.json deleted file mode 100644 index 1bd6fec70..000000000 --- a/web/public/locales/en/config/detectors.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "label": "Detector hardware configuration.", - "properties": { - "type": { - "label": "Detector Type" - }, - "model": { - "label": "Detector specific model configuration." - }, - "model_path": { - "label": "Detector specific model path." - } - } -} \ No newline at end of file diff --git a/web/public/locales/en/config/environment_vars.json b/web/public/locales/en/config/environment_vars.json deleted file mode 100644 index ce97ce49e..000000000 --- a/web/public/locales/en/config/environment_vars.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "label": "Frigate environment variables." -} \ No newline at end of file diff --git a/web/public/locales/en/config/face_recognition.json b/web/public/locales/en/config/face_recognition.json deleted file mode 100644 index 705d75468..000000000 --- a/web/public/locales/en/config/face_recognition.json +++ /dev/null @@ -1,36 +0,0 @@ -{ - "label": "Face recognition config.", - "properties": { - "enabled": { - "label": "Enable face recognition." - }, - "model_size": { - "label": "The size of the embeddings model used." - }, - "unknown_score": { - "label": "Minimum face distance score required to be marked as a potential match." - }, - "detection_threshold": { - "label": "Minimum face detection score required to be considered a face." - }, - "recognition_threshold": { - "label": "Minimum face distance score required to be considered a match." - }, - "min_area": { - "label": "Min area of face box to consider running face recognition." - }, - "min_faces": { - "label": "Min face recognitions for the sub label to be applied to the person object." - }, - "save_attempts": { - "label": "Number of face attempts to save in the recent recognitions tab." 
- }, - "blur_confidence_filter": { - "label": "Apply blur quality filter to face confidence." - }, - "device": { - "label": "The device key to use for face recognition.", - "description": "This is an override, to target a specific device. See https://onnxruntime.ai/docs/execution-providers/ for more information" - } - } -} \ No newline at end of file diff --git a/web/public/locales/en/config/ffmpeg.json b/web/public/locales/en/config/ffmpeg.json deleted file mode 100644 index 570da5a35..000000000 --- a/web/public/locales/en/config/ffmpeg.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "label": "Global FFmpeg configuration.", - "properties": { - "path": { - "label": "FFmpeg path" - }, - "global_args": { - "label": "Global FFmpeg arguments." - }, - "hwaccel_args": { - "label": "FFmpeg hardware acceleration arguments." - }, - "input_args": { - "label": "FFmpeg input arguments." - }, - "output_args": { - "label": "FFmpeg output arguments per role.", - "properties": { - "detect": { - "label": "Detect role FFmpeg output arguments." - }, - "record": { - "label": "Record role FFmpeg output arguments." - } - } - }, - "retry_interval": { - "label": "Time in seconds to wait before FFmpeg retries connecting to the camera." - }, - "apple_compatibility": { - "label": "Set tag on HEVC (H.265) recording stream to improve compatibility with Apple players." - } - } -} \ No newline at end of file diff --git a/web/public/locales/en/config/genai.json b/web/public/locales/en/config/genai.json deleted file mode 100644 index fed679d9e..000000000 --- a/web/public/locales/en/config/genai.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "label": "Generative AI configuration.", - "properties": { - "api_key": { - "label": "Provider API key." - }, - "base_url": { - "label": "Provider base url." - }, - "model": { - "label": "GenAI model." - }, - "provider": { - "label": "GenAI provider." - }, - "provider_options": { - "label": "GenAI Provider extra options." 
- }, - "runtime_options": { - "label": "Options to pass during inference calls." - } - } -} diff --git a/web/public/locales/en/config/global.json b/web/public/locales/en/config/global.json new file mode 100644 index 000000000..9dc991491 --- /dev/null +++ b/web/public/locales/en/config/global.json @@ -0,0 +1,2185 @@ +{ + "version": { + "label": "Current config version", + "description": "Numeric or string version of the active configuration to help detect migrations or format changes." + }, + "safe_mode": { + "label": "Safe mode", + "description": "When enabled, start Frigate in safe mode with reduced features for troubleshooting." + }, + "environment_vars": { + "label": "Environment variables", + "description": "Key/value pairs of environment variables to set for the Frigate process in Home Assistant OS. Non-HAOS users must use Docker environment variable configuration instead." + }, + "logger": { + "label": "Logging", + "description": "Controls default log verbosity and per-component log level overrides.", + "default": { + "label": "Logging level", + "description": "Default global log verbosity (debug, info, warning, error)." + }, + "logs": { + "label": "Per-process log level", + "description": "Per-component log level overrides to increase or decrease verbosity for specific modules." + } + }, + "auth": { + "label": "Authentication", + "description": "Authentication and session-related settings including cookie and rate limit options.", + "enabled": { + "label": "Enable authentication", + "description": "Enable native authentication for the Frigate UI." + }, + "reset_admin_password": { + "label": "Reset admin password", + "description": "If true, reset the admin user's password on startup and print the new password in logs." + }, + "cookie_name": { + "label": "JWT cookie name", + "description": "Name of the cookie used to store the JWT token for native authentication." 
+ }, + "cookie_secure": { + "label": "Secure cookie flag", + "description": "Set the secure flag on the auth cookie; should be true when using TLS." + }, + "session_length": { + "label": "Session length", + "description": "Session duration in seconds for JWT-based sessions." + }, + "refresh_time": { + "label": "Session refresh window", + "description": "When a session is within this many seconds of expiring, refresh it back to full length." + }, + "failed_login_rate_limit": { + "label": "Failed login limits", + "description": "Rate limiting rules for failed login attempts to reduce brute-force attacks." + }, + "trusted_proxies": { + "label": "Trusted proxies", + "description": "List of trusted proxy IPs used when determining client IP for rate limiting." + }, + "hash_iterations": { + "label": "Hash iterations", + "description": "Number of PBKDF2-SHA256 iterations to use when hashing user passwords." + }, + "roles": { + "label": "Role mappings", + "description": "Map roles to camera lists. An empty list grants access to all cameras for the role." + }, + "admin_first_time_login": { + "label": "First-time admin flag", + "description": "When true the UI may show a help link on the login page informing users how to sign in after an admin password reset. " + } + }, + "database": { + "label": "Database", + "description": "Settings for the SQLite database used by Frigate to store tracked object and recording metadata.", + "path": { + "label": "Database path", + "description": "Filesystem path where the Frigate SQLite database file will be stored." + } + }, + "go2rtc": { + "label": "go2rtc", + "description": "Settings for the integrated go2rtc restreaming service used for live stream relaying and translation." 
+ }, + "mqtt": { + "label": "MQTT", + "description": "Settings for connecting and publishing telemetry, snapshots, and event details to an MQTT broker.", + "enabled": { + "label": "Enable MQTT", + "description": "Enable or disable MQTT integration for state, events, and snapshots." + }, + "host": { + "label": "MQTT host", + "description": "Hostname or IP address of the MQTT broker." + }, + "port": { + "label": "MQTT port", + "description": "Port of the MQTT broker (usually 1883 for plain MQTT)." + }, + "topic_prefix": { + "label": "Topic prefix", + "description": "MQTT topic prefix for all Frigate topics; must be unique if running multiple instances." + }, + "client_id": { + "label": "Client ID", + "description": "Client identifier used when connecting to the MQTT broker; should be unique per instance." + }, + "stats_interval": { + "label": "Stats interval", + "description": "Interval in seconds for publishing system and camera stats to MQTT." + }, + "user": { + "label": "MQTT username", + "description": "Optional MQTT username; can be provided via environment variables or secrets." + }, + "password": { + "label": "MQTT password", + "description": "Optional MQTT password; can be provided via environment variables or secrets." + }, + "tls_ca_certs": { + "label": "TLS CA certs", + "description": "Path to CA certificate for TLS connections to the broker (for self-signed certs)." + }, + "tls_client_cert": { + "label": "Client cert", + "description": "Client certificate path for TLS mutual authentication; do not set user/password when using client certs." + }, + "tls_client_key": { + "label": "Client key", + "description": "Private key path for the client certificate." + }, + "tls_insecure": { + "label": "TLS insecure", + "description": "Allow insecure TLS connections by skipping hostname verification (not recommended)." + }, + "qos": { + "label": "MQTT QoS", + "description": "Quality of Service level for MQTT publishes/subscriptions (0, 1, or 2)." 
+ } + }, + "notifications": { + "label": "Notifications", + "description": "Settings to enable and control notifications for all cameras; can be overridden per-camera.", + "enabled": { + "label": "Enable notifications", + "description": "Enable or disable notifications for all cameras; can be overridden per-camera." + }, + "email": { + "label": "Notification email", + "description": "Email address used for push notifications or required by certain notification providers." + }, + "cooldown": { + "label": "Cooldown period", + "description": "Cooldown (seconds) between notifications to avoid spamming recipients." + }, + "enabled_in_config": { + "label": "Original notifications state", + "description": "Indicates whether notifications were enabled in the original static configuration." + } + }, + "networking": { + "label": "Networking", + "description": "Network-related settings such as IPv6 enablement for Frigate endpoints.", + "ipv6": { + "label": "IPv6 configuration", + "description": "IPv6-specific settings for Frigate network services.", + "enabled": { + "label": "Enable IPv6", + "description": "Enable IPv6 support for Frigate services (API and UI) where applicable." + } + }, + "listen": { + "label": "Listening ports configuration", + "description": "Configuration for internal and external listening ports. This is for advanced users. For the majority of use cases it's recommended to change the ports section of your Docker compose file.", + "internal": { + "label": "Internal port", + "description": "Internal listening port for Frigate (default 5000)." + }, + "external": { + "label": "External port", + "description": "External listening port for Frigate (default 8971)." 
+ } + } + }, + "proxy": { + "label": "Proxy", + "description": "Settings for integrating Frigate behind a reverse proxy that passes authenticated user headers.", + "header_map": { + "label": "Header mapping", + "description": "Map incoming proxy headers to Frigate user and role fields for proxy-based auth.", + "user": { + "label": "User header", + "description": "Header containing the authenticated username provided by the upstream proxy." + }, + "role": { + "label": "Role header", + "description": "Header containing the authenticated user's role or groups from the upstream proxy." + }, + "role_map": { + "label": "Role mapping", + "description": "Map upstream group values to Frigate roles (for example map admin groups to the admin role)." + } + }, + "logout_url": { + "label": "Logout URL", + "description": "URL to redirect users to when logging out via the proxy." + }, + "auth_secret": { + "label": "Proxy secret", + "description": "Optional secret checked against the X-Proxy-Secret header to verify trusted proxies." + }, + "default_role": { + "label": "Default role", + "description": "Default role assigned to proxy-authenticated users when no role mapping applies (admin or viewer)." + }, + "separator": { + "label": "Separator character", + "description": "Character used to split multiple values provided in proxy headers." + } + }, + "telemetry": { + "label": "Telemetry", + "description": "System telemetry and stats options including GPU and network bandwidth monitoring.", + "network_interfaces": { + "label": "Network interfaces", + "description": "List of network interface name prefixes to monitor for bandwidth statistics." + }, + "stats": { + "label": "System stats", + "description": "Options to enable/disable collection of various system and GPU statistics.", + "amd_gpu_stats": { + "label": "AMD GPU stats", + "description": "Enable collection of AMD GPU statistics if an AMD GPU is present." 
+ }, + "intel_gpu_stats": { + "label": "Intel GPU stats", + "description": "Enable collection of Intel GPU statistics if an Intel GPU is present." + }, + "network_bandwidth": { + "label": "Network bandwidth", + "description": "Enable per-process network bandwidth monitoring for camera ffmpeg processes and detectors (requires capabilities)." + }, + "intel_gpu_device": { + "label": "SR-IOV device", + "description": "Device identifier used when treating Intel GPUs as SR-IOV to fix GPU stats." + } + }, + "version_check": { + "label": "Version check", + "description": "Enable an outbound check to detect if a newer Frigate version is available." + } + }, + "tls": { + "label": "TLS", + "description": "TLS settings for Frigate's web endpoints (port 8971).", + "enabled": { + "label": "Enable TLS", + "description": "Enable TLS for Frigate's web UI and API on the configured TLS port." + } + }, + "ui": { + "label": "UI", + "description": "User interface preferences such as timezone, time/date formatting, and units.", + "timezone": { + "label": "Timezone", + "description": "Optional timezone to display across the UI (defaults to browser local time if unset)." + }, + "time_format": { + "label": "Time format", + "description": "Time format to use in the UI (browser, 12hour, or 24hour)." + }, + "date_style": { + "label": "Date style", + "description": "Date style to use in the UI (full, long, medium, short)." + }, + "time_style": { + "label": "Time style", + "description": "Time style to use in the UI (full, long, medium, short)." + }, + "unit_system": { + "label": "Unit system", + "description": "Unit system for display (metric or imperial) used in the UI and MQTT." 
+ } + }, + "detectors": { + "label": "Detector hardware", + "description": "Configuration for object detectors (CPU, GPU, ONNX backends) and any detector-specific model settings.", + "type": { + "label": "Detector Type", + "description": "Type of detector to use for object detection (for example 'cpu', 'edgetpu', 'openvino')." + }, + "cpu": { + "label": "CPU", + "description": "CPU TFLite detector that runs TensorFlow Lite models on the host CPU without hardware acceleration. Not recommended.", + "type": { + "label": "Type" + }, + "model": { + "label": "Detector specific model configuration", + "description": "Detector-specific model configuration options (path, input size, etc.).", + "path": { + "label": "Custom Object detection model path", + "description": "Path to a custom detection model file (or plus:// for Frigate+ models)." + }, + "labelmap_path": { + "label": "Label map for custom object detector", + "description": "Path to a labelmap file that maps numeric classes to string labels for the detector." + }, + "width": { + "label": "Object detection model input width", + "description": "Width of the model input tensor in pixels." + }, + "height": { + "label": "Object detection model input height", + "description": "Height of the model input tensor in pixels." + }, + "labelmap": { + "label": "Labelmap customization", + "description": "Overrides or remapping entries to merge into the standard labelmap." + }, + "attributes_map": { + "label": "Map of object labels to their attribute labels", + "description": "Mapping from object labels to attribute labels used to attach metadata (for example 'car' -> ['license_plate'])." + }, + "input_tensor": { + "label": "Model Input Tensor Shape", + "description": "Tensor format expected by the model: 'nhwc' or 'nchw'." + }, + "input_pixel_format": { + "label": "Model Input Pixel Color Format", + "description": "Pixel colorspace expected by the model: 'rgb', 'bgr', or 'yuv'." 
+ }, + "input_dtype": { + "label": "Model Input D Type", + "description": "Data type of the model input tensor (for example 'float32')." + }, + "model_type": { + "label": "Object Detection Model Type", + "description": "Detector model architecture type (ssd, yolox, yolonas) used by some detectors for optimization." + } + }, + "model_path": { + "label": "Detector specific model path", + "description": "File path to the detector model binary if required by the chosen detector." + }, + "num_threads": { + "label": "Number of detection threads", + "description": "The number of threads used for CPU-based inference." + } + }, + "deepstack": { + "label": "DeepStack", + "description": "DeepStack/CodeProject.AI detector that sends images to a remote DeepStack HTTP API for inference. Not recommended.", + "type": { + "label": "Type" + }, + "model": { + "label": "Detector specific model configuration", + "description": "Detector-specific model configuration options (path, input size, etc.).", + "path": { + "label": "Custom Object detection model path", + "description": "Path to a custom detection model file (or plus:// for Frigate+ models)." + }, + "labelmap_path": { + "label": "Label map for custom object detector", + "description": "Path to a labelmap file that maps numeric classes to string labels for the detector." + }, + "width": { + "label": "Object detection model input width", + "description": "Width of the model input tensor in pixels." + }, + "height": { + "label": "Object detection model input height", + "description": "Height of the model input tensor in pixels." + }, + "labelmap": { + "label": "Labelmap customization", + "description": "Overrides or remapping entries to merge into the standard labelmap." + }, + "attributes_map": { + "label": "Map of object labels to their attribute labels", + "description": "Mapping from object labels to attribute labels used to attach metadata (for example 'car' -> ['license_plate'])." 
+ }, + "input_tensor": { + "label": "Model Input Tensor Shape", + "description": "Tensor format expected by the model: 'nhwc' or 'nchw'." + }, + "input_pixel_format": { + "label": "Model Input Pixel Color Format", + "description": "Pixel colorspace expected by the model: 'rgb', 'bgr', or 'yuv'." + }, + "input_dtype": { + "label": "Model Input D Type", + "description": "Data type of the model input tensor (for example 'float32')." + }, + "model_type": { + "label": "Object Detection Model Type", + "description": "Detector model architecture type (ssd, yolox, yolonas) used by some detectors for optimization." + } + }, + "model_path": { + "label": "Detector specific model path", + "description": "File path to the detector model binary if required by the chosen detector." + }, + "api_url": { + "label": "DeepStack API URL", + "description": "The URL of the DeepStack API." + }, + "api_timeout": { + "label": "DeepStack API timeout (in seconds)", + "description": "Maximum time allowed for a DeepStack API request." + }, + "api_key": { + "label": "DeepStack API key (if required)", + "description": "Optional API key for authenticated DeepStack services." + } + }, + "degirum": { + "label": "DeGirum", + "description": "DeGirum detector for running models via DeGirum cloud or local inference services.", + "type": { + "label": "Type" + }, + "model": { + "label": "Detector specific model configuration", + "description": "Detector-specific model configuration options (path, input size, etc.).", + "path": { + "label": "Custom Object detection model path", + "description": "Path to a custom detection model file (or plus:// for Frigate+ models)." + }, + "labelmap_path": { + "label": "Label map for custom object detector", + "description": "Path to a labelmap file that maps numeric classes to string labels for the detector." + }, + "width": { + "label": "Object detection model input width", + "description": "Width of the model input tensor in pixels." 
+ }, + "height": { + "label": "Object detection model input height", + "description": "Height of the model input tensor in pixels." + }, + "labelmap": { + "label": "Labelmap customization", + "description": "Overrides or remapping entries to merge into the standard labelmap." + }, + "attributes_map": { + "label": "Map of object labels to their attribute labels", + "description": "Mapping from object labels to attribute labels used to attach metadata (for example 'car' -> ['license_plate'])." + }, + "input_tensor": { + "label": "Model Input Tensor Shape", + "description": "Tensor format expected by the model: 'nhwc' or 'nchw'." + }, + "input_pixel_format": { + "label": "Model Input Pixel Color Format", + "description": "Pixel colorspace expected by the model: 'rgb', 'bgr', or 'yuv'." + }, + "input_dtype": { + "label": "Model Input D Type", + "description": "Data type of the model input tensor (for example 'float32')." + }, + "model_type": { + "label": "Object Detection Model Type", + "description": "Detector model architecture type (ssd, yolox, yolonas) used by some detectors for optimization." + } + }, + "model_path": { + "label": "Detector specific model path", + "description": "File path to the detector model binary if required by the chosen detector." + }, + "location": { + "label": "Inference Location", + "description": "Location of the DeGirim inference engine (e.g. '@cloud', '127.0.0.1')." + }, + "zoo": { + "label": "Model Zoo", + "description": "Path or URL to the DeGirum model zoo." + }, + "token": { + "label": "DeGirum Cloud Token", + "description": "Token for DeGirum Cloud access." 
+ } + }, + "edgetpu": { + "label": "EdgeTPU", + "description": "EdgeTPU detector that runs TensorFlow Lite models compiled for Coral EdgeTPU using the EdgeTPU delegate.", + "type": { + "label": "Type" + }, + "model": { + "label": "Detector specific model configuration", + "description": "Detector-specific model configuration options (path, input size, etc.).", + "path": { + "label": "Custom Object detection model path", + "description": "Path to a custom detection model file (or plus:// for Frigate+ models)." + }, + "labelmap_path": { + "label": "Label map for custom object detector", + "description": "Path to a labelmap file that maps numeric classes to string labels for the detector." + }, + "width": { + "label": "Object detection model input width", + "description": "Width of the model input tensor in pixels." + }, + "height": { + "label": "Object detection model input height", + "description": "Height of the model input tensor in pixels." + }, + "labelmap": { + "label": "Labelmap customization", + "description": "Overrides or remapping entries to merge into the standard labelmap." + }, + "attributes_map": { + "label": "Map of object labels to their attribute labels", + "description": "Mapping from object labels to attribute labels used to attach metadata (for example 'car' -> ['license_plate'])." + }, + "input_tensor": { + "label": "Model Input Tensor Shape", + "description": "Tensor format expected by the model: 'nhwc' or 'nchw'." + }, + "input_pixel_format": { + "label": "Model Input Pixel Color Format", + "description": "Pixel colorspace expected by the model: 'rgb', 'bgr', or 'yuv'." + }, + "input_dtype": { + "label": "Model Input D Type", + "description": "Data type of the model input tensor (for example 'float32')." + }, + "model_type": { + "label": "Object Detection Model Type", + "description": "Detector model architecture type (ssd, yolox, yolonas) used by some detectors for optimization." 
+ } + }, + "model_path": { + "label": "Detector specific model path", + "description": "File path to the detector model binary if required by the chosen detector." + }, + "device": { + "label": "Device Type", + "description": "The device to use for EdgeTPU inference (e.g. 'usb', 'pci')." + } + }, + "hailo8l": { + "label": "Hailo-8/Hailo-8L", + "description": "Hailo-8/Hailo-8L detector using HEF models and the HailoRT SDK for inference on Hailo hardware.", + "type": { + "label": "Type" + }, + "model": { + "label": "Detector specific model configuration", + "description": "Detector-specific model configuration options (path, input size, etc.).", + "path": { + "label": "Custom Object detection model path", + "description": "Path to a custom detection model file (or plus:// for Frigate+ models)." + }, + "labelmap_path": { + "label": "Label map for custom object detector", + "description": "Path to a labelmap file that maps numeric classes to string labels for the detector." + }, + "width": { + "label": "Object detection model input width", + "description": "Width of the model input tensor in pixels." + }, + "height": { + "label": "Object detection model input height", + "description": "Height of the model input tensor in pixels." + }, + "labelmap": { + "label": "Labelmap customization", + "description": "Overrides or remapping entries to merge into the standard labelmap." + }, + "attributes_map": { + "label": "Map of object labels to their attribute labels", + "description": "Mapping from object labels to attribute labels used to attach metadata (for example 'car' -> ['license_plate'])." + }, + "input_tensor": { + "label": "Model Input Tensor Shape", + "description": "Tensor format expected by the model: 'nhwc' or 'nchw'." + }, + "input_pixel_format": { + "label": "Model Input Pixel Color Format", + "description": "Pixel colorspace expected by the model: 'rgb', 'bgr', or 'yuv'." 
+ }, + "input_dtype": { + "label": "Model Input D Type", + "description": "Data type of the model input tensor (for example 'float32')." + }, + "model_type": { + "label": "Object Detection Model Type", + "description": "Detector model architecture type (ssd, yolox, yolonas) used by some detectors for optimization." + } + }, + "model_path": { + "label": "Detector specific model path", + "description": "File path to the detector model binary if required by the chosen detector." + }, + "device": { + "label": "Device Type", + "description": "The device to use for Hailo inference (e.g. 'PCIe', 'M.2')." + } + }, + "memryx": { + "label": "MemryX", + "description": "MemryX MX3 detector that runs compiled DFP models on MemryX accelerators.", + "type": { + "label": "Type" + }, + "model": { + "label": "Detector specific model configuration", + "description": "Detector-specific model configuration options (path, input size, etc.).", + "path": { + "label": "Custom Object detection model path", + "description": "Path to a custom detection model file (or plus:// for Frigate+ models)." + }, + "labelmap_path": { + "label": "Label map for custom object detector", + "description": "Path to a labelmap file that maps numeric classes to string labels for the detector." + }, + "width": { + "label": "Object detection model input width", + "description": "Width of the model input tensor in pixels." + }, + "height": { + "label": "Object detection model input height", + "description": "Height of the model input tensor in pixels." + }, + "labelmap": { + "label": "Labelmap customization", + "description": "Overrides or remapping entries to merge into the standard labelmap." + }, + "attributes_map": { + "label": "Map of object labels to their attribute labels", + "description": "Mapping from object labels to attribute labels used to attach metadata (for example 'car' -> ['license_plate'])." 
+ }, + "input_tensor": { + "label": "Model Input Tensor Shape", + "description": "Tensor format expected by the model: 'nhwc' or 'nchw'." + }, + "input_pixel_format": { + "label": "Model Input Pixel Color Format", + "description": "Pixel colorspace expected by the model: 'rgb', 'bgr', or 'yuv'." + }, + "input_dtype": { + "label": "Model Input D Type", + "description": "Data type of the model input tensor (for example 'float32')." + }, + "model_type": { + "label": "Object Detection Model Type", + "description": "Detector model architecture type (ssd, yolox, yolonas) used by some detectors for optimization." + } + }, + "model_path": { + "label": "Detector specific model path", + "description": "File path to the detector model binary if required by the chosen detector." + }, + "device": { + "label": "Device Path", + "description": "The device to use for MemryX inference (e.g. 'PCIe')." + } + }, + "onnx": { + "label": "ONNX", + "description": "ONNX detector for running ONNX models; will use available acceleration backends (CUDA/ROCm/OpenVINO) when available.", + "type": { + "label": "Type" + }, + "model": { + "label": "Detector specific model configuration", + "description": "Detector-specific model configuration options (path, input size, etc.).", + "path": { + "label": "Custom Object detection model path", + "description": "Path to a custom detection model file (or plus:// for Frigate+ models)." + }, + "labelmap_path": { + "label": "Label map for custom object detector", + "description": "Path to a labelmap file that maps numeric classes to string labels for the detector." + }, + "width": { + "label": "Object detection model input width", + "description": "Width of the model input tensor in pixels." + }, + "height": { + "label": "Object detection model input height", + "description": "Height of the model input tensor in pixels." 
+ }, + "labelmap": { + "label": "Labelmap customization", + "description": "Overrides or remapping entries to merge into the standard labelmap." + }, + "attributes_map": { + "label": "Map of object labels to their attribute labels", + "description": "Mapping from object labels to attribute labels used to attach metadata (for example 'car' -> ['license_plate'])." + }, + "input_tensor": { + "label": "Model Input Tensor Shape", + "description": "Tensor format expected by the model: 'nhwc' or 'nchw'." + }, + "input_pixel_format": { + "label": "Model Input Pixel Color Format", + "description": "Pixel colorspace expected by the model: 'rgb', 'bgr', or 'yuv'." + }, + "input_dtype": { + "label": "Model Input D Type", + "description": "Data type of the model input tensor (for example 'float32')." + }, + "model_type": { + "label": "Object Detection Model Type", + "description": "Detector model architecture type (ssd, yolox, yolonas) used by some detectors for optimization." + } + }, + "model_path": { + "label": "Detector specific model path", + "description": "File path to the detector model binary if required by the chosen detector." + }, + "device": { + "label": "Device Type", + "description": "The device to use for ONNX inference (e.g. 'AUTO', 'CPU', 'GPU')." + } + }, + "openvino": { + "label": "OpenVINO", + "description": "OpenVINO detector for AMD and Intel CPUs, Intel GPUs and Intel VPU hardware.", + "type": { + "label": "Type" + }, + "model": { + "label": "Detector specific model configuration", + "description": "Detector-specific model configuration options (path, input size, etc.).", + "path": { + "label": "Custom Object detection model path", + "description": "Path to a custom detection model file (or plus:// for Frigate+ models)." + }, + "labelmap_path": { + "label": "Label map for custom object detector", + "description": "Path to a labelmap file that maps numeric classes to string labels for the detector." 
+ }, + "width": { + "label": "Object detection model input width", + "description": "Width of the model input tensor in pixels." + }, + "height": { + "label": "Object detection model input height", + "description": "Height of the model input tensor in pixels." + }, + "labelmap": { + "label": "Labelmap customization", + "description": "Overrides or remapping entries to merge into the standard labelmap." + }, + "attributes_map": { + "label": "Map of object labels to their attribute labels", + "description": "Mapping from object labels to attribute labels used to attach metadata (for example 'car' -> ['license_plate'])." + }, + "input_tensor": { + "label": "Model Input Tensor Shape", + "description": "Tensor format expected by the model: 'nhwc' or 'nchw'." + }, + "input_pixel_format": { + "label": "Model Input Pixel Color Format", + "description": "Pixel colorspace expected by the model: 'rgb', 'bgr', or 'yuv'." + }, + "input_dtype": { + "label": "Model Input D Type", + "description": "Data type of the model input tensor (for example 'float32')." + }, + "model_type": { + "label": "Object Detection Model Type", + "description": "Detector model architecture type (ssd, yolox, yolonas) used by some detectors for optimization." + } + }, + "model_path": { + "label": "Detector specific model path", + "description": "File path to the detector model binary if required by the chosen detector." + }, + "device": { + "label": "Device Type", + "description": "The device to use for OpenVINO inference (e.g. 'CPU', 'GPU', 'NPU')." 
+ } + }, + "rknn": { + "label": "RKNN", + "description": "RKNN detector for Rockchip NPUs; runs compiled RKNN models on Rockchip hardware.", + "type": { + "label": "Type" + }, + "model": { + "label": "Detector specific model configuration", + "description": "Detector-specific model configuration options (path, input size, etc.).", + "path": { + "label": "Custom Object detection model path", + "description": "Path to a custom detection model file (or plus:// for Frigate+ models)." + }, + "labelmap_path": { + "label": "Label map for custom object detector", + "description": "Path to a labelmap file that maps numeric classes to string labels for the detector." + }, + "width": { + "label": "Object detection model input width", + "description": "Width of the model input tensor in pixels." + }, + "height": { + "label": "Object detection model input height", + "description": "Height of the model input tensor in pixels." + }, + "labelmap": { + "label": "Labelmap customization", + "description": "Overrides or remapping entries to merge into the standard labelmap." + }, + "attributes_map": { + "label": "Map of object labels to their attribute labels", + "description": "Mapping from object labels to attribute labels used to attach metadata (for example 'car' -> ['license_plate'])." + }, + "input_tensor": { + "label": "Model Input Tensor Shape", + "description": "Tensor format expected by the model: 'nhwc' or 'nchw'." + }, + "input_pixel_format": { + "label": "Model Input Pixel Color Format", + "description": "Pixel colorspace expected by the model: 'rgb', 'bgr', or 'yuv'." + }, + "input_dtype": { + "label": "Model Input D Type", + "description": "Data type of the model input tensor (for example 'float32')." + }, + "model_type": { + "label": "Object Detection Model Type", + "description": "Detector model architecture type (ssd, yolox, yolonas) used by some detectors for optimization." 
+ } + }, + "model_path": { + "label": "Detector specific model path", + "description": "File path to the detector model binary if required by the chosen detector." + }, + "num_cores": { + "label": "Number of NPU cores to use.", + "description": "The number of NPU cores to use (0 for auto)." + } + }, + "synaptics": { + "label": "Synaptics", + "description": "Synaptics NPU detector for models in .synap format using the Synap SDK on Synaptics hardware.", + "type": { + "label": "Type" + }, + "model": { + "label": "Detector specific model configuration", + "description": "Detector-specific model configuration options (path, input size, etc.).", + "path": { + "label": "Custom Object detection model path", + "description": "Path to a custom detection model file (or plus:// for Frigate+ models)." + }, + "labelmap_path": { + "label": "Label map for custom object detector", + "description": "Path to a labelmap file that maps numeric classes to string labels for the detector." + }, + "width": { + "label": "Object detection model input width", + "description": "Width of the model input tensor in pixels." + }, + "height": { + "label": "Object detection model input height", + "description": "Height of the model input tensor in pixels." + }, + "labelmap": { + "label": "Labelmap customization", + "description": "Overrides or remapping entries to merge into the standard labelmap." + }, + "attributes_map": { + "label": "Map of object labels to their attribute labels", + "description": "Mapping from object labels to attribute labels used to attach metadata (for example 'car' -> ['license_plate'])." + }, + "input_tensor": { + "label": "Model Input Tensor Shape", + "description": "Tensor format expected by the model: 'nhwc' or 'nchw'." + }, + "input_pixel_format": { + "label": "Model Input Pixel Color Format", + "description": "Pixel colorspace expected by the model: 'rgb', 'bgr', or 'yuv'." 
+ }, + "input_dtype": { + "label": "Model Input D Type", + "description": "Data type of the model input tensor (for example 'float32')." + }, + "model_type": { + "label": "Object Detection Model Type", + "description": "Detector model architecture type (ssd, yolox, yolonas) used by some detectors for optimization." + } + }, + "model_path": { + "label": "Detector specific model path", + "description": "File path to the detector model binary if required by the chosen detector." + } + }, + "teflon_tfl": { + "label": "Teflon", + "description": "Teflon delegate detector for TFLite using Mesa Teflon delegate library to accelerate inference on supported GPUs.", + "type": { + "label": "Type" + }, + "model": { + "label": "Detector specific model configuration", + "description": "Detector-specific model configuration options (path, input size, etc.).", + "path": { + "label": "Custom Object detection model path", + "description": "Path to a custom detection model file (or plus:// for Frigate+ models)." + }, + "labelmap_path": { + "label": "Label map for custom object detector", + "description": "Path to a labelmap file that maps numeric classes to string labels for the detector." + }, + "width": { + "label": "Object detection model input width", + "description": "Width of the model input tensor in pixels." + }, + "height": { + "label": "Object detection model input height", + "description": "Height of the model input tensor in pixels." + }, + "labelmap": { + "label": "Labelmap customization", + "description": "Overrides or remapping entries to merge into the standard labelmap." + }, + "attributes_map": { + "label": "Map of object labels to their attribute labels", + "description": "Mapping from object labels to attribute labels used to attach metadata (for example 'car' -> ['license_plate'])." + }, + "input_tensor": { + "label": "Model Input Tensor Shape", + "description": "Tensor format expected by the model: 'nhwc' or 'nchw'." 
+ }, + "input_pixel_format": { + "label": "Model Input Pixel Color Format", + "description": "Pixel colorspace expected by the model: 'rgb', 'bgr', or 'yuv'." + }, + "input_dtype": { + "label": "Model Input D Type", + "description": "Data type of the model input tensor (for example 'float32')." + }, + "model_type": { + "label": "Object Detection Model Type", + "description": "Detector model architecture type (ssd, yolox, yolonas) used by some detectors for optimization." + } + }, + "model_path": { + "label": "Detector specific model path", + "description": "File path to the detector model binary if required by the chosen detector." + } + }, + "tensorrt": { + "label": "TensorRT", + "description": "TensorRT detector for Nvidia Jetson devices using serialized TensorRT engines for accelerated inference.", + "type": { + "label": "Type" + }, + "model": { + "label": "Detector specific model configuration", + "description": "Detector-specific model configuration options (path, input size, etc.).", + "path": { + "label": "Custom Object detection model path", + "description": "Path to a custom detection model file (or plus:// for Frigate+ models)." + }, + "labelmap_path": { + "label": "Label map for custom object detector", + "description": "Path to a labelmap file that maps numeric classes to string labels for the detector." + }, + "width": { + "label": "Object detection model input width", + "description": "Width of the model input tensor in pixels." + }, + "height": { + "label": "Object detection model input height", + "description": "Height of the model input tensor in pixels." + }, + "labelmap": { + "label": "Labelmap customization", + "description": "Overrides or remapping entries to merge into the standard labelmap." + }, + "attributes_map": { + "label": "Map of object labels to their attribute labels", + "description": "Mapping from object labels to attribute labels used to attach metadata (for example 'car' -> ['license_plate'])." 
+ }, + "input_tensor": { + "label": "Model Input Tensor Shape", + "description": "Tensor format expected by the model: 'nhwc' or 'nchw'." + }, + "input_pixel_format": { + "label": "Model Input Pixel Color Format", + "description": "Pixel colorspace expected by the model: 'rgb', 'bgr', or 'yuv'." + }, + "input_dtype": { + "label": "Model Input D Type", + "description": "Data type of the model input tensor (for example 'float32')." + }, + "model_type": { + "label": "Object Detection Model Type", + "description": "Detector model architecture type (ssd, yolox, yolonas) used by some detectors for optimization." + } + }, + "model_path": { + "label": "Detector specific model path", + "description": "File path to the detector model binary if required by the chosen detector." + }, + "device": { + "label": "GPU Device Index", + "description": "The GPU device index to use." + } + }, + "zmq": { + "label": "ZMQ IPC", + "description": "ZMQ IPC detector that offloads inference to an external process via a ZeroMQ IPC endpoint.", + "type": { + "label": "Type" + }, + "model": { + "label": "Detector specific model configuration", + "description": "Detector-specific model configuration options (path, input size, etc.).", + "path": { + "label": "Custom Object detection model path", + "description": "Path to a custom detection model file (or plus:// for Frigate+ models)." + }, + "labelmap_path": { + "label": "Label map for custom object detector", + "description": "Path to a labelmap file that maps numeric classes to string labels for the detector." + }, + "width": { + "label": "Object detection model input width", + "description": "Width of the model input tensor in pixels." + }, + "height": { + "label": "Object detection model input height", + "description": "Height of the model input tensor in pixels." + }, + "labelmap": { + "label": "Labelmap customization", + "description": "Overrides or remapping entries to merge into the standard labelmap." 
+ }, + "attributes_map": { + "label": "Map of object labels to their attribute labels", + "description": "Mapping from object labels to attribute labels used to attach metadata (for example 'car' -> ['license_plate'])." + }, + "input_tensor": { + "label": "Model Input Tensor Shape", + "description": "Tensor format expected by the model: 'nhwc' or 'nchw'." + }, + "input_pixel_format": { + "label": "Model Input Pixel Color Format", + "description": "Pixel colorspace expected by the model: 'rgb', 'bgr', or 'yuv'." + }, + "input_dtype": { + "label": "Model Input D Type", + "description": "Data type of the model input tensor (for example 'float32')." + }, + "model_type": { + "label": "Object Detection Model Type", + "description": "Detector model architecture type (ssd, yolox, yolonas) used by some detectors for optimization." + } + }, + "model_path": { + "label": "Detector specific model path", + "description": "File path to the detector model binary if required by the chosen detector." + }, + "endpoint": { + "label": "ZMQ IPC endpoint", + "description": "The ZMQ endpoint to connect to." + }, + "request_timeout_ms": { + "label": "ZMQ request timeout in milliseconds", + "description": "Timeout for ZMQ requests in milliseconds." + }, + "linger_ms": { + "label": "ZMQ socket linger in milliseconds", + "description": "Socket linger period in milliseconds." + } + } + }, + "model": { + "label": "Detection model", + "description": "Settings to configure a custom object detection model and its input shape.", + "path": { + "label": "Custom Object detection model path", + "description": "Path to a custom detection model file (or plus:// for Frigate+ models)." + }, + "labelmap_path": { + "label": "Label map for custom object detector", + "description": "Path to a labelmap file that maps numeric classes to string labels for the detector." + }, + "width": { + "label": "Object detection model input width", + "description": "Width of the model input tensor in pixels." 
+ }, + "height": { + "label": "Object detection model input height", + "description": "Height of the model input tensor in pixels." + }, + "labelmap": { + "label": "Labelmap customization", + "description": "Overrides or remapping entries to merge into the standard labelmap." + }, + "attributes_map": { + "label": "Map of object labels to their attribute labels", + "description": "Mapping from object labels to attribute labels used to attach metadata (for example 'car' -> ['license_plate'])." + }, + "input_tensor": { + "label": "Model Input Tensor Shape", + "description": "Tensor format expected by the model: 'nhwc' or 'nchw'." + }, + "input_pixel_format": { + "label": "Model Input Pixel Color Format", + "description": "Pixel colorspace expected by the model: 'rgb', 'bgr', or 'yuv'." + }, + "input_dtype": { + "label": "Model Input D Type", + "description": "Data type of the model input tensor (for example 'float32')." + }, + "model_type": { + "label": "Object Detection Model Type", + "description": "Detector model architecture type (ssd, yolox, yolonas) used by some detectors for optimization." + } + }, + "genai": { + "label": "Generative AI configuration (named providers).", + "description": "Settings for integrated generative AI providers used to generate object descriptions and review summaries.", + "api_key": { + "label": "API key", + "description": "API key required by some providers (can also be set via environment variables)." + }, + "base_url": { + "label": "Base URL", + "description": "Base URL for self-hosted or compatible providers (for example an Ollama instance)." + }, + "model": { + "label": "Model", + "description": "The model to use from the provider for generating descriptions or summaries." + }, + "provider": { + "label": "Provider", + "description": "The GenAI provider to use (for example: ollama, gemini, openai)." + }, + "roles": { + "label": "Roles", + "description": "GenAI roles (tools, vision, embeddings); one provider per role." 
+ }, + "provider_options": { + "label": "Provider options", + "description": "Additional provider-specific options to pass to the GenAI client." + }, + "runtime_options": { + "label": "Runtime options", + "description": "Runtime options passed to the provider for each inference call." + } + }, + "audio": { + "label": "Audio events", + "description": "Settings for audio-based event detection for all cameras; can be overridden per-camera.", + "enabled": { + "label": "Enable audio detection", + "description": "Enable or disable audio event detection for all cameras; can be overridden per-camera." + }, + "max_not_heard": { + "label": "End timeout", + "description": "Amount of seconds without the configured audio type before the audio event is ended." + }, + "min_volume": { + "label": "Minimum volume", + "description": "Minimum RMS volume threshold required to run audio detection; lower values increase sensitivity (e.g., 200 high, 500 medium, 1000 low)." + }, + "listen": { + "label": "Listen types", + "description": "List of audio event types to detect (for example: bark, fire_alarm, scream, speech, yell)." + }, + "filters": { + "label": "Audio filters", + "description": "Per-audio-type filter settings such as confidence thresholds used to reduce false positives." + }, + "enabled_in_config": { + "label": "Original audio state", + "description": "Indicates whether audio detection was originally enabled in the static config file." + }, + "num_threads": { + "label": "Detection threads", + "description": "Number of threads to use for audio detection processing." + } + }, + "birdseye": { + "label": "Birdseye", + "description": "Settings for the Birdseye composite view that composes multiple camera feeds into a single layout.", + "enabled": { + "label": "Enable Birdseye", + "description": "Enable or disable the Birdseye view feature." + }, + "mode": { + "label": "Tracking mode", + "description": "Mode for including cameras in Birdseye: 'objects', 'motion', or 'continuous'." 
+ }, + "restream": { + "label": "Restream RTSP", + "description": "Re-stream the Birdseye output as an RTSP feed; enabling this will keep Birdseye running continuously." + }, + "width": { + "label": "Width", + "description": "Output width (pixels) of the composed Birdseye frame." + }, + "height": { + "label": "Height", + "description": "Output height (pixels) of the composed Birdseye frame." + }, + "quality": { + "label": "Encoding quality", + "description": "Encoding quality for the Birdseye mpeg1 feed (1 highest quality, 31 lowest)." + }, + "inactivity_threshold": { + "label": "Inactivity threshold", + "description": "Seconds of inactivity after which a camera will stop being shown in Birdseye." + }, + "layout": { + "label": "Layout", + "description": "Layout options for the Birdseye composition.", + "scaling_factor": { + "label": "Scaling factor", + "description": "Scaling factor used by the layout calculator (range 1.0 to 5.0)." + }, + "max_cameras": { + "label": "Max cameras", + "description": "Maximum number of cameras to display at once in Birdseye; shows the most recent cameras." + } + }, + "idle_heartbeat_fps": { + "label": "Idle heartbeat FPS", + "description": "Frames-per-second to resend the last composed Birdseye frame when idle; set to 0 to disable." + }, + "order": { + "label": "Position", + "description": "Numeric position controlling the camera's ordering in the Birdseye layout." + } + }, + "detect": { + "label": "Object Detection", + "description": "Settings for the detection/detect role used to run object detection and initialize trackers.", + "enabled": { + "label": "Detection enabled", + "description": "Enable or disable object detection for all cameras; can be overridden per-camera. Detection must be enabled for object tracking to run." + }, + "height": { + "label": "Detect height", + "description": "Height (pixels) of frames used for the detect stream; leave empty to use the native stream resolution." 
+ }, + "width": { + "label": "Detect width", + "description": "Width (pixels) of frames used for the detect stream; leave empty to use the native stream resolution." + }, + "fps": { + "label": "Detect FPS", + "description": "Desired frames per second to run detection on; lower values reduce CPU usage (recommended value is 5, only set higher - at most 10 - if tracking extremely fast moving objects)." + }, + "min_initialized": { + "label": "Minimum initialization frames", + "description": "Number of consecutive detection hits required before creating a tracked object. Increase to reduce false initializations. Default value is fps divided by 2." + }, + "max_disappeared": { + "label": "Maximum disappeared frames", + "description": "Number of frames without a detection before a tracked object is considered gone." + }, + "stationary": { + "label": "Stationary objects config", + "description": "Settings to detect and manage objects that remain stationary for a period of time.", + "interval": { + "label": "Stationary interval", + "description": "How often (in frames) to run a detection check to confirm a stationary object." + }, + "threshold": { + "label": "Stationary threshold", + "description": "Number of frames with no position change required to mark an object as stationary." + }, + "max_frames": { + "label": "Max frames", + "description": "Limits how long stationary objects are tracked before being discarded.", + "default": { + "label": "Default max frames", + "description": "Default maximum frames to track a stationary object before stopping." + }, + "objects": { + "label": "Object max frames", + "description": "Per-object overrides for maximum frames to track stationary objects." + } + }, + "classifier": { + "label": "Enable visual classifier", + "description": "Use a visual classifier to detect truly stationary objects even when bounding boxes jitter." 
+ } + }, + "annotation_offset": { + "label": "Annotation offset", + "description": "Milliseconds to shift detect annotations to better align timeline bounding boxes with recordings; can be positive or negative." + } + }, + "ffmpeg": { + "label": "FFmpeg", + "description": "FFmpeg settings including binary path, args, hwaccel options, and per-role output args.", + "path": { + "label": "FFmpeg path", + "description": "Path to the FFmpeg binary to use or a version alias (\"5.0\" or \"7.0\")." + }, + "global_args": { + "label": "FFmpeg global arguments", + "description": "Global arguments passed to FFmpeg processes." + }, + "hwaccel_args": { + "label": "Hardware acceleration arguments", + "description": "Hardware acceleration arguments for FFmpeg. Provider-specific presets are recommended." + }, + "input_args": { + "label": "Input arguments", + "description": "Input arguments applied to FFmpeg input streams." + }, + "output_args": { + "label": "Output arguments", + "description": "Default output arguments used for different FFmpeg roles such as detect and record.", + "detect": { + "label": "Detect output arguments", + "description": "Default output arguments for detect role streams." + }, + "record": { + "label": "Record output arguments", + "description": "Default output arguments for record role streams." + } + }, + "retry_interval": { + "label": "FFmpeg retry time", + "description": "Seconds to wait before attempting to reconnect a camera stream after failure. Default is 10." + }, + "apple_compatibility": { + "label": "Apple compatibility", + "description": "Enable HEVC tagging for better Apple player compatibility when recording H.265." + }, + "gpu": { + "label": "GPU index", + "description": "Default GPU index used for hardware acceleration if available." 
+ }, + "inputs": { + "label": "Camera inputs", + "description": "List of input stream definitions (paths and roles) for this camera.", + "path": { + "label": "Input path", + "description": "Camera input stream URL or path." + }, + "roles": { + "label": "Input roles", + "description": "Roles for this input stream." + }, + "global_args": { + "label": "FFmpeg global arguments", + "description": "FFmpeg global arguments for this input stream." + }, + "hwaccel_args": { + "label": "Hardware acceleration arguments", + "description": "Hardware acceleration arguments for this input stream." + }, + "input_args": { + "label": "Input arguments", + "description": "Input arguments specific to this stream." + } + } + }, + "live": { + "label": "Live playback", + "description": "Settings used by the Web UI to control live stream resolution and quality.", + "streams": { + "label": "Live stream names", + "description": "Mapping of configured stream names to restream/go2rtc names used for live playback." + }, + "height": { + "label": "Live height", + "description": "Height (pixels) to render the jsmpeg live stream in the Web UI; must be <= detect stream height." + }, + "quality": { + "label": "Live quality", + "description": "Encoding quality for the jsmpeg stream (1 highest, 31 lowest)." + } + }, + "motion": { + "label": "Motion detection", + "description": "Default motion detection settings applied to cameras unless overridden per-camera.", + "enabled": { + "label": "Enable motion detection", + "description": "Enable or disable motion detection for all cameras; can be overridden per-camera." + }, + "threshold": { + "label": "Motion threshold", + "description": "Pixel difference threshold used by the motion detector; higher values reduce sensitivity (range 1-255)." + }, + "lightning_threshold": { + "label": "Lightning threshold", + "description": "Threshold to detect and ignore brief lighting spikes (lower is more sensitive, values between 0.3 and 1.0)." 
+ }, + "improve_contrast": { + "label": "Improve contrast", + "description": "Apply contrast improvement to frames before motion analysis to help detection." + }, + "contour_area": { + "label": "Contour area", + "description": "Minimum contour area in pixels required for a motion contour to be counted." + }, + "delta_alpha": { + "label": "Delta alpha", + "description": "Alpha blending factor used in frame differencing for motion calculation." + }, + "frame_alpha": { + "label": "Frame alpha", + "description": "Alpha value used when blending frames for motion preprocessing." + }, + "frame_height": { + "label": "Frame height", + "description": "Height in pixels to scale frames to when computing motion." + }, + "mask": { + "label": "Mask coordinates", + "description": "Ordered x,y coordinates defining the motion mask polygon used to include/exclude areas." + }, + "mqtt_off_delay": { + "label": "MQTT off delay", + "description": "Seconds to wait after last motion before publishing an MQTT 'off' state." + }, + "enabled_in_config": { + "label": "Original motion state", + "description": "Indicates whether motion detection was enabled in the original static configuration." + }, + "raw_mask": { + "label": "Raw Mask" + } + }, + "objects": { + "label": "Objects", + "description": "Object tracking defaults including which labels to track and per-object filters.", + "track": { + "label": "Objects to track", + "description": "List of object labels to track for all cameras; can be overridden per-camera." + }, + "filters": { + "label": "Object filters", + "description": "Filters applied to detected objects to reduce false positives (area, ratio, confidence).", + "min_area": { + "label": "Minimum object area", + "description": "Minimum bounding box area (pixels or percentage) required for this object type. Can be pixels (int) or percentage (float between 0.000001 and 0.99)." 
+ }, + "max_area": { + "label": "Maximum object area", + "description": "Maximum bounding box area (pixels or percentage) allowed for this object type. Can be pixels (int) or percentage (float between 0.000001 and 0.99)." + }, + "min_ratio": { + "label": "Minimum aspect ratio", + "description": "Minimum width/height ratio required for the bounding box to qualify." + }, + "max_ratio": { + "label": "Maximum aspect ratio", + "description": "Maximum width/height ratio allowed for the bounding box to qualify." + }, + "threshold": { + "label": "Confidence threshold", + "description": "Average detection confidence threshold required for the object to be considered a true positive." + }, + "min_score": { + "label": "Minimum confidence", + "description": "Minimum single-frame detection confidence required for the object to be counted." + }, + "mask": { + "label": "Filter mask", + "description": "Polygon coordinates defining where this filter applies within the frame." + }, + "raw_mask": { + "label": "Raw Mask" + } + }, + "mask": { + "label": "Object mask", + "description": "Mask polygon used to prevent object detection in specified areas." + }, + "genai": { + "label": "GenAI object config", + "description": "GenAI options for describing tracked objects and sending frames for generation.", + "enabled": { + "label": "Enable GenAI", + "description": "Enable GenAI generation of descriptions for tracked objects by default." + }, + "use_snapshot": { + "label": "Use snapshots", + "description": "Use object snapshots instead of thumbnails for GenAI description generation." + }, + "prompt": { + "label": "Caption prompt", + "description": "Default prompt template used when generating descriptions with GenAI." + }, + "object_prompts": { + "label": "Object prompts", + "description": "Per-object prompts to customize GenAI outputs for specific labels." + }, + "objects": { + "label": "GenAI objects", + "description": "List of object labels to send to GenAI by default." 
+ }, + "required_zones": { + "label": "Required zones", + "description": "Zones that must be entered for objects to qualify for GenAI description generation." + }, + "debug_save_thumbnails": { + "label": "Save thumbnails", + "description": "Save thumbnails sent to GenAI for debugging and review." + }, + "send_triggers": { + "label": "GenAI triggers", + "description": "Defines when frames should be sent to GenAI (on end, after updates, etc.).", + "tracked_object_end": { + "label": "Send on end", + "description": "Send a request to GenAI when the tracked object ends." + }, + "after_significant_updates": { + "label": "Early GenAI trigger", + "description": "Send a request to GenAI after a specified number of significant updates for the tracked object." + } + }, + "enabled_in_config": { + "label": "Original GenAI state", + "description": "Indicates whether GenAI was enabled in the original static config." + } + } + }, + "record": { + "label": "Recording", + "description": "Recording and retention settings applied to cameras unless overridden per-camera.", + "enabled": { + "label": "Enable recording", + "description": "Enable or disable recording for all cameras; can be overridden per-camera." + }, + "expire_interval": { + "label": "Record cleanup interval", + "description": "Minutes between cleanup passes that remove expired recording segments." + }, + "continuous": { + "label": "Continuous retention", + "description": "Number of days to retain recordings regardless of tracked objects or motion. Set to 0 if you only want to retain recordings of alerts and detections.", + "days": { + "label": "Retention days", + "description": "Days to retain recordings." + } + }, + "motion": { + "label": "Motion retention", + "description": "Number of days to retain recordings triggered by motion regardless of tracked objects. Set to 0 if you only want to retain recordings of alerts and detections.", + "days": { + "label": "Retention days", + "description": "Days to retain recordings." 
+ } + }, + "detections": { + "label": "Detection retention", + "description": "Recording retention settings for detection events including pre/post capture durations.", + "pre_capture": { + "label": "Pre-capture seconds", + "description": "Number of seconds before the detection event to include in the recording." + }, + "post_capture": { + "label": "Post-capture seconds", + "description": "Number of seconds after the detection event to include in the recording." + }, + "retain": { + "label": "Event retention", + "description": "Retention settings for recordings of detection events.", + "days": { + "label": "Retention days", + "description": "Number of days to retain recordings of detection events." + }, + "mode": { + "label": "Retention mode", + "description": "Mode for retention: all (save all segments), motion (save segments with motion), or active_objects (save segments with active objects)." + } + } + }, + "alerts": { + "label": "Alert retention", + "description": "Recording retention settings for alert events including pre/post capture durations.", + "pre_capture": { + "label": "Pre-capture seconds", + "description": "Number of seconds before the detection event to include in the recording." + }, + "post_capture": { + "label": "Post-capture seconds", + "description": "Number of seconds after the detection event to include in the recording." + }, + "retain": { + "label": "Event retention", + "description": "Retention settings for recordings of detection events.", + "days": { + "label": "Retention days", + "description": "Number of days to retain recordings of detection events." + }, + "mode": { + "label": "Retention mode", + "description": "Mode for retention: all (save all segments), motion (save segments with motion), or active_objects (save segments with active objects)." 
+ } + } + }, + "export": { + "label": "Export config", + "description": "Settings used when exporting recordings such as timelapse and hardware acceleration.", + "hwaccel_args": { + "label": "Export hwaccel args", + "description": "Hardware acceleration args to use for export/transcode operations." + } + }, + "preview": { + "label": "Preview config", + "description": "Settings controlling the quality of recording previews shown in the UI.", + "quality": { + "label": "Preview quality", + "description": "Preview quality level (very_low, low, medium, high, very_high)." + } + }, + "enabled_in_config": { + "label": "Original recording state", + "description": "Indicates whether recording was enabled in the original static configuration." + } + }, + "review": { + "label": "Review", + "description": "Settings that control alerts, detections, and GenAI review summaries used by the UI and storage.", + "alerts": { + "label": "Alerts config", + "description": "Settings for which tracked objects generate alerts and how alerts are retained.", + "enabled": { + "label": "Enable alerts", + "description": "Enable or disable alert generation for all cameras; can be overridden per-camera." + }, + "labels": { + "label": "Alert labels", + "description": "List of object labels that qualify as alerts (for example: car, person)." + }, + "required_zones": { + "label": "Required zones", + "description": "Zones that an object must enter to be considered an alert; leave empty to allow any zone." + }, + "enabled_in_config": { + "label": "Original alerts state", + "description": "Tracks whether alerts were originally enabled in the static configuration." + }, + "cutoff_time": { + "label": "Alerts cutoff time", + "description": "Seconds to wait after no alert-causing activity before cutting off an alert." 
+ } + }, + "detections": { + "label": "Detections config", + "description": "Settings for creating detection events (non-alert) and how long to keep them.", + "enabled": { + "label": "Enable detections", + "description": "Enable or disable detection events for all cameras; can be overridden per-camera." + }, + "labels": { + "label": "Detection labels", + "description": "List of object labels that qualify as detection events." + }, + "required_zones": { + "label": "Required zones", + "description": "Zones that an object must enter to be considered a detection; leave empty to allow any zone." + }, + "cutoff_time": { + "label": "Detections cutoff time", + "description": "Seconds to wait after no detection-causing activity before cutting off a detection." + }, + "enabled_in_config": { + "label": "Original detections state", + "description": "Tracks whether detections were originally enabled in the static configuration." + } + }, + "genai": { + "label": "GenAI config", + "description": "Controls use of generative AI for producing descriptions and summaries of review items.", + "enabled": { + "label": "Enable GenAI descriptions", + "description": "Enable or disable GenAI-generated descriptions and summaries for review items." + }, + "alerts": { + "label": "Enable GenAI for alerts", + "description": "Use GenAI to generate descriptions for alert items." + }, + "detections": { + "label": "Enable GenAI for detections", + "description": "Use GenAI to generate descriptions for detection items." + }, + "image_source": { + "label": "Review image source", + "description": "Source of images sent to GenAI ('preview' or 'recordings'); 'recordings' uses higher quality frames but more tokens." + }, + "additional_concerns": { + "label": "Additional concerns", + "description": "A list of additional concerns or notes the GenAI should consider when evaluating activity on this camera." 
+ }, + "debug_save_thumbnails": { + "label": "Save thumbnails", + "description": "Save thumbnails that are sent to the GenAI provider for debugging and review." + }, + "enabled_in_config": { + "label": "Original GenAI state", + "description": "Tracks whether GenAI review was originally enabled in the static configuration." + }, + "preferred_language": { + "label": "Preferred language", + "description": "Preferred language to request from the GenAI provider for generated responses." + }, + "activity_context_prompt": { + "label": "Activity context prompt", + "description": "Custom prompt describing what is and is not suspicious activity to provide context for GenAI summaries." + } + } + }, + "snapshots": { + "label": "Snapshots", + "description": "Settings for saved JPEG snapshots of tracked objects for all cameras; can be overridden per-camera.", + "enabled": { + "label": "Snapshots enabled", + "description": "Enable or disable saving snapshots for all cameras; can be overridden per-camera." + }, + "clean_copy": { + "label": "Save clean copy", + "description": "Save an unannotated clean copy of snapshots in addition to annotated ones." + }, + "timestamp": { + "label": "Timestamp overlay", + "description": "Overlay a timestamp on saved snapshots." + }, + "bounding_box": { + "label": "Bounding box overlay", + "description": "Draw bounding boxes for tracked objects on saved snapshots." + }, + "crop": { + "label": "Crop snapshot", + "description": "Crop saved snapshots to the detected object's bounding box." + }, + "required_zones": { + "label": "Required zones", + "description": "Zones an object must enter for a snapshot to be saved." + }, + "height": { + "label": "Snapshot height", + "description": "Height (pixels) to resize saved snapshots to; leave empty to preserve original size." 
+ }, + "retain": { + "label": "Snapshot retention", + "description": "Retention settings for saved snapshots including default days and per-object overrides.", + "default": { + "label": "Default retention", + "description": "Default number of days to retain snapshots." + }, + "mode": { + "label": "Retention mode", + "description": "Mode for retention: all (save all segments), motion (save segments with motion), or active_objects (save segments with active objects)." + }, + "objects": { + "label": "Object retention", + "description": "Per-object overrides for snapshot retention days." + } + }, + "quality": { + "label": "JPEG quality", + "description": "JPEG encode quality for saved snapshots (0-100)." + } + }, + "timestamp_style": { + "label": "Timestamp style", + "description": "Styling options for in-feed timestamps applied to debug view and snapshots.", + "position": { + "label": "Timestamp position", + "description": "Position of the timestamp on the image (tl/tr/bl/br)." + }, + "format": { + "label": "Timestamp format", + "description": "Datetime format string used for timestamps (Python datetime format codes)." + }, + "color": { + "label": "Timestamp color", + "description": "RGB color values for the timestamp text (all values 0-255).", + "red": { + "label": "Red", + "description": "Red component (0-255) for timestamp color." + }, + "green": { + "label": "Green", + "description": "Green component (0-255) for timestamp color." + }, + "blue": { + "label": "Blue", + "description": "Blue component (0-255) for timestamp color." + } + }, + "thickness": { + "label": "Timestamp thickness", + "description": "Line thickness of the timestamp text." + }, + "effect": { + "label": "Timestamp effect", + "description": "Visual effect for the timestamp text (none, solid, shadow)." 
+ } + }, + "audio_transcription": { + "label": "Audio transcription", + "description": "Settings for live and speech audio transcription used for events and live captions.", + "enabled": { + "label": "Enable audio transcription", + "description": "Enable or disable automatic audio transcription for all cameras; can be overridden per-camera." + }, + "language": { + "label": "Transcription language", + "description": "Language code used for transcription/translation (for example 'en' for English). See https://whisper-api.com/docs/languages/ for supported language codes." + }, + "device": { + "label": "Transcription device", + "description": "Device key (CPU/GPU) to run the transcription model on. Only NVIDIA CUDA GPUs are currently supported for transcription." + }, + "model_size": { + "label": "Model size", + "description": "Model size to use for offline audio event transcription." + }, + "live_enabled": { + "label": "Live transcription", + "description": "Enable streaming live transcription for audio as it is received." + } + }, + "classification": { + "label": "Object classification", + "description": "Settings for classification models used to refine object labels or state classification.", + "bird": { + "label": "Bird classification config", + "description": "Settings specific to bird classification models.", + "enabled": { + "label": "Bird classification", + "description": "Enable or disable bird classification." + }, + "threshold": { + "label": "Minimum score", + "description": "Minimum classification score required to accept a bird classification." + } + }, + "custom": { + "label": "Custom Classification Models", + "description": "Configuration for custom classification models used for objects or state detection.", + "enabled": { + "label": "Enable model", + "description": "Enable or disable the custom classification model." + }, + "name": { + "label": "Model name", + "description": "Identifier for the custom classification model to use." 
+ }, + "threshold": { + "label": "Score threshold", + "description": "Score threshold used to change the classification state." + }, + "save_attempts": { + "label": "Save attempts", + "description": "How many classification attempts to save for recent classifications UI." + }, + "object_config": { + "objects": { + "label": "Classify objects", + "description": "List of object types to run object classification on." + }, + "classification_type": { + "label": "Classification type", + "description": "Classification type applied: 'sub_label' (adds sub_label) or other supported types." + } + }, + "state_config": { + "cameras": { + "label": "Classification cameras", + "description": "Per-camera crop and settings for running state classification.", + "crop": { + "label": "Classification crop", + "description": "Crop coordinates to use for running classification on this camera." + } + }, + "motion": { + "label": "Run on motion", + "description": "If true, run classification when motion is detected within the specified crop." + }, + "interval": { + "label": "Classification interval", + "description": "Interval (seconds) between periodic classification runs for state classification." + } + } + } + }, + "semantic_search": { + "label": "Semantic Search", + "description": "Settings for Semantic Search which builds and queries object embeddings to find similar items.", + "enabled": { + "label": "Enable semantic search", + "description": "Enable or disable the semantic search feature." + }, + "reindex": { + "label": "Reindex on startup", + "description": "Trigger a full reindex of historical tracked objects into the embeddings database." + }, + "model": { + "label": "Semantic search model", + "description": "The embeddings model to use for semantic search (for example 'jinav1')." + }, + "model_size": { + "label": "Model size", + "description": "Select model size; 'small' runs on CPU and 'large' typically requires GPU." 
+ }, + "device": { + "label": "Device", + "description": "This is an override, to target a specific device. See https://onnxruntime.ai/docs/execution-providers/ for more information" + }, + "triggers": { + "label": "Triggers", + "description": "Actions and matching criteria for camera-specific semantic search triggers.", + "friendly_name": { + "label": "Friendly name", + "description": "Optional friendly name displayed in the UI for this trigger." + }, + "enabled": { + "label": "Enable this trigger", + "description": "Enable or disable this semantic search trigger." + }, + "type": { + "label": "Trigger type", + "description": "Type of trigger: 'thumbnail' (match against image) or 'description' (match against text)." + }, + "data": { + "label": "Trigger content", + "description": "Text phrase or thumbnail ID to match against tracked objects." + }, + "threshold": { + "label": "Trigger threshold", + "description": "Minimum similarity score (0-1) required to activate this trigger." + }, + "actions": { + "label": "Trigger actions", + "description": "List of actions to execute when trigger matches (notification, sub_label, attribute)." + } + } + }, + "face_recognition": { + "label": "Face recognition", + "description": "Settings for face detection and recognition for all cameras; can be overridden per-camera.", + "enabled": { + "label": "Enable face recognition", + "description": "Enable or disable face recognition for all cameras; can be overridden per-camera." + }, + "model_size": { + "label": "Model size", + "description": "Model size to use for face embeddings (small/large); larger may require GPU." + }, + "unknown_score": { + "label": "Unknown score threshold", + "description": "Distance threshold below which a face is considered a potential match (higher = stricter)." + }, + "detection_threshold": { + "label": "Detection threshold", + "description": "Minimum detection confidence required to consider a face detection valid." 
+ }, + "recognition_threshold": { + "label": "Recognition threshold", + "description": "Face embedding distance threshold to consider two faces a match." + }, + "min_area": { + "label": "Minimum face area", + "description": "Minimum area (pixels) of a detected face box required to attempt recognition." + }, + "min_faces": { + "label": "Minimum faces", + "description": "Minimum number of face recognitions required before applying a recognized sub-label to a person." + }, + "save_attempts": { + "label": "Save attempts", + "description": "Number of face recognition attempts to retain for recent recognition UI." + }, + "blur_confidence_filter": { + "label": "Blur confidence filter", + "description": "Adjust confidence scores based on image blur to reduce false positives for poor quality faces." + }, + "device": { + "label": "Device", + "description": "This is an override, to target a specific device. See https://onnxruntime.ai/docs/execution-providers/ for more information" + } + }, + "lpr": { + "label": "License Plate Recognition", + "description": "License plate recognition settings including detection thresholds, formatting, and known plates.", + "enabled": { + "label": "Enable LPR", + "description": "Enable or disable license plate recognition for all cameras; can be overridden per-camera." + }, + "model_size": { + "label": "Model size", + "description": "Model size used for text detection/recognition. Most users should use 'small'." + }, + "detection_threshold": { + "label": "Detection threshold", + "description": "Detection confidence threshold to begin running OCR on a suspected plate." + }, + "min_area": { + "label": "Minimum plate area", + "description": "Minimum plate area (pixels) required to attempt recognition." + }, + "recognition_threshold": { + "label": "Recognition threshold", + "description": "Confidence threshold required for recognized plate text to be attached as a sub-label." 
+ }, + "min_plate_length": { + "label": "Min plate length", + "description": "Minimum number of characters a recognized plate must contain to be considered valid." + }, + "format": { + "label": "Plate format regex", + "description": "Optional regex to validate recognized plate strings against an expected format." + }, + "match_distance": { + "label": "Match distance", + "description": "Number of character mismatches allowed when comparing detected plates to known plates." + }, + "known_plates": { + "label": "Known plates", + "description": "List of plates or regexes to specially track or alert on." + }, + "enhancement": { + "label": "Enhancement level", + "description": "Enhancement level (0-10) to apply to plate crops prior to OCR; higher values may not always improve results, levels above 5 may only work with night time plates and should be used with caution." + }, + "debug_save_plates": { + "label": "Save debug plates", + "description": "Save plate crop images for debugging LPR performance." + }, + "device": { + "label": "Device", + "description": "This is an override, to target a specific device. See https://onnxruntime.ai/docs/execution-providers/ for more information" + }, + "replace_rules": { + "label": "Replacement rules", + "description": "Regex replacement rules used to normalize detected plate strings before matching.", + "pattern": { + "label": "Regex pattern" + }, + "replacement": { + "label": "Replacement string" + } + }, + "expire_time": { + "label": "Expire seconds", + "description": "Time in seconds after which an unseen plate is expired from the tracker (for dedicated LPR cameras only)." + } + }, + "camera_groups": { + "label": "Camera groups", + "description": "Configuration for named camera groups used to organize cameras in the UI.", + "cameras": { + "label": "Camera list", + "description": "Array of camera names included in this group." 
+ }, + "icon": { + "label": "Group icon", + "description": "Icon used to represent the camera group in the UI." + }, + "order": { + "label": "Sort order", + "description": "Numeric order used to sort camera groups in the UI; larger numbers appear later." + } + }, + "camera_mqtt": { + "label": "MQTT", + "description": "MQTT image publishing settings.", + "enabled": { + "label": "Send image", + "description": "Enable publishing image snapshots for objects to MQTT topics for this camera." + }, + "timestamp": { + "label": "Add timestamp", + "description": "Overlay a timestamp on images published to MQTT." + }, + "bounding_box": { + "label": "Add bounding box", + "description": "Draw bounding boxes on images published over MQTT." + }, + "crop": { + "label": "Crop image", + "description": "Crop images published to MQTT to the detected object's bounding box." + }, + "height": { + "label": "Image height", + "description": "Height (pixels) to resize images published over MQTT." + }, + "required_zones": { + "label": "Required zones", + "description": "Zones that an object must enter for an MQTT image to be published." + }, + "quality": { + "label": "JPEG quality", + "description": "JPEG quality for images published to MQTT (0-100)." + } + }, + "camera_ui": { + "label": "Camera UI", + "description": "Display ordering and visibility for this camera in the UI. Ordering affects the default dashboard. For more granular control, use camera groups.", + "order": { + "label": "UI order", + "description": "Numeric order used to sort the camera in the UI (default dashboard and lists); larger numbers appear later." + }, + "dashboard": { + "label": "Show in UI", + "description": "Toggle whether this camera is visible everywhere in the Frigate UI. Disabling this will require manually editing the config to view this camera in the UI again." 
+ } + }, + "onvif": { + "label": "ONVIF", + "description": "ONVIF connection and PTZ autotracking settings for this camera.", + "host": { + "label": "ONVIF host", + "description": "Host (and optional scheme) for the ONVIF service for this camera." + }, + "port": { + "label": "ONVIF port", + "description": "Port number for the ONVIF service." + }, + "user": { + "label": "ONVIF username", + "description": "Username for ONVIF authentication; some devices require admin user for ONVIF." + }, + "password": { + "label": "ONVIF password", + "description": "Password for ONVIF authentication." + }, + "tls_insecure": { + "label": "Disable TLS verify", + "description": "Skip TLS verification and disable digest auth for ONVIF (unsafe; use in safe networks only)." + }, + "autotracking": { + "label": "Autotracking", + "description": "Automatically track moving objects and keep them centered in the frame using PTZ camera movements.", + "enabled": { + "label": "Enable Autotracking", + "description": "Enable or disable automatic PTZ camera tracking of detected objects." + }, + "calibrate_on_startup": { + "label": "Calibrate on start", + "description": "Measure PTZ motor speeds on startup to improve tracking accuracy. Frigate will update config with movement_weights after calibration." + }, + "zooming": { + "label": "Zoom mode", + "description": "Control zoom behavior: disabled (pan/tilt only), absolute (most compatible), or relative (concurrent pan/tilt/zoom)." + }, + "zoom_factor": { + "label": "Zoom factor", + "description": "Control zoom level on tracked objects. Lower values keep more scene in view; higher values zoom in closer but may lose tracking. Values between 0.1 and 0.75." + }, + "track": { + "label": "Tracked objects", + "description": "List of object types that should trigger autotracking." + }, + "required_zones": { + "label": "Required zones", + "description": "Objects must enter one of these zones before autotracking begins." 
+ }, + "return_preset": { + "label": "Return preset", + "description": "ONVIF preset name configured in camera firmware to return to after tracking ends." + }, + "timeout": { + "label": "Return timeout", + "description": "Wait this many seconds after losing tracking before returning camera to preset position." + }, + "movement_weights": { + "label": "Movement weights", + "description": "Calibration values automatically generated by camera calibration. Do not modify manually." + }, + "enabled_in_config": { + "label": "Original autotrack state", + "description": "Internal field to track whether autotracking was enabled in configuration." + } + }, + "ignore_time_mismatch": { + "label": "Ignore time mismatch", + "description": "Ignore time synchronization differences between camera and Frigate server for ONVIF communication." + } + } +} diff --git a/web/public/locales/en/config/go2rtc.json b/web/public/locales/en/config/go2rtc.json deleted file mode 100644 index 76ec33020..000000000 --- a/web/public/locales/en/config/go2rtc.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "label": "Global restream configuration." 
-} \ No newline at end of file diff --git a/web/public/locales/en/config/groups.json b/web/public/locales/en/config/groups.json new file mode 100644 index 000000000..1663ad169 --- /dev/null +++ b/web/public/locales/en/config/groups.json @@ -0,0 +1,73 @@ +{ + "audio": { + "global": { + "detection": "Global Detection", + "sensitivity": "Global Sensitivity" + }, + "cameras": { + "detection": "Detection", + "sensitivity": "Sensitivity" + } + }, + "timestamp_style": { + "global": { + "appearance": "Global Appearance" + }, + "cameras": { + "appearance": "Appearance" + } + }, + "motion": { + "global": { + "sensitivity": "Global Sensitivity", + "algorithm": "Global Algorithm" + }, + "cameras": { + "sensitivity": "Sensitivity", + "algorithm": "Algorithm" + } + }, + "snapshots": { + "global": { + "display": "Global Display" + }, + "cameras": { + "display": "Display" + } + }, + "detect": { + "global": { + "resolution": "Global Resolution", + "tracking": "Global Tracking" + }, + "cameras": { + "resolution": "Resolution", + "tracking": "Tracking" + } + }, + "objects": { + "global": { + "tracking": "Global Tracking", + "filtering": "Global Filtering" + }, + "cameras": { + "tracking": "Tracking", + "filtering": "Filtering" + } + }, + "record": { + "global": { + "retention": "Global Retention", + "events": "Global Events" + }, + "cameras": { + "retention": "Retention", + "events": "Events" + } + }, + "ffmpeg": { + "cameras": { + "cameraFfmpeg": "Camera-specific FFmpeg arguments" + } + } +} diff --git a/web/public/locales/en/config/live.json b/web/public/locales/en/config/live.json deleted file mode 100644 index 362170137..000000000 --- a/web/public/locales/en/config/live.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "label": "Live playback settings.", - "properties": { - "streams": { - "label": "Friendly names and restream names to use for live view." 
- }, - "height": { - "label": "Live camera view height" - }, - "quality": { - "label": "Live camera view quality" - } - } -} \ No newline at end of file diff --git a/web/public/locales/en/config/logger.json b/web/public/locales/en/config/logger.json deleted file mode 100644 index 3d51786a7..000000000 --- a/web/public/locales/en/config/logger.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "label": "Logging configuration.", - "properties": { - "default": { - "label": "Default logging level." - }, - "logs": { - "label": "Log level for specified processes." - } - } -} \ No newline at end of file diff --git a/web/public/locales/en/config/lpr.json b/web/public/locales/en/config/lpr.json deleted file mode 100644 index 951d1f8f6..000000000 --- a/web/public/locales/en/config/lpr.json +++ /dev/null @@ -1,45 +0,0 @@ -{ - "label": "License Plate recognition config.", - "properties": { - "enabled": { - "label": "Enable license plate recognition." - }, - "model_size": { - "label": "The size of the embeddings model used." - }, - "detection_threshold": { - "label": "License plate object confidence score required to begin running recognition." - }, - "min_area": { - "label": "Minimum area of license plate to begin running recognition." - }, - "recognition_threshold": { - "label": "Recognition confidence score required to add the plate to the object as a sub label." - }, - "min_plate_length": { - "label": "Minimum number of characters a license plate must have to be added to the object as a sub label." - }, - "format": { - "label": "Regular expression for the expected format of license plate." - }, - "match_distance": { - "label": "Allow this number of missing/incorrect characters to still cause a detected plate to match a known plate." - }, - "known_plates": { - "label": "Known plates to track (strings or regular expressions)." - }, - "enhancement": { - "label": "Amount of contrast adjustment and denoising to apply to license plate images before recognition." 
- }, - "debug_save_plates": { - "label": "Save plates captured for LPR for debugging purposes." - }, - "device": { - "label": "The device key to use for LPR.", - "description": "This is an override, to target a specific device. See https://onnxruntime.ai/docs/execution-providers/ for more information" - }, - "replace_rules": { - "label": "List of regex replacement rules for normalizing detected plates. Each rule has 'pattern' and 'replacement'." - } - } -} \ No newline at end of file diff --git a/web/public/locales/en/config/model.json b/web/public/locales/en/config/model.json deleted file mode 100644 index 0bc2c1ddf..000000000 --- a/web/public/locales/en/config/model.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "label": "Detection model configuration.", - "properties": { - "path": { - "label": "Custom Object detection model path." - }, - "labelmap_path": { - "label": "Label map for custom object detector." - }, - "width": { - "label": "Object detection model input width." - }, - "height": { - "label": "Object detection model input height." - }, - "labelmap": { - "label": "Labelmap customization." - }, - "attributes_map": { - "label": "Map of object labels to their attribute labels." - }, - "input_tensor": { - "label": "Model Input Tensor Shape" - }, - "input_pixel_format": { - "label": "Model Input Pixel Color Format" - }, - "input_dtype": { - "label": "Model Input D Type" - }, - "model_type": { - "label": "Object Detection Model Type" - } - } -} \ No newline at end of file diff --git a/web/public/locales/en/config/motion.json b/web/public/locales/en/config/motion.json deleted file mode 100644 index 183bfdf34..000000000 --- a/web/public/locales/en/config/motion.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "label": "Global motion detection configuration." 
-} \ No newline at end of file diff --git a/web/public/locales/en/config/mqtt.json b/web/public/locales/en/config/mqtt.json deleted file mode 100644 index d2625ac83..000000000 --- a/web/public/locales/en/config/mqtt.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "label": "MQTT configuration.", - "properties": { - "enabled": { - "label": "Enable MQTT Communication." - }, - "host": { - "label": "MQTT Host" - }, - "port": { - "label": "MQTT Port" - }, - "topic_prefix": { - "label": "MQTT Topic Prefix" - }, - "client_id": { - "label": "MQTT Client ID" - }, - "stats_interval": { - "label": "MQTT Camera Stats Interval" - }, - "user": { - "label": "MQTT Username" - }, - "password": { - "label": "MQTT Password" - }, - "tls_ca_certs": { - "label": "MQTT TLS CA Certificates" - }, - "tls_client_cert": { - "label": "MQTT TLS Client Certificate" - }, - "tls_client_key": { - "label": "MQTT TLS Client Key" - }, - "tls_insecure": { - "label": "MQTT TLS Insecure" - }, - "qos": { - "label": "MQTT QoS" - } - } -} \ No newline at end of file diff --git a/web/public/locales/en/config/networking.json b/web/public/locales/en/config/networking.json deleted file mode 100644 index 592ea9477..000000000 --- a/web/public/locales/en/config/networking.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "label": "Networking configuration", - "properties": { - "ipv6": { - "label": "IPv6 configuration", - "properties": { - "enabled": { - "label": "Enable IPv6 for port 5000 and/or 8971" - } - } - }, - "listen": { - "label": "Listening ports configuration", - "properties": { - "internal": { - "label": "Internal listening port for Frigate" - }, - "external": { - "label": "External listening port for Frigate" - } - } - } - } -} diff --git a/web/public/locales/en/config/notifications.json b/web/public/locales/en/config/notifications.json deleted file mode 100644 index b529f10e0..000000000 --- a/web/public/locales/en/config/notifications.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "label": "Global notification 
configuration.", - "properties": { - "enabled": { - "label": "Enable notifications" - }, - "email": { - "label": "Email required for push." - }, - "cooldown": { - "label": "Cooldown period for notifications (time in seconds)." - }, - "enabled_in_config": { - "label": "Keep track of original state of notifications." - } - } -} \ No newline at end of file diff --git a/web/public/locales/en/config/objects.json b/web/public/locales/en/config/objects.json deleted file mode 100644 index f041672a0..000000000 --- a/web/public/locales/en/config/objects.json +++ /dev/null @@ -1,77 +0,0 @@ -{ - "label": "Global object configuration.", - "properties": { - "track": { - "label": "Objects to track." - }, - "filters": { - "label": "Object filters.", - "properties": { - "min_area": { - "label": "Minimum area of bounding box for object to be counted. Can be pixels (int) or percentage (float between 0.000001 and 0.99)." - }, - "max_area": { - "label": "Maximum area of bounding box for object to be counted. Can be pixels (int) or percentage (float between 0.000001 and 0.99)." - }, - "min_ratio": { - "label": "Minimum ratio of bounding box's width/height for object to be counted." - }, - "max_ratio": { - "label": "Maximum ratio of bounding box's width/height for object to be counted." - }, - "threshold": { - "label": "Average detection confidence threshold for object to be counted." - }, - "min_score": { - "label": "Minimum detection confidence for object to be counted." - }, - "mask": { - "label": "Detection area polygon mask for this filter configuration." - } - } - }, - "mask": { - "label": "Object mask." - }, - "genai": { - "label": "Config for using genai to analyze objects.", - "properties": { - "enabled": { - "label": "Enable GenAI for camera." - }, - "use_snapshot": { - "label": "Use snapshots for generating descriptions." - }, - "prompt": { - "label": "Default caption prompt." - }, - "object_prompts": { - "label": "Object specific prompts." 
- }, - "objects": { - "label": "List of objects to run generative AI for." - }, - "required_zones": { - "label": "List of required zones to be entered in order to run generative AI." - }, - "debug_save_thumbnails": { - "label": "Save thumbnails sent to generative AI for debugging purposes." - }, - "send_triggers": { - "label": "What triggers to use to send frames to generative AI for a tracked object.", - "properties": { - "tracked_object_end": { - "label": "Send once the object is no longer tracked." - }, - "after_significant_updates": { - "label": "Send an early request to generative AI when X frames accumulated." - } - } - }, - "enabled_in_config": { - "label": "Keep track of original state of generative AI." - } - } - } - } -} \ No newline at end of file diff --git a/web/public/locales/en/config/proxy.json b/web/public/locales/en/config/proxy.json deleted file mode 100644 index 732d6fafd..000000000 --- a/web/public/locales/en/config/proxy.json +++ /dev/null @@ -1,31 +0,0 @@ -{ - "label": "Proxy configuration.", - "properties": { - "header_map": { - "label": "Header mapping definitions for proxy user passing.", - "properties": { - "user": { - "label": "Header name from upstream proxy to identify user." - }, - "role": { - "label": "Header name from upstream proxy to identify user role." - }, - "role_map": { - "label": "Mapping of Frigate roles to upstream group values. " - } - } - }, - "logout_url": { - "label": "Redirect url for logging out with proxy." - }, - "auth_secret": { - "label": "Secret value for proxy authentication." - }, - "default_role": { - "label": "Default role for proxy users." - }, - "separator": { - "label": "The character used to separate values in a mapped header." 
- } - } -} \ No newline at end of file diff --git a/web/public/locales/en/config/record.json b/web/public/locales/en/config/record.json deleted file mode 100644 index 0c4a5fc42..000000000 --- a/web/public/locales/en/config/record.json +++ /dev/null @@ -1,90 +0,0 @@ -{ - "label": "Global record configuration.", - "properties": { - "enabled": { - "label": "Enable record on all cameras." - }, - "expire_interval": { - "label": "Number of minutes to wait between cleanup runs." - }, - "continuous": { - "label": "Continuous recording retention settings.", - "properties": { - "days": { - "label": "Default retention period." - } - } - }, - "motion": { - "label": "Motion recording retention settings.", - "properties": { - "days": { - "label": "Default retention period." - } - } - }, - "detections": { - "label": "Detection specific retention settings.", - "properties": { - "pre_capture": { - "label": "Seconds to retain before event starts." - }, - "post_capture": { - "label": "Seconds to retain after event ends." - }, - "retain": { - "label": "Event retention settings.", - "properties": { - "days": { - "label": "Default retention period." - }, - "mode": { - "label": "Retain mode." - } - } - } - } - }, - "alerts": { - "label": "Alert specific retention settings.", - "properties": { - "pre_capture": { - "label": "Seconds to retain before event starts." - }, - "post_capture": { - "label": "Seconds to retain after event ends." - }, - "retain": { - "label": "Event retention settings.", - "properties": { - "days": { - "label": "Default retention period." - }, - "mode": { - "label": "Retain mode." - } - } - } - } - }, - "export": { - "label": "Recording Export Config", - "properties": { - "timelapse_args": { - "label": "Timelapse Args" - } - } - }, - "preview": { - "label": "Recording Preview Config", - "properties": { - "quality": { - "label": "Quality of recording preview." - } - } - }, - "enabled_in_config": { - "label": "Keep track of original state of recording." 
- } - } -} diff --git a/web/public/locales/en/config/review.json b/web/public/locales/en/config/review.json deleted file mode 100644 index dba83ee1c..000000000 --- a/web/public/locales/en/config/review.json +++ /dev/null @@ -1,74 +0,0 @@ -{ - "label": "Review configuration.", - "properties": { - "alerts": { - "label": "Review alerts config.", - "properties": { - "enabled": { - "label": "Enable alerts." - }, - "labels": { - "label": "Labels to create alerts for." - }, - "required_zones": { - "label": "List of required zones to be entered in order to save the event as an alert." - }, - "enabled_in_config": { - "label": "Keep track of original state of alerts." - }, - "cutoff_time": { - "label": "Time to cutoff alerts after no alert-causing activity has occurred." - } - } - }, - "detections": { - "label": "Review detections config.", - "properties": { - "enabled": { - "label": "Enable detections." - }, - "labels": { - "label": "Labels to create detections for." - }, - "required_zones": { - "label": "List of required zones to be entered in order to save the event as a detection." - }, - "cutoff_time": { - "label": "Time to cutoff detection after no detection-causing activity has occurred." - }, - "enabled_in_config": { - "label": "Keep track of original state of detections." - } - } - }, - "genai": { - "label": "Review description genai config.", - "properties": { - "enabled": { - "label": "Enable GenAI descriptions for review items." - }, - "alerts": { - "label": "Enable GenAI for alerts." - }, - "detections": { - "label": "Enable GenAI for detections." - }, - "additional_concerns": { - "label": "Additional concerns that GenAI should make note of on this camera." - }, - "debug_save_thumbnails": { - "label": "Save thumbnails sent to generative AI for debugging purposes." - }, - "enabled_in_config": { - "label": "Keep track of original state of generative AI." 
- }, - "preferred_language": { - "label": "Preferred language for GenAI Response" - }, - "activity_context_prompt": { - "label": "Custom activity context prompt defining normal activity patterns for this property." - } - } - } - } -} diff --git a/web/public/locales/en/config/safe_mode.json b/web/public/locales/en/config/safe_mode.json deleted file mode 100644 index 352f78b29..000000000 --- a/web/public/locales/en/config/safe_mode.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "label": "If Frigate should be started in safe mode." -} \ No newline at end of file diff --git a/web/public/locales/en/config/semantic_search.json b/web/public/locales/en/config/semantic_search.json deleted file mode 100644 index 2c46640bb..000000000 --- a/web/public/locales/en/config/semantic_search.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "label": "Semantic search configuration.", - "properties": { - "enabled": { - "label": "Enable semantic search." - }, - "reindex": { - "label": "Reindex all tracked objects on startup." - }, - "model": { - "label": "The CLIP model to use for semantic search." - }, - "model_size": { - "label": "The size of the embeddings model used." - }, - "device": { - "label": "The device key to use for semantic search.", - "description": "This is an override, to target a specific device. See https://onnxruntime.ai/docs/execution-providers/ for more information" - } - } -} \ No newline at end of file diff --git a/web/public/locales/en/config/snapshots.json b/web/public/locales/en/config/snapshots.json deleted file mode 100644 index a6336140e..000000000 --- a/web/public/locales/en/config/snapshots.json +++ /dev/null @@ -1,43 +0,0 @@ -{ - "label": "Global snapshots configuration.", - "properties": { - "enabled": { - "label": "Snapshots enabled." - }, - "clean_copy": { - "label": "Create a clean copy of the snapshot image." - }, - "timestamp": { - "label": "Add a timestamp overlay on the snapshot." - }, - "bounding_box": { - "label": "Add a bounding box overlay on the snapshot." 
- }, - "crop": { - "label": "Crop the snapshot to the detected object." - }, - "required_zones": { - "label": "List of required zones to be entered in order to save a snapshot." - }, - "height": { - "label": "Snapshot image height." - }, - "retain": { - "label": "Snapshot retention.", - "properties": { - "default": { - "label": "Default retention period." - }, - "mode": { - "label": "Retain mode." - }, - "objects": { - "label": "Object retention period." - } - } - }, - "quality": { - "label": "Quality of the encoded jpeg (0-100)." - } - } -} \ No newline at end of file diff --git a/web/public/locales/en/config/telemetry.json b/web/public/locales/en/config/telemetry.json deleted file mode 100644 index 802ced2a0..000000000 --- a/web/public/locales/en/config/telemetry.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "label": "Telemetry configuration.", - "properties": { - "network_interfaces": { - "label": "Enabled network interfaces for bandwidth calculation." - }, - "stats": { - "label": "System Stats Configuration", - "properties": { - "amd_gpu_stats": { - "label": "Enable AMD GPU stats." - }, - "intel_gpu_stats": { - "label": "Enable Intel GPU stats." - }, - "network_bandwidth": { - "label": "Enable network bandwidth for ffmpeg processes." - }, - "intel_gpu_device": { - "label": "Define the device to use when gathering SR-IOV stats." - } - } - }, - "version_check": { - "label": "Enable latest version check." - } - } -} \ No newline at end of file diff --git a/web/public/locales/en/config/timestamp_style.json b/web/public/locales/en/config/timestamp_style.json deleted file mode 100644 index 6a3119423..000000000 --- a/web/public/locales/en/config/timestamp_style.json +++ /dev/null @@ -1,31 +0,0 @@ -{ - "label": "Global timestamp style configuration.", - "properties": { - "position": { - "label": "Timestamp position." - }, - "format": { - "label": "Timestamp format." 
- }, - "color": { - "label": "Timestamp color.", - "properties": { - "red": { - "label": "Red" - }, - "green": { - "label": "Green" - }, - "blue": { - "label": "Blue" - } - } - }, - "thickness": { - "label": "Timestamp thickness." - }, - "effect": { - "label": "Timestamp effect." - } - } -} \ No newline at end of file diff --git a/web/public/locales/en/config/tls.json b/web/public/locales/en/config/tls.json deleted file mode 100644 index 58493ff40..000000000 --- a/web/public/locales/en/config/tls.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "label": "TLS configuration.", - "properties": { - "enabled": { - "label": "Enable TLS for port 8971" - } - } -} \ No newline at end of file diff --git a/web/public/locales/en/config/ui.json b/web/public/locales/en/config/ui.json deleted file mode 100644 index cdd91cb53..000000000 --- a/web/public/locales/en/config/ui.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "label": "UI configuration.", - "properties": { - "timezone": { - "label": "Override UI timezone." - }, - "time_format": { - "label": "Override UI time format." - }, - "date_style": { - "label": "Override UI dateStyle." - }, - "time_style": { - "label": "Override UI timeStyle." - }, - "unit_system": { - "label": "The unit system to use for measurements." 
- } - } -} diff --git a/web/public/locales/en/config/validation.json b/web/public/locales/en/config/validation.json new file mode 100644 index 000000000..6f3b5f686 --- /dev/null +++ b/web/public/locales/en/config/validation.json @@ -0,0 +1,32 @@ +{ + "minimum": "Must be at least {{limit}}", + "maximum": "Must be at most {{limit}}", + "exclusiveMinimum": "Must be greater than {{limit}}", + "exclusiveMaximum": "Must be less than {{limit}}", + "minLength": "Must be at least {{limit}} character(s)", + "maxLength": "Must be at most {{limit}} character(s)", + "minItems": "Must have at least {{limit}} items", + "maxItems": "Must have at most {{limit}} items", + "pattern": "Invalid format", + "required": "This field is required", + "type": "Invalid value type", + "enum": "Must be one of the allowed values", + "const": "Value does not match expected constant", + "uniqueItems": "All items must be unique", + "format": "Invalid format", + "additionalProperties": "Unknown property is not allowed", + "oneOf": "Must match exactly one of the allowed schemas", + "anyOf": "Must match at least one of the allowed schemas", + "proxy": { + "header_map": { + "roleHeaderRequired": "Role header is required when role mappings are configured." + } + }, + "ffmpeg": { + "inputs": { + "rolesUnique": "Each role can only be assigned to one input stream.", + "detectRequired": "At least one input stream must be assigned the 'detect' role.", + "hwaccelDetectOnly": "Only the input stream with the detect role can define hardware acceleration arguments." + } + } +} diff --git a/web/public/locales/en/config/version.json b/web/public/locales/en/config/version.json deleted file mode 100644 index e777d7573..000000000 --- a/web/public/locales/en/config/version.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "label": "Current config version." 
-} \ No newline at end of file diff --git a/web/public/locales/en/views/settings.json b/web/public/locales/en/views/settings.json index a84c15619..4aa50ad9c 100644 --- a/web/public/locales/en/views/settings.json +++ b/web/public/locales/en/views/settings.json @@ -8,23 +8,82 @@ "masksAndZones": "Mask and Zone Editor - Frigate", "motionTuner": "Motion Tuner - Frigate", "object": "Debug - Frigate", - "general": "UI Settings - Frigate", + "general": "Profile Settings - Frigate", + "globalConfig": "Global Configuration - Frigate", + "cameraConfig": "Camera Configuration - Frigate", "frigatePlus": "Frigate+ Settings - Frigate", - "notifications": "Notification Settings - Frigate" + "notifications": "Notification Settings - Frigate", + "maintenance": "Maintenance - Frigate" }, "menu": { + "general": "General", + "globalConfig": "Global configuration", + "system": "System", + "integrations": "Integrations", + "cameras": "Camera configuration", "ui": "UI", - "enrichments": "Enrichments", + "profileSettings": "Profile settings", + "globalDetect": "Object detection", + "globalRecording": "Recording", + "globalSnapshots": "Snapshots", + "globalFfmpeg": "FFmpeg", + "globalMotion": "Motion detection", + "globalObjects": "Objects", + "globalReview": "Review", + "globalAudioEvents": "Audio events", + "globalLivePlayback": "Live playback", + "globalTimestampStyle": "Timestamp style", + "systemDatabase": "Database", + "systemTls": "TLS", + "systemAuthentication": "Authentication", + "systemNetworking": "Networking", + "systemProxy": "Proxy", + "systemUi": "UI", + "systemLogging": "Logging", + "systemEnvironmentVariables": "Environment variables", + "systemTelemetry": "Telemetry", + "systemBirdseye": "Birdseye", + "systemFfmpeg": "FFmpeg", + "systemDetectorHardware": "Detector hardware", + "systemDetectionModel": "Detection model", + "systemMqtt": "MQTT", + "integrationSemanticSearch": "Semantic search", + "integrationGenerativeAi": "Generative AI", + "integrationFaceRecognition": 
"Face recognition", + "integrationLpr": "License plate recognition", + "integrationObjectClassification": "Object classification", + "integrationAudioTranscription": "Audio transcription", + "cameraDetect": "Object detection", + "cameraFfmpeg": "FFmpeg", + "cameraRecording": "Recording", + "cameraSnapshots": "Snapshots", + "cameraMotion": "Motion detection", + "cameraObjects": "Objects", + "cameraConfigReview": "Review", + "cameraAudioEvents": "Audio events", + "cameraAudioTranscription": "Audio transcription", + "cameraNotifications": "Notifications", + "cameraLivePlayback": "Live playback", + "cameraBirdseye": "Birdseye", + "cameraFaceRecognition": "Face recognition", + "cameraLpr": "License plate recognition", + "cameraMqttConfig": "MQTT", + "cameraOnvif": "ONVIF", + "cameraUi": "Camera UI", + "cameraTimestampStyle": "Timestamp style", + "cameraMqtt": "Camera MQTT", "cameraManagement": "Management", "cameraReview": "Review", "masksAndZones": "Masks / Zones", - "motionTuner": "Motion Tuner", - "triggers": "Triggers", - "debug": "Debug", + "motionTuner": "Motion tuner", + "enrichments": "Enrichments", "users": "Users", "roles": "Roles", "notifications": "Notifications", - "frigateplus": "Frigate+" + "triggers": "Triggers", + "debug": "Debug", + "frigateplus": "Frigate+", + "maintenance": "Maintenance" }, "dialog": { "unsavedChanges": { @@ -32,12 +91,29 @@ "desc": "Do you want to save your changes before continuing?" 
} }, + "saveAllPreview": { + "title": "Changes to be saved", + "triggerLabel": "Review pending changes", + "empty": "No pending changes.", + "scope": { + "label": "Scope", + "global": "Global", + "camera": "Camera: {{cameraName}}" + }, + "field": { + "label": "Field" + }, + "value": { + "label": "New value", + "reset": "Reset" + } + }, "cameraSetting": { "camera": "Camera", "noCamera": "No Camera" }, "general": { - "title": "UI Settings", + "title": "Profile Settings", "liveDashboard": { "title": "Live Dashboard", "automaticLiveView": { @@ -106,7 +182,7 @@ "desc": "Semantic Search in Frigate allows you to find tracked objects within your review items using either the image itself, a user-defined text description, or an automatically generated one.", "reindexNow": { "label": "Reindex Now", - "desc": "Reindexing will regenerate embeddings for all tracked object. This process runs in the background and may max out your CPU and take a fair amount of time depending on the number of tracked objects you have.", + "desc": "Reindexing will regenerate embeddings for all tracked objects. This process runs in the background and may max out your CPU and take a fair amount of time depending on the number of tracked objects you have.", "confirmTitle": "Confirm Reindexing", "confirmDesc": "Are you sure you want to reindex all tracked object embeddings? This process will run in the background but it may max out your CPU and take a fair amount of time. You can watch the progress on the Explore page.", "confirmButton": "Reindex", @@ -350,7 +426,11 @@ "backToSettings": "Back to Camera Settings", "streams": { "title": "Enable / Disable Cameras", - "desc": "Temporarily disable a camera until Frigate restarts. Disabling a camera completely stops Frigate's processing of this camera's streams. Detection, recording, and debugging will be unavailable.
Note: This does not disable go2rtc restreams." + "enableLabel": "Enabled cameras", + "enableDesc": "Temporarily disable an enabled camera until Frigate restarts. Disabling a camera completely stops Frigate's processing of this camera's streams. Detection, recording, and debugging will be unavailable.
Note: This does not disable go2rtc restreams.", + "disableLabel": "Disabled cameras", + "disableDesc": "Enable a camera that is currently not visible in the UI and disabled in the configuration. A restart of Frigate is required after enabling.", + "enableSuccess": "Enabled {{cameraName}} in configuration. Restart Frigate to apply the changes." }, "cameraConfig": { "add": "Add Camera", @@ -906,6 +986,13 @@ }, "frigatePlus": { "title": "Frigate+ Settings", + "description": "Frigate+ is a subscription service that provides access to additional features and capabilities for your Frigate instance, including the ability to use custom object detection models trained on your own data. You can manage your Frigate+ model settings here.", + "cardTitles": { + "api": "API", + "currentModel": "Current Model", + "otherModels": "Other Models", + "configuration": "Configuration" + }, "apiKey": { "title": "Frigate+ API Key", "validated": "Frigate+ API key is detected and validated", @@ -947,6 +1034,15 @@ "error": "Failed to save config changes: {{errorMessage}}" } }, + "detectionModel": { + "plusActive": { + "title": "Frigate+ model management", + "label": "Current model source", + "description": "This instance is running a Frigate+ model. Select or change your model in Frigate+ settings.", + "goToFrigatePlus": "Go to Frigate+ settings", + "showModelForm": "Manually configure a model" + } + }, "triggers": { "documentTitle": "Triggers", "semanticSearch": { @@ -1115,5 +1211,178 @@ "exports": "Exports", "recordings": "Recordings" } - } + }, + "configForm": { + "global": { + "title": "Global Settings", + "description": "These settings apply to all cameras unless overridden in the camera-specific settings." + }, + "camera": { + "title": "Camera Settings", + "description": "These settings apply only to this camera and override the global settings." 
+ }, + "advancedSettingsCount": "Advanced Settings ({{count}})", + "advancedCount": "Advanced ({{count}})", + "showAdvanced": "Show Advanced Settings", + "tabs": { + "sharedDefaults": "Shared Defaults", + "system": "System", + "integrations": "Integrations" + }, + "additionalProperties": { + "keyLabel": "Key", + "valueLabel": "Value", + "keyPlaceholder": "New key", + "remove": "Remove" + }, + "timezone": { + "defaultOption": "Use browser timezone" + }, + "roleMap": { + "empty": "No role mappings", + "roleLabel": "Role", + "groupsLabel": "Groups", + "addMapping": "Add role mapping", + "remove": "Remove" + }, + "ffmpegArgs": { + "preset": "Preset", + "manual": "Manual arguments", + "inherit": "Inherit from camera setting", + "selectPreset": "Select preset", + "manualPlaceholder": "Enter FFmpeg arguments" + }, + "cameraInputs": { + "itemTitle": "Stream {{index}}" + }, + "restartRequiredField": "Restart required", + "restartRequiredFooter": "Configuration changed - Restart required", + "sections": { + "detect": "Detection", + "record": "Recording", + "snapshots": "Snapshots", + "motion": "Motion", + "objects": "Objects", + "review": "Review", + "audio": "Audio", + "notifications": "Notifications", + "live": "Live View", + "timestamp_style": "Timestamps", + "mqtt": "MQTT", + "database": "Database", + "telemetry": "Telemetry", + "auth": "Authentication", + "tls": "TLS", + "proxy": "Proxy", + "go2rtc": "go2rtc", + "ffmpeg": "FFmpeg", + "detectors": "Detectors", + "model": "Model", + "semantic_search": "Semantic Search", + "genai": "GenAI", + "face_recognition": "Face Recognition", + "lpr": "License Plate Recognition", + "birdseye": "Birdseye" + }, + "detect": { + "title": "Detection Settings" + }, + "detectors": { + "title": "Detector Settings", + "singleType": "Only one {{type}} detector is allowed.", + "keyRequired": "Detector name is required.", + "keyDuplicate": "Detector name already exists.", + "noSchema": "No detector schemas are available.", + "none": "No detector 
instances configured.", + "add": "Add detector" + }, + "record": { + "title": "Recording Settings" + }, + "snapshots": { + "title": "Snapshot Settings" + }, + "motion": { + "title": "Motion Settings" + }, + "objects": { + "title": "Object Settings" + }, + "audioLabels": { + "summary": "{{count}} audio labels selected", + "empty": "No audio labels available" + }, + "objectLabels": { + "summary": "{{count}} object types selected", + "empty": "No object labels available" + }, + "filters": { + "objectFieldLabel": "{{field}} for {{label}}" + }, + "zoneNames": { + "summary": "{{count}} selected", + "empty": "No zones available" + }, + "inputRoles": { + "summary": "{{count}} roles selected", + "empty": "No roles available", + "options": { + "detect": "Detect", + "record": "Record", + "audio": "Audio" + } + }, + "review": { + "title": "Review Settings" + }, + "audio": { + "title": "Audio Settings" + }, + "notifications": { + "title": "Notification Settings" + }, + "live": { + "title": "Live View Settings" + }, + "timestamp_style": { + "title": "Timestamp Settings" + }, + "searchPlaceholder": "Search..." + }, + "globalConfig": { + "title": "Global Configuration", + "description": "Configure global settings that apply to all cameras unless overridden.", + "toast": { + "success": "Global settings saved successfully", + "error": "Failed to save global settings", + "validationError": "Validation failed" + } + }, + "cameraConfig": { + "title": "Camera Configuration", + "description": "Configure settings for individual cameras. Settings override global defaults.", + "overriddenBadge": "Overridden", + "resetToGlobal": "Reset to Global", + "toast": { + "success": "Camera settings saved successfully", + "error": "Failed to save camera settings" + } + }, + "toast": { + "success": "Settings saved successfully", + "successRestartRequired": "Settings saved successfully. 
Restart Frigate to apply your changes.", + "error": "Failed to save settings", + "validationError": "Validation failed: {{message}}", + "resetSuccess": "Reset to global defaults", + "resetError": "Failed to reset settings", + "saveAllSuccess_one": "Saved {{count}} section successfully.", + "saveAllSuccess_other": "All {{count}} sections saved successfully.", + "saveAllPartial_one": "{{successCount}} of {{totalCount}} section saved. {{failCount}} failed.", + "saveAllPartial_other": "{{successCount}} of {{totalCount}} sections saved. {{failCount}} failed.", + "saveAllFailure": "Failed to save all sections." + }, + "unsavedChanges": "You have unsaved changes", + "confirmReset": "Confirm Reset", + "resetToDefaultDescription": "This will reset all settings in this section to their default values. This action cannot be undone.", + "resetToGlobalDescription": "This will reset the settings in this section to the global defaults. This action cannot be undone." } diff --git a/web/src/components/card/SettingsGroupCard.tsx b/web/src/components/card/SettingsGroupCard.tsx new file mode 100644 index 000000000..4bfaa1402 --- /dev/null +++ b/web/src/components/card/SettingsGroupCard.tsx @@ -0,0 +1,56 @@ +import { ReactNode } from "react"; +import { Label } from "../ui/label"; + +export const SPLIT_ROW_CLASS_NAME = + "space-y-2 md:grid md:grid-cols-[minmax(14rem,24rem)_minmax(0,1fr)] md:items-start md:gap-x-6 md:space-y-0"; +export const DESCRIPTION_CLASS_NAME = "text-sm text-muted-foreground"; +export const CONTROL_COLUMN_CLASS_NAME = "w-full md:max-w-2xl"; + +type SettingsGroupCardProps = { + title: string | ReactNode; + children: ReactNode; +}; + +export function SettingsGroupCard({ title, children }: SettingsGroupCardProps) { + return ( +
+
+ {title} +
+ {children} +
+ ); +} + +type SplitCardRowProps = { + label: ReactNode; + description?: ReactNode; + content: ReactNode; +}; + +export function SplitCardRow({ + label, + description, + content, +}: SplitCardRowProps) { + return ( +
+
+ + {description && ( +
+ {description} +
+ )} +
+
+ {content} + {description && ( +
+ {description} +
+ )} +
+
+ ); +} diff --git a/web/src/components/config-form/ConfigForm.tsx b/web/src/components/config-form/ConfigForm.tsx new file mode 100644 index 000000000..7c24c6935 --- /dev/null +++ b/web/src/components/config-form/ConfigForm.tsx @@ -0,0 +1,370 @@ +// ConfigForm - Main RJSF form wrapper component +import Form from "@rjsf/shadcn"; +import validator from "@rjsf/validator-ajv8"; +import type { FormValidation, RJSFSchema, UiSchema } from "@rjsf/utils"; +import type { IChangeEvent } from "@rjsf/core"; +import { frigateTheme } from "./theme"; +import { transformSchema } from "@/lib/config-schema"; +import { createErrorTransformer } from "@/lib/config-schema/errorMessages"; +import { useMemo, useCallback } from "react"; +import { useTranslation } from "react-i18next"; +import { cn, mergeUiSchema } from "@/lib/utils"; +import type { ConfigFormContext } from "@/types/configForm"; + +type SchemaWithProperties = RJSFSchema & { + properties: Record; +}; + +type SchemaWithAdditionalProperties = RJSFSchema & { + additionalProperties: RJSFSchema; +}; + +// Runtime guards for schema fragments +const hasSchemaProperties = ( + schema: RJSFSchema, +): schema is SchemaWithProperties => + typeof schema === "object" && + schema !== null && + typeof schema.properties === "object" && + schema.properties !== null; + +const hasSchemaAdditionalProperties = ( + schema: RJSFSchema, +): schema is SchemaWithAdditionalProperties => + typeof schema === "object" && + schema !== null && + typeof schema.additionalProperties === "object" && + schema.additionalProperties !== null; + +// Detects path-style uiSchema keys (e.g., "filters.*.mask") +const isPathKey = (key: string) => key.includes(".") || key.includes("*"); + +type UiSchemaPathOverride = { + path: string[]; + value: UiSchema; +}; + +// Split uiSchema into normal keys vs path-based overrides +const splitUiSchemaOverrides = ( + uiSchema?: UiSchema, +): { baseUiSchema?: UiSchema; pathOverrides: UiSchemaPathOverride[] } => { + if (!uiSchema) { + 
return { baseUiSchema: undefined, pathOverrides: [] }; + } + + const baseUiSchema: UiSchema = {}; + const pathOverrides: UiSchemaPathOverride[] = []; + + Object.entries(uiSchema).forEach(([key, value]) => { + if (isPathKey(key)) { + pathOverrides.push({ + path: key.split("."), + value: value as UiSchema, + }); + } else { + baseUiSchema[key] = value as UiSchema; + } + }); + + return { baseUiSchema, pathOverrides }; +}; + +// Apply wildcard path overrides to uiSchema using the schema structure +const applyUiSchemaPathOverrides = ( + uiSchema: UiSchema, + schema: RJSFSchema, + overrides: UiSchemaPathOverride[], +): UiSchema => { + if (overrides.length === 0) { + return uiSchema; + } + + // Recursively apply a path override; supports "*" to match any property. + const applyOverride = ( + targetUi: UiSchema, + targetSchema: RJSFSchema, + path: string[], + value: UiSchema, + ) => { + if (path.length === 0) { + Object.assign(targetUi, mergeUiSchema(targetUi, value)); + return; + } + + const [segment, ...rest] = path; + const schemaObj = targetSchema; + + if (segment === "*") { + if (hasSchemaProperties(schemaObj)) { + Object.entries(schemaObj.properties).forEach( + ([propertyName, propertySchema]) => { + const existing = + (targetUi[propertyName] as UiSchema | undefined) || {}; + targetUi[propertyName] = { ...existing }; + applyOverride( + targetUi[propertyName] as UiSchema, + propertySchema, + rest, + value, + ); + }, + ); + } else if (hasSchemaAdditionalProperties(schemaObj)) { + // For dict schemas, apply override to additionalProperties + const existing = + (targetUi.additionalProperties as UiSchema | undefined) || {}; + targetUi.additionalProperties = { ...existing }; + applyOverride( + targetUi.additionalProperties as UiSchema, + schemaObj.additionalProperties, + rest, + value, + ); + } + return; + } + + if (hasSchemaProperties(schemaObj)) { + const propertySchema = schemaObj.properties[segment]; + if (propertySchema) { + const existing = (targetUi[segment] as 
UiSchema | undefined) || {}; + targetUi[segment] = { ...existing }; + applyOverride( + targetUi[segment] as UiSchema, + propertySchema, + rest, + value, + ); + } + } + }; + + const updated = { ...uiSchema }; + overrides.forEach(({ path, value }) => { + applyOverride(updated, schema, path, value); + }); + + return updated; +}; + +const applyLayoutGridFieldDefaults = (uiSchema: UiSchema): UiSchema => { + const applyDefaults = (node: unknown): unknown => { + if (Array.isArray(node)) { + return node.map((item) => applyDefaults(item)); + } + + if (typeof node !== "object" || node === null) { + return node; + } + + const nextNode: Record = {}; + + Object.entries(node).forEach(([key, value]) => { + nextNode[key] = applyDefaults(value); + }); + + if ( + Array.isArray(nextNode["ui:layoutGrid"]) && + nextNode["ui:field"] === undefined + ) { + nextNode["ui:field"] = "LayoutGridField"; + } + + return nextNode; + }; + + return applyDefaults(uiSchema) as UiSchema; +}; + +export interface ConfigFormProps { + /** JSON Schema for the form */ + schema: RJSFSchema; + /** Current form data */ + formData?: unknown; + /** Called when form data changes */ + onChange?: (data: unknown) => void; + /** Called when form is submitted */ + onSubmit?: (data: unknown) => void; + /** Called when form has errors on submit */ + onError?: (errors: unknown[]) => void; + /** Additional uiSchema overrides */ + uiSchema?: UiSchema; + /** Field ordering */ + fieldOrder?: string[]; + /** Field groups for layout */ + fieldGroups?: Record; + /** Fields to hide */ + hiddenFields?: string[]; + /** Fields marked as advanced (collapsed by default) */ + advancedFields?: string[]; + /** Whether form is disabled */ + disabled?: boolean; + /** Whether form is read-only */ + readonly?: boolean; + /** Whether to show submit button */ + showSubmit?: boolean; + /** Custom class name */ + className?: string; + /** Live validation mode */ + liveValidate?: boolean; + /** Form context passed to all widgets */ + 
formContext?: ConfigFormContext; + /** i18n namespace for field labels */ + i18nNamespace?: string; + /** Optional custom validation */ + customValidate?: ( + formData: unknown, + errors: FormValidation, + ) => FormValidation; + /** Called whenever form validation state changes */ + onValidationChange?: (hasErrors: boolean) => void; +} + +export function ConfigForm({ + schema, + formData, + onChange, + onSubmit, + onError, + uiSchema: customUiSchema, + fieldOrder, + fieldGroups, + hiddenFields, + advancedFields, + disabled = false, + readonly = false, + showSubmit = false, + className, + liveValidate = true, + formContext, + i18nNamespace, + customValidate, + onValidationChange, +}: ConfigFormProps) { + const { t, i18n } = useTranslation([ + i18nNamespace || "common", + "views/settings", + "config/validation", + ]); + + // Determine which fields to hide based on advanced toggle + const effectiveHiddenFields = useMemo(() => { + return hiddenFields; + }, [hiddenFields]); + + // Transform schema and generate uiSchema + const { schema: transformedSchema, uiSchema: generatedUiSchema } = useMemo( + () => + transformSchema(schema, { + fieldOrder, + hiddenFields: effectiveHiddenFields, + advancedFields: advancedFields, + i18nNamespace, + }), + [schema, fieldOrder, effectiveHiddenFields, advancedFields, i18nNamespace], + ); + + const { baseUiSchema, pathOverrides } = useMemo( + () => splitUiSchemaOverrides(customUiSchema), + [customUiSchema], + ); + + // Merge generated uiSchema with custom overrides + const finalUiSchema = useMemo(() => { + // Start with generated schema + const expandedUiSchema = applyUiSchemaPathOverrides( + generatedUiSchema, + transformedSchema, + pathOverrides, + ); + const merged = applyLayoutGridFieldDefaults( + mergeUiSchema(expandedUiSchema, baseUiSchema), + ); + + // Add field groups + if (fieldGroups) { + merged["ui:groups"] = fieldGroups; + } + + // Set submit button options + merged["ui:submitButtonOptions"] = showSubmit + ? 
{ norender: false } + : { norender: true }; + + // Ensure hiddenFields take precedence over any custom uiSchema overrides + // Build path-based overrides for hidden fields and apply them after merging + if (hiddenFields && hiddenFields.length > 0) { + const hiddenOverrides = hiddenFields.map((field) => ({ + path: field.split("."), + value: { "ui:widget": "hidden" } as UiSchema, + })); + + return applyUiSchemaPathOverrides( + merged, + transformedSchema, + hiddenOverrides, + ); + } + + return merged; + }, [ + generatedUiSchema, + transformedSchema, + pathOverrides, + baseUiSchema, + showSubmit, + fieldGroups, + hiddenFields, + ]); + + // Create error transformer for user-friendly error messages + const errorTransformer = useMemo(() => createErrorTransformer(i18n), [i18n]); + + const handleChange = useCallback( + (e: IChangeEvent) => { + onValidationChange?.(Array.isArray(e.errors) && e.errors.length > 0); + onChange?.(e.formData); + }, + [onChange, onValidationChange], + ); + + const handleSubmit = useCallback( + (e: IChangeEvent) => { + onSubmit?.(e.formData); + }, + [onSubmit], + ); + + // Extended form context with i18n info + const extendedFormContext = useMemo( + () => ({ + ...formContext, + i18nNamespace, + t, + }), + [formContext, i18nNamespace, t], + ); + + return ( +
+
+
+ ); +} + +export default ConfigForm; diff --git a/web/src/components/config-form/section-configs/audio.ts b/web/src/components/config-form/section-configs/audio.ts new file mode 100644 index 000000000..81ddb9b0a --- /dev/null +++ b/web/src/components/config-form/section-configs/audio.ts @@ -0,0 +1,42 @@ +import type { SectionConfigOverrides } from "./types"; + +const audio: SectionConfigOverrides = { + base: { + sectionDocs: "/configuration/audio_detectors", + restartRequired: [], + fieldOrder: [ + "enabled", + "listen", + "filters", + "min_volume", + "max_not_heard", + "num_threads", + ], + fieldGroups: { + detection: ["enabled", "listen", "filters"], + sensitivity: ["min_volume", "max_not_heard"], + }, + hiddenFields: ["enabled_in_config"], + advancedFields: ["min_volume", "max_not_heard", "num_threads"], + uiSchema: { + listen: { + "ui:widget": "audioLabels", + }, + }, + }, + global: { + restartRequired: [ + "enabled", + "listen", + "filters", + "min_volume", + "max_not_heard", + "num_threads", + ], + }, + camera: { + restartRequired: ["num_threads"], + }, +}; + +export default audio; diff --git a/web/src/components/config-form/section-configs/audio_transcription.ts b/web/src/components/config-form/section-configs/audio_transcription.ts new file mode 100644 index 000000000..169a77954 --- /dev/null +++ b/web/src/components/config-form/section-configs/audio_transcription.ts @@ -0,0 +1,19 @@ +import type { SectionConfigOverrides } from "./types"; + +const audioTranscription: SectionConfigOverrides = { + base: { + sectionDocs: "/configuration/audio_detectors#audio-transcription", + restartRequired: [], + fieldOrder: ["enabled", "language", "device", "model_size"], + hiddenFields: ["enabled_in_config", "live_enabled"], + advancedFields: ["language", "device", "model_size"], + overrideFields: ["enabled", "live_enabled"], + }, + global: { + fieldOrder: ["enabled", "language", "device", "model_size"], + advancedFields: ["language", "device", "model_size"], + 
restartRequired: ["enabled", "language", "device", "model_size"], + }, +}; + +export default audioTranscription; diff --git a/web/src/components/config-form/section-configs/auth.ts b/web/src/components/config-form/section-configs/auth.ts new file mode 100644 index 000000000..6adb08007 --- /dev/null +++ b/web/src/components/config-form/section-configs/auth.ts @@ -0,0 +1,49 @@ +import type { SectionConfigOverrides } from "./types"; + +const auth: SectionConfigOverrides = { + base: { + sectionDocs: "/configuration/authentication", + restartRequired: [ + "enabled", + "reset_admin_password", + "failed_login_rate_limit", + ], + fieldOrder: [ + "enabled", + "reset_admin_password", + "cookie_name", + "cookie_secure", + "session_length", + "refresh_time", + "native_oauth_url", + "failed_login_rate_limit", + "trusted_proxies", + "hash_iterations", + "roles", + ], + hiddenFields: ["admin_first_time_login", "roles"], + advancedFields: [ + "cookie_name", + "cookie_secure", + "session_length", + "refresh_time", + "failed_login_rate_limit", + "trusted_proxies", + "hash_iterations", + "roles", + ], + uiSchema: { + reset_admin_password: { + "ui:widget": "switch", + }, + native_oauth_url: { + "ui:options": { size: "lg" }, + }, + failed_login_rate_limit: { + "ui:options": { size: "md" }, + }, + }, + }, +}; + +export default auth; diff --git a/web/src/components/config-form/section-configs/birdseye.ts b/web/src/components/config-form/section-configs/birdseye.ts new file mode 100644 index 000000000..7df38edd8 --- /dev/null +++ b/web/src/components/config-form/section-configs/birdseye.ts @@ -0,0 +1,45 @@ +import type { SectionConfigOverrides } from "./types"; + +const birdseye: SectionConfigOverrides = { + base: { + sectionDocs: "/configuration/birdseye", + restartRequired: [], + fieldOrder: ["enabled", "mode", "order"], + hiddenFields: [], + advancedFields: [], + overrideFields: ["enabled", "mode"], + }, + global: { + fieldOrder: [ + "enabled", + "restream", + "width", + "height", + 
"quality", + "mode", + "layout", + "inactivity_threshold", + "idle_heartbeat_fps", + ], + advancedFields: ["width", "height", "quality", "inactivity_threshold"], + restartRequired: [ + "enabled", + "restream", + "width", + "height", + "quality", + "mode", + "layout.scaling_factor", + "inactivity_threshold", + "layout.max_cameras", + "idle_heartbeat_fps", + ], + uiSchema: { + mode: { + "ui:size": "xs", + }, + }, + }, +}; + +export default birdseye; diff --git a/web/src/components/config-form/section-configs/classification.ts b/web/src/components/config-form/section-configs/classification.ts new file mode 100644 index 000000000..015f723bd --- /dev/null +++ b/web/src/components/config-form/section-configs/classification.ts @@ -0,0 +1,12 @@ +import type { SectionConfigOverrides } from "./types"; + +const classification: SectionConfigOverrides = { + base: { + sectionDocs: "/configuration/custom_classification/object_classification", + restartRequired: ["bird.enabled", "bird.threshold"], + hiddenFields: ["custom"], + advancedFields: [], + }, +}; + +export default classification; diff --git a/web/src/components/config-form/section-configs/database.ts b/web/src/components/config-form/section-configs/database.ts new file mode 100644 index 000000000..aa86798a9 --- /dev/null +++ b/web/src/components/config-form/section-configs/database.ts @@ -0,0 +1,17 @@ +import type { SectionConfigOverrides } from "./types"; + +const database: SectionConfigOverrides = { + base: { + sectionDocs: "/configuration/advanced#database", + restartRequired: ["path"], + fieldOrder: ["path"], + advancedFields: [], + uiSchema: { + path: { + "ui:options": { size: "md" }, + }, + }, + }, +}; + +export default database; diff --git a/web/src/components/config-form/section-configs/detect.ts b/web/src/components/config-form/section-configs/detect.ts new file mode 100644 index 000000000..2c3da7b06 --- /dev/null +++ b/web/src/components/config-form/section-configs/detect.ts @@ -0,0 +1,49 @@ +import type { 
SectionConfigOverrides } from "./types"; + +const detect: SectionConfigOverrides = { + base: { + sectionDocs: "/configuration/camera_specific", + fieldOrder: [ + "enabled", + "width", + "height", + "fps", + "min_initialized", + "max_disappeared", + "annotation_offset", + "stationary", + "interval", + "threshold", + "max_frames", + ], + restartRequired: [], + fieldGroups: { + resolution: ["enabled", "width", "height", "fps"], + tracking: ["min_initialized", "max_disappeared"], + }, + hiddenFields: ["enabled_in_config"], + advancedFields: [ + "min_initialized", + "max_disappeared", + "annotation_offset", + "stationary", + ], + }, + global: { + restartRequired: [ + "enabled", + "width", + "height", + "fps", + "min_initialized", + "max_disappeared", + "annotation_offset", + "stationary", + ], + }, + camera: { + restartRequired: ["width", "height", "min_initialized", "max_disappeared"], + }, +}; + +export default detect; diff --git a/web/src/components/config-form/section-configs/detectors.ts b/web/src/components/config-form/section-configs/detectors.ts new file mode 100644 index 000000000..3ca2dd81d --- /dev/null +++ b/web/src/components/config-form/section-configs/detectors.ts @@ -0,0 +1,28 @@ +import type { SectionConfigOverrides } from "./types"; + +const detectorHiddenFields = [ + "*.model.labelmap", + "*.model.attributes_map", + "*.model", + "*.model_path", +]; + +const detectors: SectionConfigOverrides = { + base: { + sectionDocs: "/configuration/object_detectors", + fieldOrder: [], + advancedFields: [], + hiddenFields: detectorHiddenFields, + uiSchema: { + "ui:field": "DetectorHardwareField", + "ui:options": { + multiInstanceTypes: ["cpu", "onnx", "openvino"], + typeOrder: ["onnx", "openvino", "edgetpu"], + hiddenByType: {}, + hiddenFields: detectorHiddenFields, + }, + }, + }, +}; + +export default detectors; diff --git a/web/src/components/config-form/section-configs/environment_vars.ts b/web/src/components/config-form/section-configs/environment_vars.ts new 
file mode 100644 index 000000000..2100d3e35 --- /dev/null +++ b/web/src/components/config-form/section-configs/environment_vars.ts @@ -0,0 +1,16 @@ +import type { SectionConfigOverrides } from "./types"; + +const environmentVars: SectionConfigOverrides = { + base: { + sectionDocs: "/configuration/advanced#environment_vars", + fieldOrder: [], + advancedFields: [], + uiSchema: { + additionalProperties: { + "ui:options": { size: "lg" }, + }, + }, + }, +}; + +export default environmentVars; diff --git a/web/src/components/config-form/section-configs/face_recognition.ts b/web/src/components/config-form/section-configs/face_recognition.ts new file mode 100644 index 000000000..2f5b2dd46 --- /dev/null +++ b/web/src/components/config-form/section-configs/face_recognition.ts @@ -0,0 +1,50 @@ +import type { SectionConfigOverrides } from "./types"; + +const faceRecognition: SectionConfigOverrides = { + base: { + sectionDocs: "/configuration/face_recognition", + restartRequired: [], + fieldOrder: ["enabled", "min_area"], + hiddenFields: [], + advancedFields: ["min_area"], + overrideFields: ["enabled", "min_area"], + }, + global: { + fieldOrder: [ + "enabled", + "model_size", + "unknown_score", + "detection_threshold", + "recognition_threshold", + "min_area", + "min_faces", + "save_attempts", + "blur_confidence_filter", + "device", + ], + advancedFields: [ + "unknown_score", + "detection_threshold", + "recognition_threshold", + "min_area", + "min_faces", + "save_attempts", + "blur_confidence_filter", + "device", + ], + restartRequired: [ + "enabled", + "model_size", + "unknown_score", + "detection_threshold", + "recognition_threshold", + "min_area", + "min_faces", + "save_attempts", + "blur_confidence_filter", + "device", + ], + }, +}; + +export default faceRecognition; diff --git a/web/src/components/config-form/section-configs/ffmpeg.ts b/web/src/components/config-form/section-configs/ffmpeg.ts new file mode 100644 index 000000000..ccbca5609 --- /dev/null +++ 
b/web/src/components/config-form/section-configs/ffmpeg.ts @@ -0,0 +1,179 @@ +import type { SectionConfigOverrides } from "./types"; + +const arrayAsTextWidget = { + "ui:widget": "ArrayAsTextWidget", + "ui:options": { + suppressMultiSchema: true, + }, +}; + +const ffmpegArgsWidget = ( + presetField: string, + extraOptions?: Record, +) => ({ + "ui:widget": "FfmpegArgsWidget", + "ui:options": { + suppressMultiSchema: true, + ffmpegPresetField: presetField, + ...extraOptions, + }, +}); + +const ffmpeg: SectionConfigOverrides = { + base: { + sectionDocs: "/configuration/ffmpeg_presets", + fieldDocs: { + hwaccel_args: "/configuration/ffmpeg_presets#hwaccel-presets", + "inputs.hwaccel_args": "/configuration/ffmpeg_presets#hwaccel-presets", + input_args: "/configuration/ffmpeg_presets#input-args-presets", + "inputs.input_args": "/configuration/ffmpeg_presets#input-args-presets", + output_args: "/configuration/ffmpeg_presets#output-args-presets", + "inputs.output_args": "/configuration/ffmpeg_presets#output-args-presets", + "output_args.record": "/configuration/ffmpeg_presets#output-args-presets", + "inputs.roles": "/configuration/cameras/#setting-up-camera-inputs", + }, + restartRequired: [], + fieldOrder: [ + "inputs", + "global_args", + "input_args", + "hwaccel_args", + "output_args", + "path", + "retry_interval", + "apple_compatibility", + "gpu", + ], + hiddenFields: [], + advancedFields: [ + "path", + "global_args", + "retry_interval", + "apple_compatibility", + "gpu", + ], + overrideFields: [ + "inputs", + "path", + "global_args", + "input_args", + "hwaccel_args", + "output_args", + "retry_interval", + "apple_compatibility", + "gpu", + ], + uiSchema: { + path: { + "ui:options": { size: "md" }, + }, + global_args: arrayAsTextWidget, + hwaccel_args: ffmpegArgsWidget("hwaccel_args"), + input_args: ffmpegArgsWidget("input_args"), + output_args: { + detect: arrayAsTextWidget, + record: ffmpegArgsWidget("output_args.record"), + items: { + detect: arrayAsTextWidget, + 
record: ffmpegArgsWidget("output_args.record"), + }, + }, + inputs: { + "ui:field": "CameraInputsField", + items: { + path: { + "ui:options": { size: "full" }, + }, + roles: { + "ui:widget": "inputRoles", + "ui:options": { + showArrayItemDescription: true, + }, + }, + global_args: { + "ui:widget": "hidden", + }, + hwaccel_args: ffmpegArgsWidget("hwaccel_args", { + allowInherit: true, + hideDescription: true, + forceSplitLayout: true, + showArrayItemDescription: true, + }), + input_args: ffmpegArgsWidget("input_args", { + allowInherit: true, + hideDescription: true, + forceSplitLayout: true, + showArrayItemDescription: true, + }), + output_args: { + items: { + detect: arrayAsTextWidget, + record: ffmpegArgsWidget("output_args.record"), + }, + }, + }, + }, + }, + }, + global: { + restartRequired: [ + "path", + "global_args", + "hwaccel_args", + "input_args", + "output_args", + "retry_interval", + "apple_compatibility", + "gpu", + ], + fieldOrder: [ + "hwaccel_args", + "path", + "global_args", + "input_args", + "output_args", + "retry_interval", + "apple_compatibility", + "gpu", + ], + advancedFields: [ + "global_args", + "input_args", + "output_args", + "path", + "retry_interval", + "apple_compatibility", + "gpu", + ], + uiSchema: { + path: { + "ui:options": { size: "md" }, + }, + global_args: arrayAsTextWidget, + hwaccel_args: ffmpegArgsWidget("hwaccel_args"), + input_args: ffmpegArgsWidget("input_args"), + output_args: { + detect: arrayAsTextWidget, + record: ffmpegArgsWidget("output_args.record"), + }, + }, + }, + camera: { + fieldGroups: { + cameraFfmpeg: ["input_args", "hwaccel_args", "output_args"], + }, + restartRequired: [ + "inputs", + "path", + "global_args", + "hwaccel_args", + "input_args", + "output_args", + "retry_interval", + "apple_compatibility", + "gpu", + ], + }, +}; + +export default ffmpeg; diff --git a/web/src/components/config-form/section-configs/genai.ts b/web/src/components/config-form/section-configs/genai.ts new file mode 100644 index 
000000000..739659496 --- /dev/null +++ b/web/src/components/config-form/section-configs/genai.ts @@ -0,0 +1,48 @@ +import type { SectionConfigOverrides } from "./types"; + +const genai: SectionConfigOverrides = { + base: { + sectionDocs: "/configuration/genai/config", + restartRequired: [ + "provider", + "api_key", + "base_url", + "model", + "provider_options", + "runtime_options", + ], + fieldOrder: [ + "provider", + "api_key", + "base_url", + "model", + "provider_options", + "runtime_options", + ], + advancedFields: ["base_url", "provider_options", "runtime_options"], + hiddenFields: ["genai.enabled_in_config"], + uiSchema: { + api_key: { + "ui:options": { size: "md" }, + }, + base_url: { + "ui:options": { size: "lg" }, + }, + model: { + "ui:options": { size: "md" }, + }, + provider_options: { + additionalProperties: { + "ui:options": { size: "lg" }, + }, + }, + runtime_options: { + additionalProperties: { + "ui:options": { size: "lg" }, + }, + }, + }, + }, +}; + +export default genai; diff --git a/web/src/components/config-form/section-configs/live.ts b/web/src/components/config-form/section-configs/live.ts new file mode 100644 index 000000000..c0d80627c --- /dev/null +++ b/web/src/components/config-form/section-configs/live.ts @@ -0,0 +1,21 @@ +import type { SectionConfigOverrides } from "./types"; + +const live: SectionConfigOverrides = { + base: { + sectionDocs: "/configuration/live", + restartRequired: [], + fieldOrder: ["stream_name", "height", "quality"], + fieldGroups: {}, + hiddenFields: ["enabled_in_config"], + advancedFields: ["height", "quality"], + }, + global: { + restartRequired: ["stream_name", "height", "quality"], + hiddenFields: ["streams"], + }, + camera: { + restartRequired: ["height", "quality"], + }, +}; + +export default live; diff --git a/web/src/components/config-form/section-configs/logger.ts b/web/src/components/config-form/section-configs/logger.ts new file mode 100644 index 000000000..8100ee4c1 --- /dev/null +++ 
b/web/src/components/config-form/section-configs/logger.ts @@ -0,0 +1,12 @@ +import type { SectionConfigOverrides } from "./types"; + +const logger: SectionConfigOverrides = { + base: { + sectionDocs: "/configuration/advanced#logger", + restartRequired: ["default", "logs"], + fieldOrder: ["default", "logs"], + advancedFields: ["logs"], + }, +}; + +export default logger; diff --git a/web/src/components/config-form/section-configs/lpr.ts b/web/src/components/config-form/section-configs/lpr.ts new file mode 100644 index 000000000..3e2561f64 --- /dev/null +++ b/web/src/components/config-form/section-configs/lpr.ts @@ -0,0 +1,73 @@ +import type { SectionConfigOverrides } from "./types"; + +const lpr: SectionConfigOverrides = { + base: { + sectionDocs: "/configuration/license_plate_recognition", + fieldDocs: { + enhancement: "/configuration/license_plate_recognition#enhancement", + }, + restartRequired: [], + fieldOrder: ["enabled", "expire_time", "min_area", "enhancement"], + hiddenFields: [], + advancedFields: ["expire_time", "min_area", "enhancement"], + overrideFields: ["enabled", "min_area", "enhancement"], + }, + global: { + fieldOrder: [ + "enabled", + "model_size", + "detection_threshold", + "min_area", + "recognition_threshold", + "min_plate_length", + "format", + "match_distance", + "known_plates", + "enhancement", + "debug_save_plates", + "device", + "replace_rules", + ], + advancedFields: [ + "detection_threshold", + "recognition_threshold", + "min_plate_length", + "format", + "match_distance", + "known_plates", + "enhancement", + "debug_save_plates", + "device", + "replace_rules", + ], + restartRequired: [ + "enabled", + "model_size", + "detection_threshold", + "min_area", + "recognition_threshold", + "min_plate_length", + "format", + "match_distance", + "known_plates", + "enhancement", + "debug_save_plates", + "device", + "replace_rules", + ], + uiSchema: { + format: { + "ui:options": { size: "md" }, + }, + replace_rules: { + "ui:field": 
"ReplaceRulesField", + "ui:options": { + label: false, + suppressDescription: true, + }, + }, + }, + }, +}; + +export default lpr; diff --git a/web/src/components/config-form/section-configs/model.ts b/web/src/components/config-form/section-configs/model.ts new file mode 100644 index 000000000..c94a1f2cf --- /dev/null +++ b/web/src/components/config-form/section-configs/model.ts @@ -0,0 +1,53 @@ +import type { SectionConfigOverrides } from "./types"; + +const model: SectionConfigOverrides = { + base: { + sectionDocs: "/configuration/object_detectors#model", + restartRequired: [ + "path", + "labelmap_path", + "width", + "height", + "labelmap", + "attributes_map", + "input_tensor", + "input_pixel_format", + "input_dtype", + "model_type", + ], + fieldOrder: [ + "path", + "labelmap_path", + "width", + "height", + "input_pixel_format", + "input_tensor", + "input_dtype", + "model_type", + ], + advancedFields: [ + "input_pixel_format", + "input_tensor", + "input_dtype", + "model_type", + ], + hiddenFields: [ + "labelmap", + "attributes_map", + "colormap", + "all_attributes", + "non_logo_attributes", + "plus", + ], + uiSchema: { + path: { + "ui:options": { size: "md" }, + }, + labelmap_path: { + "ui:options": { size: "md" }, + }, + }, + }, +}; + +export default model; diff --git a/web/src/components/config-form/section-configs/motion.ts b/web/src/components/config-form/section-configs/motion.ts new file mode 100644 index 000000000..0acdc0d99 --- /dev/null +++ b/web/src/components/config-form/section-configs/motion.ts @@ -0,0 +1,49 @@ +import type { SectionConfigOverrides } from "./types"; + +const motion: SectionConfigOverrides = { + base: { + sectionDocs: "/configuration/motion_detection", + restartRequired: [], + fieldOrder: [ + "enabled", + "threshold", + "lightning_threshold", + "improve_contrast", + "contour_area", + "delta_alpha", + "frame_alpha", + "frame_height", + "mqtt_off_delay", + ], + fieldGroups: { + sensitivity: ["enabled", "threshold", "contour_area"], + 
algorithm: ["improve_contrast", "delta_alpha", "frame_alpha"], + }, + hiddenFields: ["enabled_in_config", "mask", "raw_mask"], + advancedFields: [ + "lightning_threshold", + "delta_alpha", + "frame_alpha", + "frame_height", + "mqtt_off_delay", + ], + }, + global: { + restartRequired: [ + "enabled", + "threshold", + "lightning_threshold", + "improve_contrast", + "contour_area", + "delta_alpha", + "frame_alpha", + "frame_height", + "mqtt_off_delay", + ], + }, + camera: { + restartRequired: ["frame_height"], + }, +}; + +export default motion; diff --git a/web/src/components/config-form/section-configs/mqtt.ts b/web/src/components/config-form/section-configs/mqtt.ts new file mode 100644 index 000000000..67d863b08 --- /dev/null +++ b/web/src/components/config-form/section-configs/mqtt.ts @@ -0,0 +1,73 @@ +import type { SectionConfigOverrides } from "./types"; + +const mqtt: SectionConfigOverrides = { + base: { + sectionDocs: "/integrations/mqtt", + restartRequired: [], + fieldOrder: [ + "enabled", + "timestamp", + "bounding_box", + "crop", + "height", + "required_zones", + "quality", + ], + hiddenFields: [], + advancedFields: ["height", "quality"], + overrideFields: [], + uiSchema: { + required_zones: { + "ui:widget": "zoneNames", + }, + }, + }, + global: { + fieldOrder: [ + "enabled", + "host", + "port", + "user", + "password", + "topic_prefix", + "client_id", + "stats_interval", + "qos", + "tls_ca_certs", + "tls_client_cert", + "tls_client_key", + "tls_insecure", + ], + advancedFields: [ + "stats_interval", + "qos", + "tls_ca_certs", + "tls_client_cert", + "tls_client_key", + "tls_insecure", + ], + restartRequired: [ + "enabled", + "host", + "port", + "user", + "password", + "topic_prefix", + "client_id", + "stats_interval", + "qos", + "tls_ca_certs", + "tls_client_cert", + "tls_client_key", + "tls_insecure", + ], + liveValidate: true, + uiSchema: { + password: { + "ui:options": { size: "xs" }, + }, + }, + }, +}; + +export default mqtt; diff --git 
a/web/src/components/config-form/section-configs/networking.ts b/web/src/components/config-form/section-configs/networking.ts new file mode 100644 index 000000000..a7ed95bf0 --- /dev/null +++ b/web/src/components/config-form/section-configs/networking.ts @@ -0,0 +1,30 @@ +import type { SectionConfigOverrides } from "./types"; + +const networking: SectionConfigOverrides = { + base: { + sectionDocs: "/configuration/advanced", + fieldDocs: { + "listen.internal": "/configuration/advanced#listen-on-different-ports", + "listen.external": "/configuration/advanced#listen-on-different-ports", + }, + restartRequired: ["ipv6.enabled", "listen.internal", "listen.external"], + fieldOrder: [], + advancedFields: [], + uiSchema: { + "listen.internal": { + "ui:options": { + suppressMultiSchema: true, + size: "sm", + }, + }, + "listen.external": { + "ui:options": { + suppressMultiSchema: true, + size: "sm", + }, + }, + }, + }, +}; + +export default networking; diff --git a/web/src/components/config-form/section-configs/notifications.ts b/web/src/components/config-form/section-configs/notifications.ts new file mode 100644 index 000000000..68fd78f78 --- /dev/null +++ b/web/src/components/config-form/section-configs/notifications.ts @@ -0,0 +1,26 @@ +import type { SectionConfigOverrides } from "./types"; + +const notifications: SectionConfigOverrides = { + base: { + sectionDocs: "/configuration/notifications", + restartRequired: [], + fieldOrder: ["enabled", "email"], + fieldGroups: {}, + hiddenFields: ["enabled_in_config"], + advancedFields: [], + }, + global: { + uiSchema: { + "ui:before": { render: "NotificationsSettingsExtras" }, + enabled: { "ui:widget": "hidden" }, + email: { "ui:widget": "hidden" }, + cooldown: { "ui:widget": "hidden" }, + enabled_in_config: { "ui:widget": "hidden" }, + }, + }, + camera: { + hiddenFields: ["enabled_in_config", "email"], + }, +}; + +export default notifications; diff --git a/web/src/components/config-form/section-configs/objects.ts 
b/web/src/components/config-form/section-configs/objects.ts new file mode 100644 index 000000000..1dfb31053 --- /dev/null +++ b/web/src/components/config-form/section-configs/objects.ts @@ -0,0 +1,104 @@ +import type { SectionConfigOverrides } from "./types"; + +const objects: SectionConfigOverrides = { + base: { + sectionDocs: "/configuration/object_filters", + fieldDocs: { + "filters.min_area": "/configuration/object_filters#object-area", + "filters.max_area": "/configuration/object_filters#object-area", + "filters.min_score": "/configuration/object_filters#minimum-score", + "filters.threshold": "/configuration/object_filters#threshold", + "filters.min_ratio": "/configuration/object_filters/#object-proportions", + "filters.max_ratio": "/configuration/object_filters/#object-proportions", + }, + restartRequired: [], + fieldOrder: ["track", "alert", "detect", "filters"], + fieldGroups: { + tracking: ["track", "alert", "detect"], + filtering: ["filters"], + }, + hiddenFields: [ + "enabled_in_config", + "mask", + "raw_mask", + "genai.enabled_in_config", + "filters.*.mask", + "filters.*.raw_mask", + "filters.mask", + "filters.raw_mask", + ], + advancedFields: ["genai"], + uiSchema: { + "filters.*.min_area": { + "ui:options": { + suppressMultiSchema: true, + }, + }, + "filters.*": { + "ui:options": { + additionalPropertyKeyReadonly: true, + }, + }, + "filters.*.max_area": { + "ui:options": { + suppressMultiSchema: true, + }, + }, + track: { + "ui:widget": "objectLabels", + "ui:options": { + suppressMultiSchema: true, + }, + }, + genai: { + objects: { + "ui:widget": "objectLabels", + "ui:options": { + suppressMultiSchema: true, + }, + }, + prompt: { + "ui:widget": "textarea", + "ui:options": { + size: "full", + }, + }, + object_prompts: { + additionalProperties: { + "ui:options": { + size: "full", + }, + }, + }, + required_zones: { + "ui:widget": "zoneNames", + "ui:options": { + suppressMultiSchema: true, + }, + }, + enabled_in_config: { + "ui:widget": "hidden", + }, + 
}, + }, + }, + global: { + restartRequired: ["track", "alert", "detect", "filters", "genai"], + hiddenFields: [ + "enabled_in_config", + "mask", + "raw_mask", + "genai.enabled_in_config", + "filters.*.mask", + "filters.*.raw_mask", + "filters.mask", + "filters.raw_mask", + "genai.required_zones", + ], + }, + camera: { + restartRequired: [], + }, +}; + +export default objects; diff --git a/web/src/components/config-form/section-configs/onvif.ts b/web/src/components/config-form/section-configs/onvif.ts new file mode 100644 index 000000000..b8be693d6 --- /dev/null +++ b/web/src/components/config-form/section-configs/onvif.ts @@ -0,0 +1,46 @@ +import type { SectionConfigOverrides } from "./types"; + +const onvif: SectionConfigOverrides = { + base: { + sectionDocs: "/configuration/cameras#setting-up-camera-ptz-controls", + restartRequired: [ + "host", + "port", + "user", + "password", + "tls_insecure", + "ignore_time_mismatch", + "autotracking.calibrate_on_startup", + ], + fieldOrder: [ + "host", + "port", + "user", + "password", + "tls_insecure", + "ignore_time_mismatch", + "autotracking", + ], + hiddenFields: [ + "autotracking.enabled_in_config", + "autotracking.movement_weights", + ], + advancedFields: ["tls_insecure", "ignore_time_mismatch"], + overrideFields: [], + uiSchema: { + host: { + "ui:options": { size: "sm" }, + }, + autotracking: { + required_zones: { + "ui:widget": "zoneNames", + }, + track: { + "ui:widget": "objectLabels", + }, + }, + }, + }, +}; + +export default onvif; diff --git a/web/src/components/config-form/section-configs/proxy.ts b/web/src/components/config-form/section-configs/proxy.ts new file mode 100644 index 000000000..ffdb27cf9 --- /dev/null +++ b/web/src/components/config-form/section-configs/proxy.ts @@ -0,0 +1,33 @@ +import type { SectionConfigOverrides } from "./types"; + +const proxy: SectionConfigOverrides = { + base: { + sectionDocs: "/configuration/authentication#proxy", + restartRequired: [], + fieldOrder: [ + "header_map", + 
"logout_url", + "auth_secret", + "default_role", + "separator", + ], + advancedFields: ["header_map", "auth_secret", "separator"], + liveValidate: true, + uiSchema: { + logout_url: { + "ui:options": { size: "lg" }, + }, + auth_secret: { + "ui:options": { size: "md" }, + }, + header_map: { + "ui:after": { render: "ProxyRoleMap" }, + }, + "header_map.role_map": { + "ui:widget": "hidden", + }, + }, + }, +}; + +export default proxy; diff --git a/web/src/components/config-form/section-configs/record.ts b/web/src/components/config-form/section-configs/record.ts new file mode 100644 index 000000000..c47d67ad0 --- /dev/null +++ b/web/src/components/config-form/section-configs/record.ts @@ -0,0 +1,48 @@ +import type { SectionConfigOverrides } from "./types"; + +const record: SectionConfigOverrides = { + base: { + sectionDocs: "/configuration/record", + restartRequired: [], + fieldOrder: [ + "enabled", + "expire_interval", + "continuous", + "motion", + "alerts", + "detections", + "preview", + "export", + ], + fieldGroups: { + retention: ["enabled", "continuous", "motion"], + events: ["alerts", "detections"], + }, + hiddenFields: ["enabled_in_config", "sync_recordings"], + advancedFields: ["expire_interval", "preview", "export"], + uiSchema: { + export: { + hwaccel_args: { + "ui:options": { size: "lg" }, + }, + }, + }, + }, + global: { + restartRequired: [ + "enabled", + "expire_interval", + "continuous", + "motion", + "alerts", + "detections", + "preview", + "export", + ], + }, + camera: { + restartRequired: [], + }, +}; + +export default record; diff --git a/web/src/components/config-form/section-configs/review.ts b/web/src/components/config-form/section-configs/review.ts new file mode 100644 index 000000000..bb3ec4ca4 --- /dev/null +++ b/web/src/components/config-form/section-configs/review.ts @@ -0,0 +1,54 @@ +import type { SectionConfigOverrides } from "./types"; + +const review: SectionConfigOverrides = { + base: { + sectionDocs: "/configuration/review", + 
restartRequired: [], + fieldOrder: ["alerts", "detections", "genai"], + fieldGroups: {}, + hiddenFields: [ + "enabled_in_config", + "alerts.labels", + "alerts.enabled_in_config", + "detections.labels", + "detections.enabled_in_config", + "genai.enabled_in_config", + ], + advancedFields: [], + uiSchema: { + alerts: { + "ui:before": { render: "CameraReviewStatusToggles" }, + required_zones: { + "ui:widget": "hidden", + }, + }, + detections: { + required_zones: { + "ui:widget": "hidden", + }, + }, + genai: { + additional_concerns: { + "ui:widget": "textarea", + "ui:options": { + size: "full", + }, + }, + activity_context_prompt: { + "ui:widget": "textarea", + "ui:options": { + size: "full", + }, + }, + }, + }, + }, + global: { + restartRequired: ["alerts", "detections", "genai"], + }, + camera: { + restartRequired: [], + }, +}; + +export default review; diff --git a/web/src/components/config-form/section-configs/semantic_search.ts b/web/src/components/config-form/section-configs/semantic_search.ts new file mode 100644 index 000000000..2fea46782 --- /dev/null +++ b/web/src/components/config-form/section-configs/semantic_search.ts @@ -0,0 +1,24 @@ +import type { SectionConfigOverrides } from "./types"; + +const semanticSearch: SectionConfigOverrides = { + base: { + sectionDocs: "/configuration/semantic_search", + restartRequired: [], + hiddenFields: [], + advancedFields: [], + overrideFields: [], + uiSchema: { + enabled: { + "ui:after": { render: "SemanticSearchReindex" }, + }, + }, + }, + global: { + fieldOrder: ["enabled", "reindex", "model", "model_size", "device"], + advancedFields: ["reindex", "device"], + restartRequired: ["enabled", "model", "model_size", "device"], + hiddenFields: ["reindex"], + }, +}; + +export default semanticSearch; diff --git a/web/src/components/config-form/section-configs/snapshots.ts b/web/src/components/config-form/section-configs/snapshots.ts new file mode 100644 index 000000000..b098d84a5 --- /dev/null +++ 
b/web/src/components/config-form/section-configs/snapshots.ts @@ -0,0 +1,45 @@ +import type { SectionConfigOverrides } from "./types"; + +const snapshots: SectionConfigOverrides = { + base: { + sectionDocs: "/configuration/snapshots", + restartRequired: [], + fieldOrder: [ + "enabled", + "bounding_box", + "crop", + "quality", + "timestamp", + "retain", + ], + fieldGroups: { + display: ["enabled", "bounding_box", "crop", "quality", "timestamp"], + }, + hiddenFields: ["enabled_in_config"], + advancedFields: ["height", "quality", "retain"], + uiSchema: { + required_zones: { + "ui:widget": "zoneNames", + "ui:options": { + suppressMultiSchema: true, + }, + }, + }, + }, + global: { + restartRequired: [ + "enabled", + "bounding_box", + "crop", + "quality", + "timestamp", + "retain", + ], + hiddenFields: ["enabled_in_config", "required_zones"], + }, + camera: { + restartRequired: [], + }, +}; + +export default snapshots; diff --git a/web/src/components/config-form/section-configs/telemetry.ts b/web/src/components/config-form/section-configs/telemetry.ts new file mode 100644 index 000000000..f197e3bcf --- /dev/null +++ b/web/src/components/config-form/section-configs/telemetry.ts @@ -0,0 +1,19 @@ +import type { SectionConfigOverrides } from "./types"; + +const telemetry: SectionConfigOverrides = { + base: { + sectionDocs: "/configuration/reference", + restartRequired: [ + "network_interfaces", + "stats.amd_gpu_stats", + "stats.intel_gpu_stats", + "stats.intel_gpu_device", + "stats.network_bandwidth", + "version_check", + ], + fieldOrder: ["network_interfaces", "stats", "version_check"], + advancedFields: [], + }, +}; + +export default telemetry; diff --git a/web/src/components/config-form/section-configs/timestamp_style.ts b/web/src/components/config-form/section-configs/timestamp_style.ts new file mode 100644 index 000000000..2f51b2416 --- /dev/null +++ b/web/src/components/config-form/section-configs/timestamp_style.ts @@ -0,0 +1,27 @@ +import type { 
SectionConfigOverrides } from "./types"; + +const timestampStyle: SectionConfigOverrides = { + base: { + sectionDocs: "/configuration/reference", + restartRequired: [], + fieldOrder: ["position", "format", "color", "thickness"], + hiddenFields: ["effect", "enabled_in_config"], + advancedFields: [], + uiSchema: { + position: { + "ui:size": "xs", + }, + format: { + "ui:size": "xs", + }, + }, + }, + global: { + restartRequired: ["position", "format", "color", "thickness", "effect"], + }, + camera: { + restartRequired: [], + }, +}; + +export default timestampStyle; diff --git a/web/src/components/config-form/section-configs/tls.ts b/web/src/components/config-form/section-configs/tls.ts new file mode 100644 index 000000000..07e8b53bc --- /dev/null +++ b/web/src/components/config-form/section-configs/tls.ts @@ -0,0 +1,20 @@ +import type { SectionConfigOverrides } from "./types"; + +const tls: SectionConfigOverrides = { + base: { + sectionDocs: "/configuration/tls", + restartRequired: ["enabled"], + fieldOrder: ["enabled", "cert", "key"], + advancedFields: [], + uiSchema: { + cert: { + "ui:options": { size: "md" }, + }, + key: { + "ui:options": { size: "md" }, + }, + }, + }, +}; + +export default tls; diff --git a/web/src/components/config-form/section-configs/types.ts b/web/src/components/config-form/section-configs/types.ts new file mode 100644 index 000000000..600a3ca50 --- /dev/null +++ b/web/src/components/config-form/section-configs/types.ts @@ -0,0 +1,7 @@ +import type { SectionConfig } from "../sections/BaseSection"; + +export type SectionConfigOverrides = { + base?: SectionConfig; + global?: Partial<SectionConfig>; + camera?: Partial<SectionConfig>; +}; diff --git a/web/src/components/config-form/section-configs/ui.ts b/web/src/components/config-form/section-configs/ui.ts new file mode 100644 index 000000000..2de77d879 --- /dev/null +++ b/web/src/components/config-form/section-configs/ui.ts @@ -0,0 +1,30 @@ +import type { SectionConfigOverrides } from "./types"; + +const ui: 
SectionConfigOverrides = { + base: { + sectionDocs: "/configuration/reference", + restartRequired: [], + fieldOrder: ["dashboard", "order"], + hiddenFields: [], + advancedFields: [], + overrideFields: [], + }, + global: { + fieldOrder: [ + "timezone", + "time_format", + "date_style", + "time_style", + "unit_system", + ], + advancedFields: [], + restartRequired: ["unit_system"], + uiSchema: { + timezone: { + "ui:widget": "timezoneSelect", + }, + }, + }, +}; + +export default ui; diff --git a/web/src/components/config-form/section-validations/ffmpeg.ts b/web/src/components/config-form/section-validations/ffmpeg.ts new file mode 100644 index 000000000..d751a84db --- /dev/null +++ b/web/src/components/config-form/section-validations/ffmpeg.ts @@ -0,0 +1,84 @@ +import type { FormValidation } from "@rjsf/utils"; +import type { TFunction } from "i18next"; +import { isJsonObject } from "@/lib/utils"; +import type { JsonObject } from "@/types/configForm"; + +function hasValue(value: unknown): boolean { + if (value === null || value === undefined || value === "") { + return false; + } + + if (Array.isArray(value)) { + return value.length > 0; + } + + return true; +} + +export function validateFfmpegInputRoles( + formData: unknown, + errors: FormValidation, + t: TFunction, +): FormValidation { + if (!isJsonObject(formData as JsonObject)) { + return errors; + } + + const inputs = (formData as JsonObject).inputs; + if (!Array.isArray(inputs)) { + return errors; + } + + const roleCounts = new Map(); + let hasDetect = false; + let hasInvalidHwaccel = false; + inputs.forEach((input) => { + if (!isJsonObject(input) || !Array.isArray(input.roles)) { + return; + } + input.roles.forEach((role) => { + if (typeof role !== "string") { + return; + } + roleCounts.set(role, (roleCounts.get(role) || 0) + 1); + }); + if (input.roles.includes("detect")) { + hasDetect = true; + } else if (hasValue(input.hwaccel_args)) { + hasInvalidHwaccel = true; + } + }); + + const hasDuplicates = 
Array.from(roleCounts.values()).some( + (count) => count > 1, + ); + + if (hasDuplicates) { + const inputsErrors = errors.inputs as { + addError?: (message: string) => void; + }; + inputsErrors?.addError?.( + t("ffmpeg.inputs.rolesUnique", { ns: "config/validation" }), + ); + } + + if (!hasDetect) { + const inputsErrors = errors.inputs as { + addError?: (message: string) => void; + }; + inputsErrors?.addError?.( + t("ffmpeg.inputs.detectRequired", { ns: "config/validation" }), + ); + } + + if (hasInvalidHwaccel) { + const inputsErrors = errors.inputs as { + addError?: (message: string) => void; + }; + inputsErrors?.addError?.( + t("ffmpeg.inputs.hwaccelDetectOnly", { ns: "config/validation" }), + ); + } + + return errors; +} diff --git a/web/src/components/config-form/section-validations/index.ts b/web/src/components/config-form/section-validations/index.ts new file mode 100644 index 000000000..31a02a1d1 --- /dev/null +++ b/web/src/components/config-form/section-validations/index.ts @@ -0,0 +1,31 @@ +import type { FormValidation } from "@rjsf/utils"; +import type { TFunction } from "i18next"; +import { validateFfmpegInputRoles } from "./ffmpeg"; +import { validateProxyRoleHeader } from "./proxy"; + +export type SectionValidation = ( + formData: unknown, + errors: FormValidation, +) => FormValidation; + +type SectionValidationOptions = { + sectionPath: string; + level: "global" | "camera"; + t: TFunction; +}; + +export function getSectionValidation({ + sectionPath, + level, + t, +}: SectionValidationOptions): SectionValidation | undefined { + if (sectionPath === "ffmpeg" && level === "camera") { + return (formData, errors) => validateFfmpegInputRoles(formData, errors, t); + } + + if (sectionPath === "proxy" && level === "global") { + return (formData, errors) => validateProxyRoleHeader(formData, errors, t); + } + + return undefined; +} diff --git a/web/src/components/config-form/section-validations/proxy.ts 
b/web/src/components/config-form/section-validations/proxy.ts new file mode 100644 index 000000000..340e17222 --- /dev/null +++ b/web/src/components/config-form/section-validations/proxy.ts @@ -0,0 +1,37 @@ +import type { FormValidation } from "@rjsf/utils"; +import type { TFunction } from "i18next"; +import { isJsonObject } from "@/lib/utils"; +import type { JsonObject } from "@/types/configForm"; + +export function validateProxyRoleHeader( + formData: unknown, + errors: FormValidation, + t: TFunction, +): FormValidation { + if (!isJsonObject(formData as JsonObject)) { + return errors; + } + + const headerMap = (formData as JsonObject).header_map; + if (!isJsonObject(headerMap)) { + return errors; + } + + const roleHeader = headerMap.role; + const roleHeaderDefined = + typeof roleHeader === "string" && roleHeader.trim().length > 0; + const roleMap = headerMap.role_map; + const roleMapHasEntries = + isJsonObject(roleMap) && Object.keys(roleMap).length > 0; + + if (roleMapHasEntries && !roleHeaderDefined) { + const headerMapErrors = errors.header_map as { + role?: { addError?: (message: string) => void }; + }; + headerMapErrors?.role?.addError?.( + t("proxy.header_map.roleHeaderRequired", { ns: "config/validation" }), + ); + } + + return errors; +} diff --git a/web/src/components/config-form/sectionConfigs.ts b/web/src/components/config-form/sectionConfigs.ts new file mode 100644 index 000000000..c3a9158a9 --- /dev/null +++ b/web/src/components/config-form/sectionConfigs.ts @@ -0,0 +1,85 @@ +/* + sectionConfigs.ts — section configuration overrides + + Purpose: + - Centralize UI configuration hints for each config section (field ordering, + grouping, hidden/advanced fields, uiSchema overrides, and overrideFields). 
+ + Shape: + - Each section key maps to an object with optional `base`, `global`, and + `camera` entries where each is a `SectionConfig` (or partial): + { + base?: SectionConfig; // common defaults (typically camera-level) + global?: Partial<SectionConfig>; // overrides for global-level UI + camera?: Partial<SectionConfig>; // overrides for camera-level UI + } +*/ + +import type { SectionConfigOverrides } from "./section-configs/types"; +import audio from "./section-configs/audio"; +import audioTranscription from "./section-configs/audio_transcription"; +import auth from "./section-configs/auth"; +import birdseye from "./section-configs/birdseye"; +import classification from "./section-configs/classification"; +import database from "./section-configs/database"; +import detect from "./section-configs/detect"; +import detectors from "./section-configs/detectors"; +import environmentVars from "./section-configs/environment_vars"; +import faceRecognition from "./section-configs/face_recognition"; +import ffmpeg from "./section-configs/ffmpeg"; +import genai from "./section-configs/genai"; +import live from "./section-configs/live"; +import logger from "./section-configs/logger"; +import lpr from "./section-configs/lpr"; +import model from "./section-configs/model"; +import motion from "./section-configs/motion"; +import mqtt from "./section-configs/mqtt"; +import networking from "./section-configs/networking"; +import notifications from "./section-configs/notifications"; +import objects from "./section-configs/objects"; +import onvif from "./section-configs/onvif"; +import proxy from "./section-configs/proxy"; +import record from "./section-configs/record"; +import review from "./section-configs/review"; +import semanticSearch from "./section-configs/semantic_search"; +import snapshots from "./section-configs/snapshots"; +import telemetry from "./section-configs/telemetry"; +import timestampStyle from "./section-configs/timestamp_style"; +import tls from "./section-configs/tls"; +import ui from 
"./section-configs/ui"; + +export const sectionConfigs: Record = { + detect, + record, + snapshots, + motion, + objects, + review, + audio, + live, + timestamp_style: timestampStyle, + notifications, + onvif, + ffmpeg, + audio_transcription: audioTranscription, + birdseye, + face_recognition: faceRecognition, + lpr, + semantic_search: semanticSearch, + mqtt, + ui, + database, + auth, + tls, + networking, + proxy, + logger, + environment_vars: environmentVars, + telemetry, + detectors, + model, + genai, + classification, +}; + +export type { SectionConfigOverrides } from "./section-configs/types"; diff --git a/web/src/components/config-form/sectionExtras/CameraReviewClassification.tsx b/web/src/components/config-form/sectionExtras/CameraReviewClassification.tsx new file mode 100644 index 000000000..ffe9b19b2 --- /dev/null +++ b/web/src/components/config-form/sectionExtras/CameraReviewClassification.tsx @@ -0,0 +1,403 @@ +import { useCallback, useEffect, useMemo, useRef, useState } from "react"; +import { Link } from "react-router-dom"; +import { Trans, useTranslation } from "react-i18next"; +import cloneDeep from "lodash/cloneDeep"; +import get from "lodash/get"; +import isEqual from "lodash/isEqual"; +import set from "lodash/set"; +import { LuExternalLink } from "react-icons/lu"; +import { MdCircle } from "react-icons/md"; +import Heading from "@/components/ui/heading"; +import { Checkbox } from "@/components/ui/checkbox"; +import { Label } from "@/components/ui/label"; +import { cn } from "@/lib/utils"; +import { useDocDomain } from "@/hooks/use-doc-domain"; +import { useCameraFriendlyName } from "@/hooks/use-camera-friendly-name"; +import { resolveZoneName } from "@/hooks/use-zone-friendly-name"; +import { getTranslatedLabel } from "@/utils/i18n"; +import { formatList } from "@/utils/stringUtil"; +import type { ConfigSectionData, JsonObject } from "@/types/configForm"; +import type { SectionRendererProps } from "./registry"; + +const EMPTY_ZONES: string[] = []; + 
+function getRequiredZones( + formData: JsonObject | undefined, + path: string, +): string[] { + const value = get(formData, path); + return Array.isArray(value) ? (value as string[]) : EMPTY_ZONES; +} + +export default function CameraReviewClassification({ + formContext, + selectedCamera, +}: SectionRendererProps) { + const { t } = useTranslation(["views/settings", "common"]); + const { getLocaleDocUrl } = useDocDomain(); + const cameraName = formContext?.cameraName ?? selectedCamera; + const fullFormData = formContext?.formData as JsonObject | undefined; + const baselineFormData = formContext?.baselineFormData as + | JsonObject + | undefined; + const cameraConfig = formContext?.fullCameraConfig; + + const alertsZones = useMemo( + () => getRequiredZones(fullFormData, "alerts.required_zones"), + [fullFormData], + ); + const detectionsZones = useMemo( + () => getRequiredZones(fullFormData, "detections.required_zones"), + [fullFormData], + ); + + // Track whether zones have been modified from baseline for label coloring + const alertsZonesModified = useMemo(() => { + if (!baselineFormData) return false; + const baseline = getRequiredZones( + baselineFormData, + "alerts.required_zones", + ); + return !isEqual(alertsZones, baseline); + }, [alertsZones, baselineFormData]); + + const detectionsZonesModified = useMemo(() => { + if (!baselineFormData) return false; + const baseline = getRequiredZones( + baselineFormData, + "detections.required_zones", + ); + return !isEqual(detectionsZones, baseline); + }, [detectionsZones, baselineFormData]); + + const [selectDetections, setSelectDetections] = useState( + detectionsZones.length > 0, + ); + const previousCameraRef = useRef(cameraName); + const isSynced = formContext?.hasChanges === false; + + useEffect(() => { + const cameraChanged = previousCameraRef.current !== cameraName; + if (cameraChanged) { + previousCameraRef.current = cameraName; + } + + if (cameraChanged || isSynced) { + setSelectDetections(detectionsZones.length 
> 0); + } + }, [cameraName, detectionsZones.length, isSynced]); + + const zones = useMemo(() => { + if (!cameraConfig) { + return undefined; + } + return Object.entries(cameraConfig.zones).map(([name, zoneData]) => { + const zone = + zoneData as (typeof cameraConfig.zones)[keyof typeof cameraConfig.zones]; + return { + camera: cameraConfig.name, + name, + friendly_name: cameraConfig.zones[name].friendly_name, + objects: zone.objects, + color: zone.color, + }; + }); + }, [cameraConfig]); + + const alertsLabels = useMemo(() => { + return cameraConfig?.review.alerts.labels + ? formatList( + cameraConfig.review.alerts.labels.map((label: string) => + getTranslatedLabel( + label, + cameraConfig?.audio?.listen?.includes(label) ? "audio" : "object", + ), + ), + ) + : ""; + }, [cameraConfig]); + + const detectionsLabels = useMemo(() => { + return cameraConfig?.review.detections.labels + ? formatList( + cameraConfig.review.detections.labels.map((label: string) => + getTranslatedLabel( + label, + cameraConfig?.audio?.listen?.includes(label) ? "audio" : "object", + ), + ), + ) + : ""; + }, [cameraConfig]); + + const selectCameraName = useCameraFriendlyName(cameraName); + + const getZoneName = useCallback( + (zoneId: string, camId?: string) => + resolveZoneName(formContext?.fullConfig, zoneId, camId), + [formContext?.fullConfig], + ); + + const updateFormData = useCallback( + (path: string, nextValue: string[]) => { + if (!formContext?.onFormDataChange || !fullFormData) { + return; + } + const nextData = cloneDeep(fullFormData) as JsonObject; + set(nextData, path, nextValue); + formContext.onFormDataChange(nextData as ConfigSectionData); + }, + [formContext, fullFormData], + ); + + const handleZoneToggle = useCallback( + (path: string, zoneName: string) => { + const currentZones = getRequiredZones(fullFormData, path); + const nextZones = currentZones.includes(zoneName) + ? 
currentZones.filter((value) => value !== zoneName) + : [...currentZones, zoneName]; + updateFormData(path, nextZones); + }, + [fullFormData, updateFormData], + ); + + const handleDetectionsToggle = useCallback( + (checked: boolean | string) => { + const isChecked = checked === true; + if (!isChecked) { + updateFormData("detections.required_zones", []); + } + setSelectDetections(isChecked); + }, + [updateFormData], + ); + + if (!cameraName || formContext?.level !== "camera") { + return null; + } + + return ( +
+ + + cameraReview.reviewClassification.title + + + +
+
+

+ + cameraReview.reviewClassification.desc + +

+
+ + {t("readTheDocumentation", { ns: "common" })} + + +
+
+
+ +
0 && "grid items-start gap-5 md:grid-cols-2", + )} + > +
+ {zones && zones.length > 0 ? ( + <> +
+ +
+ + cameraReview.reviewClassification.selectAlertsZones + +
+
+
+ {zones.map((zone) => ( +
+ + handleZoneToggle("alerts.required_zones", zone.name) + } + /> + +
+ ))} +
+ + ) : ( +
+ + cameraReview.reviewClassification.noDefinedZones + +
+ )} + +
+ {alertsZones.length > 0 + ? t("cameraReview.reviewClassification.zoneObjectAlertsTips", { + alertsLabels, + zone: formatList( + alertsZones.map((zone) => getZoneName(zone, cameraName)), + ), + cameraName: selectCameraName, + }) + : t("cameraReview.reviewClassification.objectAlertsTips", { + alertsLabels, + cameraName: selectCameraName, + })} +
+
+ +
+ {zones && zones.length > 0 && ( + <> +
+ + {selectDetections && ( +
+ + cameraReview.reviewClassification.selectDetectionsZones + +
+ )} +
+ + {selectDetections && ( +
+ {zones.map((zone) => ( +
+ + handleZoneToggle( + "detections.required_zones", + zone.name, + ) + } + /> + +
+ ))} +
+ )} + +
+ +
+ +
+
+ + )} + +
+ {detectionsZones.length > 0 ? ( + !selectDetections ? ( + + getZoneName(zone, cameraName), + ), + ), + cameraName: selectCameraName, + }} + ns="views/settings" + /> + ) : ( + + getZoneName(zone, cameraName), + ), + ), + cameraName: selectCameraName, + }} + ns="views/settings" + /> + ) + ) : ( + + )} +
+
+
+
+ ); +} diff --git a/web/src/components/config-form/sectionExtras/CameraReviewStatusToggles.tsx b/web/src/components/config-form/sectionExtras/CameraReviewStatusToggles.tsx new file mode 100644 index 000000000..a6d02253b --- /dev/null +++ b/web/src/components/config-form/sectionExtras/CameraReviewStatusToggles.tsx @@ -0,0 +1,164 @@ +import { useMemo } from "react"; +import useSWR from "swr"; +import { Trans } from "react-i18next"; +import Heading from "@/components/ui/heading"; +import { Separator } from "@/components/ui/separator"; +import { Switch } from "@/components/ui/switch"; +import { Label } from "@/components/ui/label"; +import { FrigateConfig } from "@/types/frigateConfig"; +import { + useAlertsState, + useDetectionsState, + useObjectDescriptionState, + useReviewDescriptionState, +} from "@/api/ws"; +import type { SectionRendererProps } from "./registry"; +import CameraReviewClassification from "./CameraReviewClassification"; + +export default function CameraReviewStatusToggles({ + selectedCamera, + formContext, +}: SectionRendererProps) { + const { data: config } = useSWR("config"); + const cameraId = selectedCamera ?? ""; + + const cameraConfig = useMemo(() => { + if (config && selectedCamera) { + return config.cameras[selectedCamera]; + } + }, [config, selectedCamera]); + + const { payload: alertsState, send: sendAlerts } = useAlertsState(cameraId); + const { payload: detectionsState, send: sendDetections } = + useDetectionsState(cameraId); + + const { payload: objDescState, send: sendObjDesc } = + useObjectDescriptionState(cameraId); + const { payload: revDescState, send: sendRevDesc } = + useReviewDescriptionState(cameraId); + + if (!selectedCamera || !cameraConfig) { + return null; + } + + return ( +
+ + cameraReview.title + + +
+
+ { + sendAlerts(isChecked ? "ON" : "OFF"); + }} + /> +
+ +
+
+
+
+ { + sendDetections(isChecked ? "ON" : "OFF"); + }} + /> +
+ +
+
+
+ cameraReview.review.desc +
+
+
+ + {cameraConfig?.objects?.genai?.enabled_in_config && ( + <> + + + + + cameraReview.object_descriptions.title + + + +
+
+ { + sendObjDesc(isChecked ? "ON" : "OFF"); + }} + /> +
+ +
+
+
+ + cameraReview.object_descriptions.desc + +
+
+ + )} + + {cameraConfig?.review?.genai?.enabled_in_config && ( + <> + + + + + cameraReview.review_descriptions.title + + + +
+
+ { + sendRevDesc(isChecked ? "ON" : "OFF"); + }} + /> +
+ +
+
+
+ + cameraReview.review_descriptions.desc + +
+
+ + )} + + +
+ ); +} diff --git a/web/src/components/config-form/sectionExtras/NotificationsSettingsExtras.tsx b/web/src/components/config-form/sectionExtras/NotificationsSettingsExtras.tsx new file mode 100644 index 000000000..f9fb6addf --- /dev/null +++ b/web/src/components/config-form/sectionExtras/NotificationsSettingsExtras.tsx @@ -0,0 +1,843 @@ +import ActivityIndicator from "@/components/indicators/activity-indicator"; +import { Button } from "@/components/ui/button"; +import { + Form, + FormControl, + FormDescription, + FormField, + FormItem, + FormLabel, + FormMessage, +} from "@/components/ui/form"; +import { Input } from "@/components/ui/input"; +import { Toaster } from "@/components/ui/sonner"; +import { StatusBarMessagesContext } from "@/context/statusbar-provider"; +import { FrigateConfig } from "@/types/frigateConfig"; +import { zodResolver } from "@hookform/resolvers/zod"; +import axios from "axios"; +import { + useCallback, + useContext, + useEffect, + useMemo, + useRef, + useState, +} from "react"; +import { useForm } from "react-hook-form"; +import { LuCheck, LuExternalLink, LuX } from "react-icons/lu"; +import { CiCircleAlert } from "react-icons/ci"; +import { Link } from "react-router-dom"; +import { toast } from "sonner"; +import useSWR from "swr"; +import { z } from "zod"; +import { + useNotifications, + useNotificationSuspend, + useNotificationTest, +} from "@/api/ws"; +import { + Select, + SelectTrigger, + SelectValue, + SelectContent, + SelectItem, +} from "@/components/ui/select"; +import { formatUnixTimestampToDateTime } from "@/utils/dateUtil"; +import FilterSwitch from "@/components/filter/FilterSwitch"; +import { Alert, AlertDescription, AlertTitle } from "@/components/ui/alert"; +import { Trans, useTranslation } from "react-i18next"; +import { useDateLocale } from "@/hooks/use-date-locale"; +import { useDocDomain } from "@/hooks/use-doc-domain"; +import { CameraNameLabel } from "@/components/camera/FriendlyNameLabel"; +import { useIsAdmin } from 
"@/hooks/use-is-admin"; +import { cn } from "@/lib/utils"; +import cloneDeep from "lodash/cloneDeep"; +import isEqual from "lodash/isEqual"; +import set from "lodash/set"; +import type { ConfigSectionData, JsonObject } from "@/types/configForm"; +import { sanitizeSectionData } from "@/utils/configUtil"; +import type { SectionRendererProps } from "./registry"; + +const NOTIFICATION_SERVICE_WORKER = "/notification-worker.js"; +import { + SettingsGroupCard, + SPLIT_ROW_CLASS_NAME, + CONTROL_COLUMN_CLASS_NAME, +} from "@/components/card/SettingsGroupCard"; + +export default function NotificationsSettingsExtras({ + formContext, +}: SectionRendererProps) { + const { t } = useTranslation([ + "views/settings", + "common", + "components/filter", + ]); + const { getLocaleDocUrl } = useDocDomain(); + + // roles + const isAdmin = useIsAdmin(); + + // status bar + const { addMessage, removeMessage } = useContext(StatusBarMessagesContext)!; + + // config + const { data: config } = useSWR("config", { + revalidateOnFocus: false, + }); + + const allCameras = useMemo(() => { + if (!config) { + return []; + } + + return Object.values(config.cameras) + .sort((aConf, bConf) => aConf.ui.order - bConf.ui.order) + .filter((c) => c.enabled_in_config); + }, [config]); + + const notificationCameras = useMemo(() => { + if (!config) { + return []; + } + + return Object.values(config.cameras) + .filter( + (conf) => + conf.enabled_in_config && + conf.notifications && + conf.notifications.enabled_in_config, + ) + .sort((aConf, bConf) => aConf.ui.order - bConf.ui.order); + }, [config]); + + const { send: sendTestNotification } = useNotificationTest(); + + // notification state + const [registration, setRegistration] = + useState(); + const [cameraSelectionTouched, setCameraSelectionTouched] = useState(false); + + useEffect(() => { + if (!("Notification" in window) || !window.isSecureContext) { + return; + } + navigator.serviceWorker + .getRegistration(NOTIFICATION_SERVICE_WORKER) + .then((worker) 
=> { + if (worker) { + setRegistration(worker); + } else { + setRegistration(null); + } + }) + .catch(() => { + setRegistration(null); + }); + }, []); + + // form + const formSchema = z.object({ + allEnabled: z.boolean(), + email: z.string(), + cameras: z.array(z.string()), + }); + + const pendingDataBySection = useMemo( + () => formContext?.pendingDataBySection ?? {}, + [formContext?.pendingDataBySection], + ); + const pendingCameraOverrides = useMemo(() => { + const overrides: Record = {}; + Object.entries(pendingDataBySection).forEach(([key, data]) => { + if (!key.endsWith("::notifications")) { + return; + } + const cameraName = key.slice(0, key.indexOf("::")); + const enabled = (data as JsonObject | undefined)?.enabled; + if (typeof enabled === "boolean") { + overrides[cameraName] = enabled; + } + }); + return overrides; + }, [pendingDataBySection]); + + const defaultValues = useMemo(() => { + const formData = formContext?.formData as JsonObject | undefined; + const enabledValue = + typeof formData?.enabled === "boolean" + ? formData.enabled + : (config?.notifications.enabled ?? false); + const emailValue = + typeof formData?.email === "string" + ? formData.email + : (config?.notifications.email ?? ""); + const baseEnabledSet = new Set( + notificationCameras.map((camera) => camera.name), + ); + const selectedCameras = enabledValue + ? [] + : allCameras + .filter((camera) => { + const pendingEnabled = pendingCameraOverrides[camera.name]; + if (typeof pendingEnabled === "boolean") { + return pendingEnabled; + } + return baseEnabledSet.has(camera.name); + }) + .map((camera) => camera.name); + + return { + allEnabled: Boolean(enabledValue), + email: typeof emailValue === "string" ? 
emailValue : "", + cameras: selectedCameras, + }; + }, [ + allCameras, + config?.notifications.email, + config?.notifications.enabled, + formContext?.formData, + notificationCameras, + pendingCameraOverrides, + ]); + + const form = useForm>({ + resolver: zodResolver(formSchema), + mode: "onChange", + defaultValues, + }); + + const watchAllEnabled = form.watch("allEnabled"); + const watchCameras = form.watch("cameras"); + const watchEmail = form.watch("email"); + const pendingCameraOverridesRef = useRef>(new Set()); + + const resetFormState = useCallback( + (values: z.infer) => { + form.reset(values); + setCameraSelectionTouched(false); + pendingCameraOverridesRef.current.clear(); + }, + [form], + ); + + // pending changes sync (Undo All / Save All) + const hasPendingNotifications = useMemo( + () => + Object.keys(pendingDataBySection).some( + (key) => key === "notifications" || key.endsWith("::notifications"), + ), + [pendingDataBySection], + ); + const hasPendingNotificationsRef = useRef(hasPendingNotifications); + + useEffect(() => { + if (!config || form.formState.isDirty || hasPendingNotifications) { + return; + } + resetFormState(defaultValues); + }, [ + config, + defaultValues, + form.formState.isDirty, + hasPendingNotifications, + resetFormState, + ]); + + useEffect(() => { + const hadPending = hasPendingNotificationsRef.current; + hasPendingNotificationsRef.current = hasPendingNotifications; + + if (hadPending && !hasPendingNotifications) { + resetFormState(defaultValues); + } + }, [hasPendingNotifications, defaultValues, resetFormState]); + + useEffect(() => { + if (!formContext?.onFormDataChange) { + return; + } + const baseData = + (formContext.formData as JsonObject | undefined) ?? + (config?.notifications as JsonObject | undefined); + if (!baseData) { + return; + } + const nextData = cloneDeep(baseData); + const normalizedEmail = watchEmail?.trim() ? 
watchEmail : null; + set(nextData, "enabled", Boolean(watchAllEnabled)); + set(nextData, "email", normalizedEmail); + formContext.onFormDataChange(nextData as ConfigSectionData); + }, [config, formContext, watchAllEnabled, watchEmail]); + + // camera selection overrides + const baselineCameraSelection = useMemo(() => { + if (!config) { + return [] as string[]; + } + return config.notifications.enabled + ? [] + : notificationCameras.map((camera) => camera.name); + }, [config, notificationCameras]); + + const cameraSelectionDirty = useMemo(() => { + const current = Array.isArray(watchCameras) ? watchCameras : []; + return !isEqual([...current].sort(), [...baselineCameraSelection].sort()); + }, [watchCameras, baselineCameraSelection]); + + useEffect(() => { + formContext?.setExtraHasChanges?.(cameraSelectionDirty); + }, [cameraSelectionDirty, formContext]); + + useEffect(() => { + const onPendingDataChange = formContext?.onPendingDataChange; + if (!onPendingDataChange || !config) { + return; + } + + if (!cameraSelectionTouched) { + return; + } + + if (!cameraSelectionDirty) { + pendingCameraOverridesRef.current.forEach((cameraName) => { + onPendingDataChange("notifications", cameraName, null); + }); + pendingCameraOverridesRef.current.clear(); + setCameraSelectionTouched(false); + return; + } + + const selectedCameras = Array.isArray(watchCameras) ? watchCameras : []; + + allCameras.forEach((camera) => { + const desiredEnabled = watchAllEnabled + ? 
true + : selectedCameras.includes(camera.name); + const currentNotifications = config.cameras[camera.name]?.notifications; + const currentEnabled = currentNotifications?.enabled; + + if (desiredEnabled === currentEnabled) { + if (pendingCameraOverridesRef.current.has(camera.name)) { + onPendingDataChange("notifications", camera.name, null); + pendingCameraOverridesRef.current.delete(camera.name); + } + return; + } + + if (!currentNotifications) { + return; + } + + const nextNotifications = cloneDeep( + currentNotifications as JsonObject, + ) as JsonObject; + set(nextNotifications, "enabled", desiredEnabled); + const sanitizedNotifications = sanitizeSectionData( + nextNotifications as ConfigSectionData, + ["enabled_in_config", "email"], + ); + onPendingDataChange("notifications", camera.name, sanitizedNotifications); + pendingCameraOverridesRef.current.add(camera.name); + }); + }, [ + allCameras, + cameraSelectionDirty, + cameraSelectionTouched, + config, + formContext, + watchAllEnabled, + watchCameras, + ]); + + const anyCameraNotificationsEnabled = useMemo( + () => + config && + Object.values(config.cameras).some( + (c) => + c.enabled_in_config && + c.notifications && + c.notifications.enabled_in_config, + ), + [config], + ); + + const shouldFetchPubKey = Boolean( + config && + (config.notifications?.enabled || anyCameraNotificationsEnabled) && + (watchAllEnabled || + (Array.isArray(watchCameras) && watchCameras.length > 0)), + ); + + const { data: publicKey } = useSWR( + shouldFetchPubKey ? 
"notifications/pubkey" : null, + { revalidateOnFocus: false }, + ); + + const subscribeToNotifications = useCallback( + (workerRegistration: ServiceWorkerRegistration) => { + if (!workerRegistration) { + return; + } + + addMessage( + "notification_settings", + t("notification.unsavedRegistrations"), + undefined, + "registration", + ); + + workerRegistration.pushManager + .subscribe({ + userVisibleOnly: true, + applicationServerKey: publicKey, + }) + .then((pushSubscription) => { + axios + .post("notifications/register", { + sub: pushSubscription, + }) + .catch(() => { + toast.error(t("notification.toast.error.registerFailed"), { + position: "top-center", + }); + pushSubscription.unsubscribe(); + workerRegistration.unregister(); + setRegistration(null); + }); + toast.success(t("notification.toast.success.registered"), { + position: "top-center", + }); + }); + }, + [addMessage, publicKey, t], + ); + + useEffect(() => { + if (watchCameras.length > 0) { + form.setValue("allEnabled", false); + } + }, [watchCameras, allCameras, form]); + + useEffect(() => { + document.title = t("documentTitle.notifications"); + }, [t]); + + if (formContext?.level && formContext.level !== "global") { + return null; + } + + if (!config) { + return ; + } + + if (!("Notification" in window) || !window.isSecureContext) { + return ( +
+
+ +
+
+

{t("notification.notificationSettings.desc")}

+
+ + {t("readTheDocumentation", { ns: "common" })} + + +
+
+ + + + + {t("notification.notificationUnavailable.title")} + + + + notification.notificationUnavailable.desc + +
+ + {t("readTheDocumentation", { ns: "common" })}{" "} + + +
+
+
+
+
+
+
+ ); + } + + return ( +
+ +
+
+ {isAdmin && ( + +
+ +
+ ( + +
+ + {t("notification.email.title")} + + + {t("notification.email.desc")} + +
+ +
+ + + + + {t("notification.email.desc")} + + +
+
+ )} + /> + + ( + +
+ + {t("notification.cameras.title")} + + + {t("notification.cameras.desc")} + +
+ +
+ {allCameras.length > 0 ? ( +
+ ( + { + setCameraSelectionTouched(true); + if (checked) { + form.setValue("cameras", []); + } + allEnabledField.onChange(checked); + }} + /> + )} + /> + {allCameras.map((camera) => { + const currentCameras = Array.isArray( + field.value, + ) + ? field.value + : []; + return ( + { + setCameraSelectionTouched(true); + const newCameras = checked + ? Array.from( + new Set([ + ...currentCameras, + camera.name, + ]), + ) + : currentCameras.filter( + (value) => value !== camera.name, + ); + field.onChange(newCameras); + form.setValue("allEnabled", false); + }} + /> + ); + })} +
+ ) : ( +
+ {t("notification.cameras.noCameras")} +
+ )} + + {t("notification.cameras.desc")} + + +
+
+ )} + /> +
+ +
+
+ )} + +
+ +
+ + {isAdmin && registration != null && registration.active && ( + + )} +
+
+ + {isAdmin && notificationCameras.length > 0 && ( + +
+
+

{t("notification.globalSettings.desc")}

+
+
+
+ {notificationCameras.map((item) => ( + + ))} +
+
+
+
+ )} +
+
+
+
+ ); +} + +type CameraNotificationSwitchProps = { + config?: FrigateConfig; + camera: string; +}; + +export function CameraNotificationSwitch({ + config, + camera, +}: CameraNotificationSwitchProps) { + const { t } = useTranslation(["views/settings"]); + const { payload: notificationState, send: sendNotification } = + useNotifications(camera); + const { payload: notificationSuspendUntil, send: sendNotificationSuspend } = + useNotificationSuspend(camera); + const [isSuspended, setIsSuspended] = useState(false); + + useEffect(() => { + if (notificationSuspendUntil) { + setIsSuspended( + notificationSuspendUntil !== "0" || notificationState === "OFF", + ); + } + }, [notificationSuspendUntil, notificationState]); + + const handleSuspend = (duration: string) => { + setIsSuspended(true); + if (duration == "off") { + sendNotification("OFF"); + } else { + sendNotificationSuspend(parseInt(duration)); + } + }; + + const handleCancelSuspension = () => { + sendNotification("ON"); + sendNotificationSuspend(0); + }; + + const locale = useDateLocale(); + + const formatSuspendedUntil = (timestamp: string) => { + if (timestamp === "0") return t("time.untilForRestart", { ns: "common" }); + + const time = formatUnixTimestampToDateTime(parseInt(timestamp), { + time_style: "medium", + date_style: "medium", + timezone: config?.ui.timezone, + date_format: + config?.ui.time_format == "24hour" + ? t("time.formattedTimestampMonthDayHourMinute.24hour", { + ns: "common", + }) + : t("time.formattedTimestampMonthDayHourMinute.12hour", { + ns: "common", + }), + locale: locale, + }); + return t("time.untilForTime", { ns: "common", time }); + }; + + return ( +
+
+
+ {!isSuspended ? ( + + ) : ( + + )} +
+ + + {!isSuspended ? ( +
+ {t("notification.active")} +
+ ) : ( +
+ {t("notification.suspended", { + time: formatSuspendedUntil(notificationSuspendUntil), + })} +
+ )} +
+
+
+ + {!isSuspended ? ( + + ) : ( + + )} +
+ ); +} diff --git a/web/src/components/config-form/sectionExtras/ProxyRoleMap.tsx b/web/src/components/config-form/sectionExtras/ProxyRoleMap.tsx new file mode 100644 index 000000000..2846b0500 --- /dev/null +++ b/web/src/components/config-form/sectionExtras/ProxyRoleMap.tsx @@ -0,0 +1,201 @@ +import { useMemo } from "react"; +import type { ComponentType } from "react"; +import { useTranslation } from "react-i18next"; +import cloneDeep from "lodash/cloneDeep"; +import get from "lodash/get"; +import set from "lodash/set"; +import { Button } from "@/components/ui/button"; +import { Label } from "@/components/ui/label"; +import { + Select, + SelectContent, + SelectItem, + SelectTrigger, + SelectValue, +} from "@/components/ui/select"; +import { LuPlus, LuTrash2 } from "react-icons/lu"; +import { TagsWidget } from "@/components/config-form/theme/widgets/TagsWidget"; +import { isJsonObject } from "@/lib/utils"; +import type { ConfigSectionData, JsonObject } from "@/types/configForm"; +import type { SectionRendererProps } from "./registry"; + +const EMPTY_FORM_DATA: JsonObject = {}; +const RoleMapTags = TagsWidget as unknown as ComponentType<{ + id: string; + value: string[]; + onChange: (value: unknown) => void; + schema: { title: string }; +}>; + +export default function ProxyRoleMap({ formContext }: SectionRendererProps) { + const { t } = useTranslation(["views/settings", "config/global"]); + + const fullFormData = + (formContext?.formData as JsonObject | undefined) ?? EMPTY_FORM_DATA; + const onFormDataChange = formContext?.onFormDataChange; + + const roleHeader = get(fullFormData, "header_map.role"); + const hasRoleHeader = + typeof roleHeader === "string" && roleHeader.trim().length > 0; + + const roleMap = useMemo(() => { + const roleMapValue = get(fullFormData, "header_map.role_map"); + return isJsonObject(roleMapValue) + ? 
(roleMapValue as Record) + : {}; + }, [fullFormData]); + + const roleOptions = useMemo(() => { + const rolesFromConfig = formContext?.fullConfig?.auth?.roles + ? Object.keys(formContext.fullConfig.auth.roles) + : []; + const roles = + rolesFromConfig.length > 0 ? rolesFromConfig : ["admin", "viewer"]; + + return Array.from(new Set([...roles, ...Object.keys(roleMap)])).sort(); + }, [formContext?.fullConfig, roleMap]); + + if (!onFormDataChange || !formContext?.formData) { + return null; + } + + if (!hasRoleHeader) { + return null; + } + + const usedRoles = new Set(Object.keys(roleMap)); + const nextRole = roleOptions.find((role) => !usedRoles.has(role)); + + const updateRoleMap = (nextRoleMap: Record) => { + const nextFormData = cloneDeep(fullFormData) as JsonObject; + set(nextFormData, "header_map.role_map", nextRoleMap); + onFormDataChange(nextFormData as ConfigSectionData); + }; + + const handleAdd = () => { + if (!nextRole) return; + updateRoleMap({ + ...roleMap, + [nextRole]: [], + }); + }; + + const handleRemove = (role: string) => { + const next = { ...roleMap }; + delete next[role]; + updateRoleMap(next); + }; + + const handleRoleChange = (currentRole: string, newRole: string) => { + if (currentRole === newRole) return; + const next = { ...roleMap } as Record; + const groups = next[currentRole] ?? []; + delete next[currentRole]; + next[newRole] = groups; + updateRoleMap(next); + }; + + const handleGroupsChange = (role: string, groups: unknown) => { + updateRoleMap({ + ...roleMap, + [role]: Array.isArray(groups) ? groups : [], + }); + }; + + const roleMapLabel = t("proxy.header_map.role_map.label", { + ns: "config/global", + }); + const roleMapDescription = t("proxy.header_map.role_map.description", { + ns: "config/global", + }); + + return ( +
+
+ +

{roleMapDescription}

+
+ + {Object.keys(roleMap).length === 0 && ( +

+ {t("configForm.roleMap.empty", { ns: "views/settings" })} +

+ )} + + {Object.entries(roleMap).map(([role, groups], index) => { + const rowId = `role-map-${role}-${index}`; + const roleLabel = t("configForm.roleMap.roleLabel", { + ns: "views/settings", + }); + const groupsLabel = t("configForm.roleMap.groupsLabel", { + ns: "views/settings", + }); + const normalizedGroups = Array.isArray(groups) ? groups : []; + + return ( +
+
+ + +
+ +
+ + handleGroupsChange(role, next)} + schema={{ title: groupsLabel }} + /> +
+ +
+ +
+
+ ); + })} + + +
+ ); +} diff --git a/web/src/components/config-form/sectionExtras/SemanticSearchReindex.tsx b/web/src/components/config-form/sectionExtras/SemanticSearchReindex.tsx new file mode 100644 index 000000000..f44bcd8e1 --- /dev/null +++ b/web/src/components/config-form/sectionExtras/SemanticSearchReindex.tsx @@ -0,0 +1,106 @@ +import { useState } from "react"; +import axios from "axios"; +import { Button, buttonVariants } from "@/components/ui/button"; +import { Trans, useTranslation } from "react-i18next"; +import { toast } from "sonner"; +import { + AlertDialog, + AlertDialogAction, + AlertDialogCancel, + AlertDialogContent, + AlertDialogDescription, + AlertDialogFooter, + AlertDialogHeader, + AlertDialogTitle, +} from "@/components/ui/alert-dialog"; + +export default function SemanticSearchReindex() { + const { t } = useTranslation("views/settings"); + const [isLoading, setIsLoading] = useState(false); + const [isDialogOpen, setIsDialogOpen] = useState(false); + + const onReindex = async () => { + setIsLoading(true); + try { + const res = await axios.put("/reindex"); + if (res.status === 202) { + toast.success(t("enrichments.semanticSearch.reindexNow.success"), { + position: "top-center", + }); + } else { + toast.error( + t("enrichments.semanticSearch.reindexNow.error", { + errorMessage: res.statusText, + }), + { position: "top-center" }, + ); + } + } catch (caught) { + const error = caught as { + response?: { data?: { message?: string; detail?: string } }; + }; + const errorMessage = + error.response?.data?.message || error.response?.data?.detail || ""; + toast.error( + t("enrichments.semanticSearch.reindexNow.error", { + errorMessage: errorMessage || undefined, + }), + { position: "top-center" }, + ); + } finally { + setIsLoading(false); + } + }; + + return ( + <> +
+
+ +
+
+ + enrichments.semanticSearch.reindexNow.desc + +
+ + + + + + {t("enrichments.semanticSearch.reindexNow.confirmTitle")} + + + + enrichments.semanticSearch.reindexNow.confirmDesc + + + + + setIsDialogOpen(false)}> + {t("button.cancel", { ns: "common" })} + + { + await onReindex(); + setIsDialogOpen(false); + }} + > + {t("enrichments.semanticSearch.reindexNow.confirmButton")} + + + + +
+ + ); +} diff --git a/web/src/components/config-form/sectionExtras/registry.ts b/web/src/components/config-form/sectionExtras/registry.ts new file mode 100644 index 000000000..08e3dd86a --- /dev/null +++ b/web/src/components/config-form/sectionExtras/registry.ts @@ -0,0 +1,57 @@ +import type { ComponentType } from "react"; +import SemanticSearchReindex from "./SemanticSearchReindex.tsx"; +import CameraReviewStatusToggles from "./CameraReviewStatusToggles"; +import ProxyRoleMap from "./ProxyRoleMap"; +import NotificationsSettingsExtras from "./NotificationsSettingsExtras"; +import type { ConfigFormContext } from "@/types/configForm"; + +// Props that will be injected into all section renderers +export type SectionRendererProps = { + selectedCamera?: string; + setUnsavedChanges?: (hasChanges: boolean) => void; + formContext?: ConfigFormContext; + [key: string]: unknown; // Allow additional props from uiSchema +}; + +export type RendererComponent = ComponentType; + +export type SectionRenderers = Record< + string, + Record +>; + +// Section renderers registry +// Used to register custom renderer components for specific config sections. +// Maps a section key (e.g., `semantic_search`) to a mapping of renderer +// names to React components. These names are referenced from `uiSchema` +// descriptors (e.g., `{ "ui:after": { render: "SemanticSearchReindex" } }`) and +// are resolved by `FieldTemplate` through `formContext.renderers`. +// +// RUNTIME PROPS INJECTION: +// All renderers automatically receive the following props from BaseSection: +// - selectedCamera?: string - The current camera name (camera-level only) +// - setUnsavedChanges?: (hasChanges: boolean) => void - Callback to signal unsaved state +// +// Additional static props can be passed via uiSchema: +// { "ui:after": { render: "MyRenderer", props: { customProp: "value" } } } +// +// ADDING NEW RENDERERS: +// 1. Create your component accepting SectionRendererProps +// 2. 
Import and add it to the appropriate section in this registry +// 3. Reference it in your section's uiSchema using the { render: "ComponentName" } syntax +export const sectionRenderers: SectionRenderers = { + semantic_search: { + SemanticSearchReindex, + }, + review: { + CameraReviewStatusToggles, + }, + proxy: { + ProxyRoleMap, + }, + notifications: { + NotificationsSettingsExtras, + }, +}; + +export default sectionRenderers; diff --git a/web/src/components/config-form/sections/BaseSection.tsx b/web/src/components/config-form/sections/BaseSection.tsx new file mode 100644 index 000000000..606919dcc --- /dev/null +++ b/web/src/components/config-form/sections/BaseSection.tsx @@ -0,0 +1,1009 @@ +// Base Section Component for config form sections +// Used as a foundation for reusable section components + +import { + useMemo, + useCallback, + useState, + useEffect, + useRef, + useContext, +} from "react"; +import useSWR from "swr"; +import axios from "axios"; +import { toast } from "sonner"; +import { useTranslation } from "react-i18next"; +import sectionRenderers, { + RendererComponent, +} from "@/components/config-form/sectionExtras/registry"; +import { ConfigForm } from "../ConfigForm"; +import type { FormValidation, UiSchema } from "@rjsf/utils"; +import { + modifySchemaForSection, + getEffectiveDefaultsForSection, + sanitizeOverridesForSection, +} from "./section-special-cases"; +import { getSectionValidation } from "../section-validations"; +import { useConfigOverride } from "@/hooks/use-config-override"; +import { useSectionSchema } from "@/hooks/use-config-schema"; +import type { FrigateConfig } from "@/types/frigateConfig"; +import { Badge } from "@/components/ui/badge"; +import { Button } from "@/components/ui/button"; +import { LuChevronDown, LuChevronRight } from "react-icons/lu"; +import Heading from "@/components/ui/heading"; +import get from "lodash/get"; +import cloneDeep from "lodash/cloneDeep"; +import isEqual from "lodash/isEqual"; +import { + 
Collapsible, + CollapsibleContent, + CollapsibleTrigger, +} from "@/components/ui/collapsible"; +import { + AlertDialog, + AlertDialogAction, + AlertDialogCancel, + AlertDialogContent, + AlertDialogDescription, + AlertDialogFooter, + AlertDialogHeader, + AlertDialogTitle, +} from "@/components/ui/alert-dialog"; +import { applySchemaDefaults } from "@/lib/config-schema"; +import { cn } from "@/lib/utils"; +import { ConfigSectionData, JsonValue } from "@/types/configForm"; +import ActivityIndicator from "@/components/indicators/activity-indicator"; +import { StatusBarMessagesContext } from "@/context/statusbar-provider"; +import { + cameraUpdateTopicMap, + buildOverrides, + buildConfigDataForPath, + sanitizeSectionData as sharedSanitizeSectionData, + requiresRestartForOverrides as sharedRequiresRestartForOverrides, +} from "@/utils/configUtil"; +import RestartDialog from "@/components/overlay/dialog/RestartDialog"; +import { useRestart } from "@/api/ws"; + +export interface SectionConfig { + /** Field ordering within the section */ + fieldOrder?: string[]; + /** Fields to group together */ + fieldGroups?: Record; + /** Fields to hide from UI */ + hiddenFields?: string[]; + /** Fields to show in advanced section */ + advancedFields?: string[]; + /** Fields to compare for override detection */ + overrideFields?: string[]; + /** Documentation link for the section */ + sectionDocs?: string; + /** Per-field documentation links */ + fieldDocs?: Record; + /** Fields that require restart when modified (empty means none; undefined uses default) */ + restartRequired?: string[]; + /** Whether to enable live validation */ + liveValidate?: boolean; + /** Additional uiSchema overrides */ + uiSchema?: UiSchema; + /** Optional per-section renderers usable by FieldTemplate `ui:before`/`ui:after` */ + renderers?: Record; + /** Optional custom validation for section data */ + customValidate?: ( + formData: unknown, + errors: FormValidation, + ) => FormValidation; +} + +export interface 
BaseSectionProps { + /** Whether this is at global or camera level */ + level: "global" | "camera"; + /** Camera name (required if level is "camera") */ + cameraName?: string; + /** Whether to show override indicator badge */ + showOverrideIndicator?: boolean; + /** Custom section configuration */ + sectionConfig?: SectionConfig; + /** Whether the section is disabled */ + disabled?: boolean; + /** Whether the section is read-only */ + readonly?: boolean; + /** Callback when settings are saved */ + onSave?: () => void; + /** Whether a restart is required after changes */ + requiresRestart?: boolean; + /** Whether section is collapsible */ + collapsible?: boolean; + /** Default collapsed state */ + defaultCollapsed?: boolean; + /** Whether to show the section title (default: false for global, true for camera) */ + showTitle?: boolean; + /** Callback when section status changes */ + onStatusChange?: (status: { + hasChanges: boolean; + isOverridden: boolean; + hasValidationErrors: boolean; + }) => void; + /** Pending form data keyed by "sectionKey" or "cameraName::sectionKey" */ + pendingDataBySection?: Record; + /** Callback to update pending data for a section */ + onPendingDataChange?: ( + sectionKey: string, + cameraName: string | undefined, + data: ConfigSectionData | null, + ) => void; +} + +export interface CreateSectionOptions { + /** The config path for this section (e.g., "detect", "record") */ + sectionPath: string; + /** Default section configuration */ + defaultConfig: SectionConfig; +} + +export type ConfigSectionProps = BaseSectionProps & CreateSectionOptions; + +export function ConfigSection({ + sectionPath, + defaultConfig, + level, + cameraName, + showOverrideIndicator = true, + sectionConfig = defaultConfig, + disabled = false, + readonly = false, + onSave, + requiresRestart = true, + collapsible = false, + defaultCollapsed = true, + showTitle, + onStatusChange, + pendingDataBySection, + onPendingDataChange, +}: ConfigSectionProps) { + const { t, 
i18n } = useTranslation([ + level === "camera" ? "config/cameras" : "config/global", + "config/cameras", + "views/settings", + "common", + "components/dialog", + ]); + const [isOpen, setIsOpen] = useState(!defaultCollapsed); + const { send: sendRestart } = useRestart(); + const statusBar = useContext(StatusBarMessagesContext); + + // Create a key for this section's pending data + const pendingDataKey = useMemo( + () => + level === "camera" && cameraName + ? `${cameraName}::${sectionPath}` + : sectionPath, + [level, cameraName, sectionPath], + ); + + // Use pending data from parent if available, otherwise use local state + const [localPendingData, setLocalPendingData] = + useState(null); + const [pendingOverrides, setPendingOverrides] = useState< + JsonValue | undefined + >(undefined); + const [dirtyOverrides, setDirtyOverrides] = useState( + undefined, + ); + const baselineByKeyRef = useRef>({}); + + const pendingData = + pendingDataBySection !== undefined + ? (pendingDataBySection[pendingDataKey] as ConfigSectionData | null) + : localPendingData; + const pendingDataRef = useRef(null); + + useEffect(() => { + pendingDataRef.current = pendingData; + }, [pendingData]); + + const setPendingData = useCallback( + (data: ConfigSectionData | null) => { + if (onPendingDataChange) { + onPendingDataChange(sectionPath, cameraName, data); + } else { + setLocalPendingData(data); + } + }, + [onPendingDataChange, sectionPath, cameraName], + ); + const [isSaving, setIsSaving] = useState(false); + const [hasValidationErrors, setHasValidationErrors] = useState(false); + const [extraHasChanges, setExtraHasChanges] = useState(false); + const [formKey, setFormKey] = useState(0); + const [isResetDialogOpen, setIsResetDialogOpen] = useState(false); + const [restartDialogOpen, setRestartDialogOpen] = useState(false); + const isResettingRef = useRef(false); + const isInitializingRef = useRef(true); + const lastPendingDataKeyRef = useRef(null); + + const updateTopic = + level === "camera" 
&& cameraName + ? cameraUpdateTopicMap[sectionPath] + ? `config/cameras/${cameraName}/${cameraUpdateTopicMap[sectionPath]}` + : undefined + : `config/${sectionPath}`; + // Default: show title for camera level (since it might be collapsible), hide for global + const shouldShowTitle = showTitle ?? level === "camera"; + + // Fetch config + const { data: config, mutate: refreshConfig } = + useSWR("config"); + + // Get section schema using cached hook + const sectionSchema = useSectionSchema(sectionPath, level); + + // Apply special case handling for sections with problematic schema defaults + const modifiedSchema = useMemo( + () => + modifySchemaForSection(sectionPath, level, sectionSchema ?? undefined), + [sectionPath, level, sectionSchema], + ); + + // Get override status + const { isOverridden, globalValue, cameraValue } = useConfigOverride({ + config, + cameraName: level === "camera" ? cameraName : undefined, + sectionPath, + compareFields: sectionConfig.overrideFields, + }); + + // Get current form data + const rawSectionValue = useMemo(() => { + if (!config) return undefined; + + if (level === "camera" && cameraName) { + return get(config.cameras?.[cameraName], sectionPath); + } + + return get(config, sectionPath); + }, [config, level, cameraName, sectionPath]); + + const rawFormData = useMemo(() => { + if (!config) return {}; + + if (rawSectionValue === undefined || rawSectionValue === null) { + return {}; + } + + return rawSectionValue; + }, [config, rawSectionValue]); + + const sanitizeSectionData = useCallback( + (data: ConfigSectionData) => + sharedSanitizeSectionData(data, sectionConfig.hiddenFields), + [sectionConfig.hiddenFields], + ); + + const formData = useMemo(() => { + const baseData = modifiedSchema + ? 
applySchemaDefaults(modifiedSchema, rawFormData) + : rawFormData; + return sanitizeSectionData(baseData); + }, [rawFormData, modifiedSchema, sanitizeSectionData]); + + const baselineSnapshot = useMemo(() => { + if (!pendingData) { + const snapshot = cloneDeep(formData as ConfigSectionData); + baselineByKeyRef.current[pendingDataKey] = snapshot; + return snapshot; + } + + const cached = baselineByKeyRef.current[pendingDataKey]; + if (cached) { + return cached; + } + + const snapshot = cloneDeep(formData as ConfigSectionData); + baselineByKeyRef.current[pendingDataKey] = snapshot; + return snapshot; + }, [formData, pendingData, pendingDataKey]); + + const schemaDefaults = useMemo(() => { + if (!modifiedSchema) { + return {}; + } + return applySchemaDefaults(modifiedSchema, {}); + }, [modifiedSchema]); + + // Get effective defaults, handling special cases where schema defaults + // don't match semantic intent + const effectiveSchemaDefaults = useMemo( + () => + getEffectiveDefaultsForSection( + sectionPath, + level, + modifiedSchema, + schemaDefaults, + ), + [level, schemaDefaults, sectionPath, modifiedSchema], + ); + + const compareBaseData = useMemo( + () => sanitizeSectionData(rawFormData as ConfigSectionData), + [rawFormData, sanitizeSectionData], + ); + + // Clear pendingData whenever formData changes (e.g., from server refresh) + // This prevents RJSF's initial onChange call from being treated as a user edit + // Only clear if pendingData is managed locally (not by parent) + useEffect(() => { + const pendingKeyChanged = lastPendingDataKeyRef.current !== pendingDataKey; + + if (pendingKeyChanged) { + lastPendingDataKeyRef.current = pendingDataKey; + isInitializingRef.current = true; + setPendingOverrides(undefined); + setDirtyOverrides(undefined); + } else if (!pendingData) { + isInitializingRef.current = true; + setPendingOverrides(undefined); + setDirtyOverrides(undefined); + } + + if (onPendingDataChange === undefined) { + setPendingData(null); + } + }, [ + 
onPendingDataChange, + pendingData, + pendingDataKey, + setPendingData, + setDirtyOverrides, + setPendingOverrides, + ]); + + useEffect(() => { + if (isResettingRef.current) { + isResettingRef.current = false; + } + }, [formKey]); + + // Track if there are unsaved changes + const hasChanges = useMemo(() => { + const pendingChanged = pendingData + ? !isEqual(formData, pendingData) + : false; + return pendingChanged || extraHasChanges; + }, [formData, pendingData, extraHasChanges]); + + useEffect(() => { + onStatusChange?.({ hasChanges, isOverridden, hasValidationErrors }); + }, [hasChanges, isOverridden, hasValidationErrors, onStatusChange]); + + // Handle form data change + const handleChange = useCallback( + (data: unknown) => { + if (isResettingRef.current) { + setPendingData(null); + setPendingOverrides(undefined); + return; + } + if (!data || typeof data !== "object") { + setPendingData(null); + setPendingOverrides(undefined); + return; + } + const sanitizedData = sanitizeSectionData(data as ConfigSectionData); + const nextBaselineFormData = baselineSnapshot; + const overrides = buildOverrides( + sanitizedData, + compareBaseData, + effectiveSchemaDefaults, + ); + setPendingOverrides(overrides as JsonValue | undefined); + if (isInitializingRef.current && !pendingData) { + isInitializingRef.current = false; + if (overrides === undefined) { + setPendingData(null); + setPendingOverrides(undefined); + setDirtyOverrides(undefined); + return; + } + } + const dirty = buildOverrides( + sanitizedData, + nextBaselineFormData, + undefined, + ); + setDirtyOverrides(dirty as JsonValue | undefined); + if (overrides === undefined) { + setPendingData(null); + setPendingOverrides(undefined); + setDirtyOverrides(undefined); + return; + } + setPendingData(sanitizedData); + }, + [ + pendingData, + compareBaseData, + sanitizeSectionData, + effectiveSchemaDefaults, + setPendingData, + setPendingOverrides, + setDirtyOverrides, + baselineSnapshot, + ], + ); + + const currentFormData = 
pendingData || formData; + const effectiveBaselineFormData = baselineSnapshot; + + const currentOverrides = useMemo(() => { + if (!currentFormData || typeof currentFormData !== "object") { + return undefined; + } + const sanitizedData = sanitizeSectionData( + currentFormData as ConfigSectionData, + ); + return buildOverrides( + sanitizedData, + compareBaseData, + effectiveSchemaDefaults, + ); + }, [ + currentFormData, + sanitizeSectionData, + compareBaseData, + effectiveSchemaDefaults, + ]); + + const effectiveOverrides = pendingData + ? (pendingOverrides ?? currentOverrides) + : undefined; + const uiOverrides = dirtyOverrides ?? effectiveOverrides; + + const requiresRestartForOverrides = useCallback( + (overrides: unknown) => + sharedRequiresRestartForOverrides( + overrides, + sectionConfig.restartRequired, + requiresRestart, + ), + [requiresRestart, sectionConfig.restartRequired], + ); + + const handleReset = useCallback(() => { + isResettingRef.current = true; + setPendingData(null); + setPendingOverrides(undefined); + setDirtyOverrides(undefined); + setExtraHasChanges(false); + setFormKey((prev) => prev + 1); + }, [setPendingData, setPendingOverrides, setDirtyOverrides]); + + // Handle save button click + const handleSave = useCallback(async () => { + if (!pendingData) return; + + setIsSaving(true); + try { + const basePath = + level === "camera" && cameraName + ? 
`cameras.${cameraName}.${sectionPath}` + : sectionPath; + const rawData = sanitizeSectionData(rawFormData); + const overrides = buildOverrides( + pendingData, + rawData, + effectiveSchemaDefaults, + ); + const sanitizedOverrides = sanitizeOverridesForSection( + sectionPath, + level, + overrides, + ); + + if ( + !sanitizedOverrides || + typeof sanitizedOverrides !== "object" || + Object.keys(sanitizedOverrides).length === 0 + ) { + setPendingData(null); + return; + } + + const needsRestart = requiresRestartForOverrides(sanitizedOverrides); + + const configData = buildConfigDataForPath(basePath, sanitizedOverrides); + await axios.put("config/set", { + requires_restart: needsRestart ? 1 : 0, + update_topic: updateTopic, + config_data: configData, + }); + + if (needsRestart) { + statusBar?.addMessage( + "config_restart_required", + t("configForm.restartRequiredFooter", { + ns: "views/settings", + defaultValue: "Configuration changed - Restart required", + }), + undefined, + "config_restart_required", + ); + toast.success( + t("toast.successRestartRequired", { + ns: "views/settings", + defaultValue: + "Settings saved successfully. 
Restart Frigate to apply your changes.", + }), + { + action: ( + setRestartDialogOpen(true)}> + + + ), + }, + ); + } else { + toast.success( + t("toast.success", { + ns: "views/settings", + defaultValue: "Settings saved successfully", + }), + ); + } + + setPendingData(null); + refreshConfig(); + onSave?.(); + } catch (error) { + // Parse Pydantic validation errors from API response + if (axios.isAxiosError(error) && error.response?.data) { + const responseData = error.response.data; + if (responseData.detail && Array.isArray(responseData.detail)) { + const validationMessages = responseData.detail + .map((err: { loc?: string[]; msg?: string }) => { + const field = err.loc?.slice(1).join(".") || "unknown"; + return `${field}: ${err.msg || "Invalid value"}`; + }) + .join(", "); + toast.error( + t("toast.validationError", { + ns: "views/settings", + defaultValue: `Validation failed: ${validationMessages}`, + }), + ); + } else if (responseData.message) { + toast.error(responseData.message); + } else { + toast.error( + t("toast.error", { + ns: "views/settings", + defaultValue: "Failed to save settings", + }), + ); + } + } else { + toast.error( + t("toast.error", { + ns: "views/settings", + defaultValue: "Failed to save settings", + }), + ); + } + } finally { + setIsSaving(false); + } + }, [ + sectionPath, + pendingData, + level, + cameraName, + t, + refreshConfig, + statusBar, + onSave, + rawFormData, + sanitizeSectionData, + effectiveSchemaDefaults, + updateTopic, + setPendingData, + requiresRestartForOverrides, + ]); + + // Handle reset to global/defaults - removes camera-level override or resets global to defaults + const handleResetToGlobal = useCallback(async () => { + if (level === "camera" && !cameraName) return; + + try { + const basePath = + level === "camera" && cameraName + ? 
`cameras.${cameraName}.${sectionPath}` + : sectionPath; + + const configData = buildConfigDataForPath(basePath, ""); + + await axios.put("config/set", { + requires_restart: requiresRestart ? 1 : 0, + update_topic: updateTopic, + config_data: configData, + }); + + toast.success( + t("toast.resetSuccess", { + ns: "views/settings", + defaultValue: + level === "global" + ? "Reset to defaults" + : "Reset to global defaults", + }), + ); + + setPendingData(null); + setExtraHasChanges(false); + refreshConfig(); + } catch { + toast.error( + t("toast.resetError", { + ns: "views/settings", + defaultValue: "Failed to reset settings", + }), + ); + } + }, [ + sectionPath, + level, + cameraName, + requiresRestart, + t, + refreshConfig, + updateTopic, + setPendingData, + ]); + + const sectionValidation = useMemo( + () => getSectionValidation({ sectionPath, level, t }), + [sectionPath, level, t], + ); + + const customValidate = useMemo(() => { + const validators: Array< + (formData: unknown, errors: FormValidation) => FormValidation + > = []; + + if (sectionConfig.customValidate) { + validators.push(sectionConfig.customValidate); + } + + if (sectionValidation) { + validators.push(sectionValidation); + } + + if (validators.length === 0) { + return undefined; + } + + return (formData: unknown, errors: FormValidation) => + validators.reduce( + (currentErrors, validatorFn) => validatorFn(formData, currentErrors), + errors, + ); + }, [sectionConfig.customValidate, sectionValidation]); + + // Wrap renderers with runtime props (selectedCamera, setUnsavedChanges, etc.) + const wrappedRenderers = useMemo(() => { + const baseRenderers = + sectionConfig?.renderers ?? 
sectionRenderers?.[sectionPath]; + if (!baseRenderers) return undefined; + + // Create wrapper that injects runtime props + return Object.fromEntries( + Object.entries(baseRenderers).map(([key, RendererComponent]) => [ + key, + (staticProps: Record = {}) => ( + { + // Translate setUnsavedChanges to pending data state + const currentPending = pendingDataRef.current; + if (hasChanges && !currentPending) { + // Component signaled changes but we don't have pending data yet + // This can happen when the component manages its own state + } else if (!hasChanges && currentPending) { + // Component signaled no changes, clear pending + setPendingData(null); + } + }} + /> + ), + ]), + ); + }, [sectionConfig?.renderers, sectionPath, cameraName, setPendingData]); + + if (!modifiedSchema) { + return null; + } + + // Get section title from config namespace + const defaultTitle = + sectionPath.charAt(0).toUpperCase() + + sectionPath.slice(1).replace(/_/g, " "); + + // For camera-level sections, keys live under `config/cameras` and are + // nested under the section name (e.g., `audio.label`). For global-level + // sections, keys are nested under the section name in `config/global`. + const configNamespace = + level === "camera" ? "config/cameras" : "config/global"; + const title = t(`${sectionPath}.label`, { + ns: configNamespace, + defaultValue: defaultTitle, + }); + + const sectionDescription = i18n.exists(`${sectionPath}.description`, { + ns: configNamespace, + }) + ? t(`${sectionPath}.description`, { ns: configNamespace }) + : undefined; + + if (!sectionSchema || !config) { + return ; + } + + const sectionContent = ( +
+ handleChange(data), + // For widgets that need access to full camera config (e.g., zone names) + fullCameraConfig: + level === "camera" && cameraName + ? config?.cameras?.[cameraName] + : undefined, + fullConfig: config, + // When rendering camera-level sections, provide the section path so + // field templates can look up keys under the `config/cameras` namespace + // When using a consolidated global namespace, keys are nested + // under the section name (e.g., `audio.label`) so provide the + // section prefix to templates so they can attempt `${section}.${field}` lookups. + sectionI18nPrefix: sectionPath, + t, + renderers: wrappedRenderers, + sectionDocs: sectionConfig.sectionDocs, + fieldDocs: sectionConfig.fieldDocs, + hiddenFields: sectionConfig.hiddenFields, + restartRequired: sectionConfig.restartRequired, + requiresRestart, + }} + /> + +
+
+ {hasChanges && ( +
+ + {t("unsavedChanges", { + ns: "views/settings", + defaultValue: "You have unsaved changes", + })} + +
+ )} +
+ {((level === "camera" && isOverridden) || level === "global") && + !hasChanges && ( + + )} + {hasChanges && ( + + )} + +
+
+
+ + + + + + {t("confirmReset", { ns: "views/settings" })} + + + {level === "global" + ? t("resetToDefaultDescription", { ns: "views/settings" }) + : t("resetToGlobalDescription", { ns: "views/settings" })} + + + + + {t("button.cancel", { ns: "common" })} + + { + await handleResetToGlobal(); + setIsResetDialogOpen(false); + }} + > + {level === "global" + ? t("button.resetToDefault", { ns: "common" }) + : t("button.resetToGlobal", { ns: "common" })} + + + + +
+ ); + + if (collapsible) { + return ( + <> + +
+ +
+
+ {isOpen ? ( + + ) : ( + + )} + {title} + {showOverrideIndicator && + level === "camera" && + isOverridden && ( + + {t("button.overridden", { + ns: "common", + defaultValue: "Overridden", + })} + + )} + {hasChanges && ( + + {t("modified", { + ns: "common", + defaultValue: "Modified", + })} + + )} +
+
+
+ + +
{sectionContent}
+
+
+
+ setRestartDialogOpen(false)} + onRestart={() => sendRestart("restart")} + /> + + ); + } + + return ( + <> +
+ {shouldShowTitle && ( +
+
+
+ {title} + {showOverrideIndicator && + level === "camera" && + isOverridden && ( + + {t("button.overridden", { + ns: "common", + defaultValue: "Overridden", + })} + + )} + {hasChanges && ( + + {t("modified", { ns: "common", defaultValue: "Modified" })} + + )} +
+ {sectionDescription && ( +

+ {sectionDescription} +

+ )} +
+
+ )} + + {sectionContent} +
+ setRestartDialogOpen(false)} + onRestart={() => sendRestart("restart")} + /> + + ); +} diff --git a/web/src/components/config-form/sections/ConfigSectionTemplate.tsx b/web/src/components/config-form/sections/ConfigSectionTemplate.tsx new file mode 100644 index 000000000..c3b4c1bd1 --- /dev/null +++ b/web/src/components/config-form/sections/ConfigSectionTemplate.tsx @@ -0,0 +1,33 @@ +import { useMemo } from "react"; +import { ConfigSection } from "./BaseSection"; +import type { BaseSectionProps, SectionConfig } from "./BaseSection"; +import { getSectionConfig } from "@/utils/configUtil"; + +export type ConfigSectionTemplateProps = BaseSectionProps & { + sectionKey: string; + sectionConfig?: SectionConfig; +}; + +export function ConfigSectionTemplate({ + sectionKey, + level, + sectionConfig, + ...rest +}: ConfigSectionTemplateProps) { + const defaultConfig = useMemo( + () => getSectionConfig(sectionKey, level), + [sectionKey, level], + ); + + return ( + + ); +} + +export default ConfigSectionTemplate; diff --git a/web/src/components/config-form/sections/index.ts b/web/src/components/config-form/sections/index.ts new file mode 100644 index 000000000..6a59c0463 --- /dev/null +++ b/web/src/components/config-form/sections/index.ts @@ -0,0 +1,14 @@ +// Config Form Section Components +// Reusable components for both global and camera-level settings + +export { + ConfigSection, + type BaseSectionProps, + type SectionConfig, + type CreateSectionOptions, + type ConfigSectionProps, +} from "./BaseSection"; +export { + ConfigSectionTemplate, + type ConfigSectionTemplateProps, +} from "./ConfigSectionTemplate"; diff --git a/web/src/components/config-form/sections/section-special-cases.ts b/web/src/components/config-form/sections/section-special-cases.ts new file mode 100644 index 000000000..94771644f --- /dev/null +++ b/web/src/components/config-form/sections/section-special-cases.ts @@ -0,0 +1,203 @@ +/** + * Special case handling for config sections with schema/default 
issues. + * + * Some sections have schema patterns that cause false "Modified" indicators + * when navigating to them due to how defaults are applied. This utility + * centralizes the logic for detecting and handling these cases. + */ + +import { RJSFSchema } from "@rjsf/utils"; +import { applySchemaDefaults } from "@/lib/config-schema"; +import { isJsonObject } from "@/lib/utils"; +import { JsonObject, JsonValue } from "@/types/configForm"; + +/** + * Sections that require special handling at the global level. + * Add new section paths here as needed. + */ +const SPECIAL_CASE_SECTIONS = ["motion", "detectors"] as const; + +/** + * Check if a section requires special case handling. + */ +export function isSpecialCaseSection( + sectionPath: string, + level: string, +): boolean { + return ( + level === "global" && + SPECIAL_CASE_SECTIONS.includes( + sectionPath as (typeof SPECIAL_CASE_SECTIONS)[number], + ) + ); +} + +/** + * Modify schema for sections that need defaults stripped or other modifications. + * + * - detectors: Strip the "default" field to prevent RJSF from merging the + * default {"cpu": {"type": "cpu"}} with stored detector keys. + */ +export function modifySchemaForSection( + sectionPath: string, + level: string, + schema: RJSFSchema | undefined, +): RJSFSchema | undefined { + if (!schema || !isSpecialCaseSection(sectionPath, level)) { + return schema; + } + + // detectors: Remove default to prevent merging with stored keys + if (sectionPath === "detectors" && "default" in schema) { + const { default: _, ...schemaWithoutDefault } = schema; + return schemaWithoutDefault; + } + + return schema; +} + +/** + * Get effective defaults for sections with special schema patterns. + * + * - motion: Has anyOf schema with [null, MotionConfig]. When stored value is + * null, derive defaults from the non-null anyOf branch to avoid showing + * changes when navigating to the page. 
+ * - detectors: Return empty object since the schema default would add unwanted + * keys to the stored configuration. + */ +export function getEffectiveDefaultsForSection( + sectionPath: string, + level: string, + schema: RJSFSchema | undefined, + schemaDefaults: unknown, +): unknown { + if (!isSpecialCaseSection(sectionPath, level) || !schema) { + return schemaDefaults; + } + + // motion: Derive defaults from non-null anyOf branch + if (sectionPath === "motion") { + const anyOfSchemas = (schema as { anyOf?: unknown[] }).anyOf; + if (!anyOfSchemas || !Array.isArray(anyOfSchemas)) { + return schemaDefaults; + } + + // Find the non-null motion config schema + const motionSchema = anyOfSchemas.find( + (s) => + typeof s === "object" && + s !== null && + (s as { type?: string }).type !== "null", + ); + + if (!motionSchema) { + return schemaDefaults; + } + + return applySchemaDefaults(motionSchema as RJSFSchema, {}); + } + + // detectors: Return empty object to avoid adding default keys + if (sectionPath === "detectors") { + return {}; + } + + return schemaDefaults; +} + +/** + * Sanitize overrides payloads for section-specific quirks. 
+ */ +export function sanitizeOverridesForSection( + sectionPath: string, + level: string, + overrides: unknown, +): unknown { + if (!overrides || !isJsonObject(overrides)) { + return overrides; + } + + if (sectionPath === "ffmpeg" && level === "camera") { + const overridesObj = overrides as JsonObject; + const inputs = overridesObj.inputs; + if (!Array.isArray(inputs)) { + return overrides; + } + + const cleanedInputs = inputs.map((input) => { + if (!isJsonObject(input)) { + return input; + } + + const cleanedInput = { ...input } as JsonObject; + ["global_args", "hwaccel_args", "input_args"].forEach((key) => { + const value = cleanedInput[key]; + if (Array.isArray(value) && value.length === 0) { + delete cleanedInput[key]; + } + }); + + return cleanedInput; + }); + + return { + ...overridesObj, + inputs: cleanedInputs, + }; + } + + const flattenRecordWithDots = ( + value: JsonObject, + prefix: string = "", + ): JsonObject => { + const flattened: JsonObject = {}; + Object.entries(value).forEach(([key, entry]) => { + const nextKey = prefix ? `${prefix}.${key}` : key; + if (isJsonObject(entry)) { + Object.assign(flattened, flattenRecordWithDots(entry, nextKey)); + } else { + flattened[nextKey] = entry as JsonValue; + } + }); + return flattened; + }; + + // detectors: Strip readonly model fields that are generated on startup + // and should never be persisted back to the config file. 
+ if (sectionPath === "detectors") { + const overridesObj = overrides as JsonObject; + const cleaned: JsonObject = {}; + + Object.entries(overridesObj).forEach(([key, value]) => { + if (!isJsonObject(value)) { + cleaned[key] = value; + return; + } + + const cleanedValue = { ...value } as JsonObject; + delete cleanedValue.model; + delete cleanedValue.model_path; + cleaned[key] = cleanedValue; + }); + + return cleaned; + } + + if (sectionPath === "logger") { + const overridesObj = overrides as JsonObject; + const logs = overridesObj.logs; + if (isJsonObject(logs)) { + return { + ...overridesObj, + logs: flattenRecordWithDots(logs), + }; + } + } + + if (sectionPath === "environment_vars") { + const overridesObj = overrides as JsonObject; + return flattenRecordWithDots(overridesObj); + } + + return overrides; +} diff --git a/web/src/components/config-form/theme/components/index.tsx b/web/src/components/config-form/theme/components/index.tsx new file mode 100644 index 000000000..85519313e --- /dev/null +++ b/web/src/components/config-form/theme/components/index.tsx @@ -0,0 +1,136 @@ +/** + * Shared UI components for config form templates and fields. 
+ */ + +import { canExpand } from "@rjsf/utils"; +import type { RJSFSchema, UiSchema } from "@rjsf/utils"; +import { Button } from "@/components/ui/button"; +import { LuPlus, LuChevronDown, LuChevronRight } from "react-icons/lu"; +import { useTranslation } from "react-i18next"; +import { + Collapsible, + CollapsibleContent, + CollapsibleTrigger, +} from "@/components/ui/collapsible"; +import type { ReactNode } from "react"; + +interface AddPropertyButtonProps { + /** Callback fired when the add button is clicked */ + onAddProperty?: () => void; + /** JSON Schema to determine expandability */ + schema: RJSFSchema; + /** UI Schema for expansion checks */ + uiSchema?: UiSchema; + /** Current form data for expansion checks */ + formData?: unknown; + /** Whether the form is disabled */ + disabled?: boolean; + /** Whether the form is read-only */ + readonly?: boolean; +} + +/** + * Add property button for RJSF objects with additionalProperties. + * Shows "Add" button that allows adding new key-value pairs to objects. + */ +export function AddPropertyButton({ + onAddProperty, + schema, + uiSchema, + formData, + disabled, + readonly, +}: AddPropertyButtonProps) { + const { t } = useTranslation(["common"]); + + const canAdd = + Boolean(onAddProperty) && canExpand(schema, uiSchema, formData); + + if (!canAdd) { + return null; + } + + return ( + + ); +} + +interface AdvancedCollapsibleProps { + /** Number of advanced fields */ + count: number; + /** Whether the collapsible is open */ + open: boolean; + /** Callback when open state changes */ + onOpenChange: (open: boolean) => void; + /** Content to show when expanded */ + children: ReactNode; + /** Use root-level label variant (longer text) */ + isRoot?: boolean; + /** Button size - defaults to undefined (default) for root, "sm" for nested */ + buttonSize?: "sm" | "default" | "lg" | "icon"; +} + +/** + * Collapsible section for advanced form fields. + * Provides consistent styling and i18n labels for advanced settings. 
+ */ +export function AdvancedCollapsible({ + count, + open, + onOpenChange, + children, + isRoot = false, + buttonSize, +}: AdvancedCollapsibleProps) { + const { t } = useTranslation(["views/settings", "common"]); + + if (count === 0) { + return null; + } + + const effectiveSize = buttonSize ?? (isRoot ? undefined : "sm"); + + const label = isRoot + ? t("configForm.advancedSettingsCount", { + ns: "views/settings", + defaultValue: "Advanced Settings ({{count}})", + count, + }) + : t("configForm.advancedCount", { + ns: "views/settings", + defaultValue: "Advanced ({{count}})", + count, + }); + + return ( + + + + + + {children} + + + ); +} diff --git a/web/src/components/config-form/theme/fields/CameraInputsField.tsx b/web/src/components/config-form/theme/fields/CameraInputsField.tsx new file mode 100644 index 000000000..ee19dbc95 --- /dev/null +++ b/web/src/components/config-form/theme/fields/CameraInputsField.tsx @@ -0,0 +1,426 @@ +import type { + ErrorSchema, + FieldProps, + RJSFSchema, + UiSchema, +} from "@rjsf/utils"; +import { toFieldPathId } from "@rjsf/utils"; +import { cloneDeep, isEqual } from "lodash"; +import { useCallback, useEffect, useMemo, useState } from "react"; +import { useTranslation } from "react-i18next"; +import { applySchemaDefaults } from "@/lib/config-schema"; +import { mergeUiSchema } from "@/lib/utils"; +import { Button } from "@/components/ui/button"; +import { Card, CardContent, CardHeader, CardTitle } from "@/components/ui/card"; +import { + Collapsible, + CollapsibleContent, + CollapsibleTrigger, +} from "@/components/ui/collapsible"; +import { + LuChevronDown, + LuChevronRight, + LuPlus, + LuTrash2, +} from "react-icons/lu"; +import type { ConfigFormContext } from "@/types/configForm"; +import { + Tooltip, + TooltipContent, + TooltipTrigger, +} from "@/components/ui/tooltip"; + +type FfmpegInput = { + path?: string; + roles?: string[]; + hwaccel_args?: unknown; +}; + +const asInputList = (formData: unknown): FfmpegInput[] => { + if 
(!Array.isArray(formData)) { + return []; + } + + return formData.filter( + (item): item is FfmpegInput => typeof item === "object" && item !== null, + ); +}; + +const getItemSchema = (schema: RJSFSchema): RJSFSchema | undefined => { + const items = schema.items; + if (!items || typeof items !== "object" || Array.isArray(items)) { + return undefined; + } + + return items as RJSFSchema; +}; + +const getItemProperties = ( + schema: RJSFSchema | undefined, +): Record => { + if (!schema || typeof schema.properties !== "object" || !schema.properties) { + return {}; + } + + return schema.properties as Record; +}; + +const hasDetectRole = (input: FfmpegInput): boolean => + Array.isArray(input.roles) && input.roles.includes("detect"); + +const hasHwaccelValue = (value: unknown): boolean => { + if (value === null || value === undefined || value === "") { + return false; + } + + if (Array.isArray(value)) { + return value.length > 0; + } + + return true; +}; + +const normalizeNonDetectHwaccel = (inputs: FfmpegInput[]): FfmpegInput[] => + inputs.map((input) => { + if (hasDetectRole(input)) { + return input; + } + + if (!hasHwaccelValue(input.hwaccel_args)) { + return input; + } + + return { + ...input, + hwaccel_args: undefined, + }; + }); + +export function CameraInputsField(props: FieldProps) { + const { + schema, + uiSchema, + formData, + onChange, + fieldPathId, + registry, + idSchema, + errorSchema, + disabled, + readonly, + hideError, + onBlur, + onFocus, + } = props; + + const formContext = registry?.formContext as ConfigFormContext | undefined; + const isCameraLevel = formContext?.level === "camera"; + const effectiveNamespace = isCameraLevel ? 
"config/cameras" : "config/global"; + + const { t, i18n } = useTranslation([ + "common", + "views/settings", + effectiveNamespace, + ]); + + const inputs = useMemo(() => asInputList(formData), [formData]); + const arraySchema = schema as RJSFSchema; + const itemSchema = useMemo(() => getItemSchema(arraySchema), [arraySchema]); + const itemProperties = useMemo( + () => getItemProperties(itemSchema), + [itemSchema], + ); + const itemUiSchema = useMemo( + () => + ((uiSchema as { items?: UiSchema } | undefined)?.items ?? {}) as UiSchema, + [uiSchema], + ); + const SchemaField = registry.fields.SchemaField; + + const [openByIndex, setOpenByIndex] = useState>({}); + + useEffect(() => { + setOpenByIndex((previous) => { + const next: Record = {}; + for (let index = 0; index < inputs.length; index += 1) { + next[index] = previous[index] ?? true; + } + return next; + }); + }, [inputs.length]); + + useEffect(() => { + const normalized = normalizeNonDetectHwaccel(inputs); + if (!isEqual(normalized, inputs)) { + onChange(normalized, fieldPathId.path); + } + }, [fieldPathId.path, inputs, onChange]); + + const handleFieldValueChange = useCallback( + (index: number, fieldName: string, nextValue: unknown) => { + const nextInputs = cloneDeep(inputs); + const item = + (nextInputs[index] as Record | undefined) ?? + ({} as Record); + + item[fieldName] = nextValue; + nextInputs[index] = item; + + onChange(normalizeNonDetectHwaccel(nextInputs), fieldPathId.path); + }, + [fieldPathId.path, inputs, onChange], + ); + + const handleAddInput = useCallback(() => { + const base = itemSchema + ? 
(applySchemaDefaults(itemSchema) as FfmpegInput) + : ({} as FfmpegInput); + const nextInputs = normalizeNonDetectHwaccel([...inputs, base]); + onChange(nextInputs, fieldPathId.path); + setOpenByIndex((previous) => ({ ...previous, [inputs.length]: true })); + }, [fieldPathId.path, inputs, itemSchema, onChange]); + + const handleRemoveInput = useCallback( + (index: number) => { + const nextInputs = inputs.filter( + (_, currentIndex) => currentIndex !== index, + ); + onChange(nextInputs, fieldPathId.path); + setOpenByIndex((previous) => { + const next: Record = {}; + Object.entries(previous).forEach(([key, value]) => { + const current = Number(key); + if (Number.isNaN(current) || current === index) { + return; + } + + next[current > index ? current - 1 : current] = value; + }); + return next; + }); + }, + [fieldPathId.path, inputs, onChange], + ); + + const renderField = useCallback( + ( + index: number, + fieldName: string, + options?: { + extraUiSchema?: UiSchema; + showSchemaDescription?: boolean; + }, + ) => { + if (!SchemaField) { + return null; + } + + const fieldSchema = itemProperties[fieldName]; + if (!fieldSchema) { + return null; + } + + const itemData = inputs[index] as Record; + const itemPath = [...fieldPathId.path, index]; + const itemFieldPathId = toFieldPathId( + fieldName, + registry.globalFormOptions, + itemPath, + ); + + const itemErrors = ( + errorSchema as Record | undefined + )?.[index] as Record | undefined; + const fieldErrorSchema = itemErrors?.[fieldName]; + + const baseUiSchema = + (itemUiSchema[fieldName] as UiSchema | undefined) ?? ({} as UiSchema); + const mergedUiSchema = options?.extraUiSchema + ? mergeUiSchema(baseUiSchema, options.extraUiSchema) + : baseUiSchema; + + const fieldTranslationDescriptionKey = `ffmpeg.inputs.${fieldName}.description`; + const translatedDescription = i18n.exists( + fieldTranslationDescriptionKey, + { + ns: effectiveNamespace, + }, + ) + ? 
t(fieldTranslationDescriptionKey, { ns: effectiveNamespace }) + : ""; + + const fieldDescription = + typeof fieldSchema.description === "string" && + fieldSchema.description.length > 0 + ? fieldSchema.description + : translatedDescription; + + const handleScopedFieldChange = ( + nextValue: unknown, + _path: unknown, + _errors?: ErrorSchema, + _id?: string, + ) => { + handleFieldValueChange(index, fieldName, nextValue); + }; + + return ( +
+ + {options?.showSchemaDescription && fieldDescription ? ( +

{fieldDescription}

+ ) : null} +
+ ); + }, + [ + SchemaField, + itemProperties, + inputs, + fieldPathId.path, + registry, + errorSchema, + itemUiSchema, + i18n, + handleFieldValueChange, + effectiveNamespace, + onBlur, + onFocus, + disabled, + readonly, + hideError, + t, + ], + ); + + const baseId = idSchema?.$id ?? "ffmpeg_inputs"; + + return ( +
+ {inputs.map((input, index) => { + const open = openByIndex[index] ?? true; + const itemTitle = t("configForm.cameraInputs.itemTitle", { + ns: "views/settings", + index: index + 1, + }); + const itemPath = + typeof input.path === "string" ? input.path.trim() : ""; + + return ( + + + setOpenByIndex((previous) => ({ + ...previous, + [index]: nextOpen, + })) + } + > + + +
+ + {itemTitle} + {itemPath ? ( + + {itemPath} + + ) : null} + + {open ? ( + + ) : ( + + )} +
+
+
+ + + +
+ {renderField(index, "path", { + extraUiSchema: { + "ui:widget": "CameraPathWidget", + "ui:options": { + size: "full", + splitLayout: false, + }, + }, + showSchemaDescription: true, + })} +
+ +
{renderField(index, "roles")}
+ + {renderField(index, "input_args")} + + {hasDetectRole(input) + ? renderField(index, "hwaccel_args", { + extraUiSchema: { + "ui:options": { + allowInherit: true, + }, + }, + }) + : null} + + {renderField(index, "output_args")} + +
+ + + + + + {t("button.delete", { ns: "common" })} + + +
+
+
+
+
+ ); + })} + + +
+ ); +} + +export default CameraInputsField; diff --git a/web/src/components/config-form/theme/fields/DetectorHardwareField.tsx b/web/src/components/config-form/theme/fields/DetectorHardwareField.tsx new file mode 100644 index 000000000..871131111 --- /dev/null +++ b/web/src/components/config-form/theme/fields/DetectorHardwareField.tsx @@ -0,0 +1,891 @@ +import type { + ErrorSchema, + FieldPathList, + FieldProps, + RJSFSchema, + UiSchema, +} from "@rjsf/utils"; +import { toFieldPathId } from "@rjsf/utils"; +import { useCallback, useEffect, useMemo, useState } from "react"; +import { useTranslation } from "react-i18next"; +import { + LuChevronDown, + LuChevronRight, + LuPlus, + LuTrash2, +} from "react-icons/lu"; +import { applySchemaDefaults } from "@/lib/config-schema"; +import { cn, isJsonObject, mergeUiSchema } from "@/lib/utils"; +import { ConfigFormContext, JsonObject } from "@/types/configForm"; +import { requiresRestartForFieldPath } from "@/utils/configUtil"; +import RestartRequiredIndicator from "@/components/indicators/RestartRequiredIndicator"; +import { Button } from "@/components/ui/button"; +import { + Collapsible, + CollapsibleContent, + CollapsibleTrigger, +} from "@/components/ui/collapsible"; +import { Input } from "@/components/ui/input"; +import { Label } from "@/components/ui/label"; +import { + Select, + SelectContent, + SelectItem, + SelectTrigger, + SelectValue, +} from "@/components/ui/select"; +import { humanizeKey } from "../utils/i18n"; + +type DetectorHardwareFieldOptions = { + multiInstanceTypes?: string[]; + hiddenByType?: Record; + hiddenFields?: string[]; + typeOrder?: string[]; +}; + +type DetectorSchemaEntry = { + type: string; + schema: RJSFSchema; +}; + +const DEFAULT_MULTI_INSTANCE_TYPES = ["cpu", "onnx", "openvino"]; +const EMPTY_HIDDEN_BY_TYPE: Record = {}; +const EMPTY_HIDDEN_FIELDS: string[] = []; +const EMPTY_TYPE_ORDER: string[] = []; + +const isSchemaObject = (schema: unknown): schema is RJSFSchema => + typeof schema === 
"object" && schema !== null; + +const getUnionSchemas = (schema?: RJSFSchema): RJSFSchema[] => { + if (!schema) { + return []; + } + + const schemaObj = schema as Record; + const union = schemaObj.oneOf ?? schemaObj.anyOf; + if (Array.isArray(union)) { + return union.filter(isSchemaObject) as RJSFSchema[]; + } + + return [schema]; +}; + +const getTypeValues = (schema: RJSFSchema): string[] => { + const schemaObj = schema as Record; + const properties = schemaObj.properties as + | Record + | undefined; + const typeSchema = properties?.type as Record | undefined; + const values: string[] = []; + + if (typeof typeSchema?.const === "string") { + values.push(typeSchema.const); + } + + if (Array.isArray(typeSchema?.enum)) { + typeSchema.enum.forEach((value) => { + if (typeof value === "string") { + values.push(value); + } + }); + } + + return values; +}; + +const buildHiddenUiSchema = (paths: string[]): UiSchema => { + const result: UiSchema = {}; + + paths.forEach((path) => { + if (!path) { + return; + } + + const segments = path.split(".").filter(Boolean); + if (segments.length === 0) { + return; + } + + let cursor = result; + segments.forEach((segment, index) => { + if (index === segments.length - 1) { + cursor[segment] = { + ...(cursor[segment] as UiSchema | undefined), + "ui:widget": "hidden", + } as UiSchema; + return; + } + + const existing = (cursor[segment] as UiSchema | undefined) ?? {}; + cursor[segment] = existing; + cursor = existing; + }); + }); + + return result; +}; + +const getInstanceType = (value: unknown): string | undefined => { + if (!isJsonObject(value)) { + return undefined; + } + + const typeValue = value.type; + return typeof typeValue === "string" && typeValue.length > 0 + ? 
typeValue + : undefined; +}; + +export function DetectorHardwareField(props: FieldProps) { + const { + schema, + uiSchema, + registry, + fieldPathId, + formData: rawFormData, + errorSchema, + disabled, + readonly, + hideError, + onBlur, + onFocus, + onChange, + } = props; + + const formContext = registry.formContext as ConfigFormContext | undefined; + const configNamespace = + formContext?.i18nNamespace ?? + (formContext?.level === "camera" ? "config/cameras" : "config/global"); + const { t: fallbackT } = useTranslation(["common", configNamespace]); + const t = formContext?.t ?? fallbackT; + const sectionPrefix = formContext?.sectionI18nPrefix ?? "detectors"; + const restartRequired = formContext?.restartRequired; + const defaultRequiresRestart = formContext?.requiresRestart ?? true; + + const options = + (uiSchema?.["ui:options"] as DetectorHardwareFieldOptions | undefined) ?? + {}; + const multiInstanceTypes = + options.multiInstanceTypes ?? DEFAULT_MULTI_INSTANCE_TYPES; + const hiddenByType = options.hiddenByType ?? EMPTY_HIDDEN_BY_TYPE; + const hiddenFields = options.hiddenFields ?? EMPTY_HIDDEN_FIELDS; + const typeOrder = options.typeOrder ?? EMPTY_TYPE_ORDER; + const multiInstanceSet = useMemo( + () => new Set(multiInstanceTypes), + [multiInstanceTypes], + ); + const globalHiddenFields = useMemo( + () => + hiddenFields + .map((path) => (path.startsWith("*.") ? path.slice(2) : path)) + .filter((path) => path.length > 0), + [hiddenFields], + ); + + const detectorConfigSchema = useMemo(() => { + const additional = (schema as RJSFSchema | undefined)?.additionalProperties; + if (isSchemaObject(additional)) { + return additional as RJSFSchema; + } + + const rootSchema = registry.rootSchema as Record; + const defs = + (rootSchema?.$defs as Record | undefined) ?? + (rootSchema?.definitions as Record | undefined); + const fallback = defs?.DetectorConfig; + + return isSchemaObject(fallback) ? 
(fallback as RJSFSchema) : undefined; + }, [schema, registry.rootSchema]); + + const detectorSchemas = useMemo(() => { + const entries: DetectorSchemaEntry[] = []; + getUnionSchemas(detectorConfigSchema).forEach((schema) => { + const types = getTypeValues(schema); + types.forEach((type) => { + entries.push({ type, schema }); + }); + }); + return entries; + }, [detectorConfigSchema]); + + const detectorSchemaByType = useMemo(() => { + const map = new Map(); + detectorSchemas.forEach(({ type, schema }) => { + if (!map.has(type)) { + map.set(type, schema); + } + }); + return map; + }, [detectorSchemas]); + + const availableTypes = useMemo( + () => detectorSchemas.map((entry) => entry.type), + [detectorSchemas], + ); + + const orderedTypes = useMemo(() => { + if (!typeOrder.length) { + return availableTypes; + } + + const availableSet = new Set(availableTypes); + const ordered = typeOrder.filter((type) => availableSet.has(type)); + const orderedSet = new Set(ordered); + const remaining = availableTypes.filter((type) => !orderedSet.has(type)); + return [...ordered, ...remaining]; + }, [availableTypes, typeOrder]); + + const formData = isJsonObject(rawFormData) ? 
rawFormData : {}; + const detectors = formData as JsonObject; + + const [addType, setAddType] = useState(orderedTypes[0]); + const [addError, setAddError] = useState(); + const [renameDrafts, setRenameDrafts] = useState>({}); + const [renameErrors, setRenameErrors] = useState>({}); + const [typeErrors, setTypeErrors] = useState>({}); + const [openKeys, setOpenKeys] = useState>( + () => new Set(Object.keys(detectors)), + ); + + useEffect(() => { + if (!orderedTypes.length) { + setAddType(undefined); + return; + } + + if (!addType || !orderedTypes.includes(addType)) { + setAddType(orderedTypes[0]); + } + }, [orderedTypes, addType]); + + useEffect(() => { + setOpenKeys((prev) => { + const next = new Set(); + Object.keys(detectors).forEach((key) => { + if (prev.has(key)) { + next.add(key); + } + }); + return next; + }); + + setRenameDrafts((prev) => { + const next: Record = {}; + Object.keys(detectors).forEach((key) => { + if (prev[key] !== undefined) { + next[key] = prev[key]; + } + }); + return next; + }); + + setRenameErrors((prev) => { + const next: Record = {}; + Object.keys(detectors).forEach((key) => { + if (prev[key] !== undefined) { + next[key] = prev[key]; + } + }); + return next; + }); + + setTypeErrors((prev) => { + const next: Record = {}; + Object.keys(detectors).forEach((key) => { + if (prev[key] !== undefined) { + next[key] = prev[key]; + } + }); + return next; + }); + }, [detectors]); + + const updateDetectors = useCallback( + (nextDetectors: JsonObject, path?: FieldPathList) => { + onChange(nextDetectors as unknown, path ?? 
fieldPathId.path); + }, + [fieldPathId.path, onChange], + ); + + const getTypeLabel = useCallback( + (type: string) => + t(`${sectionPrefix}.${type}.label`, { + ns: configNamespace, + defaultValue: humanizeKey(type), + }), + [t, sectionPrefix, configNamespace], + ); + + const getTypeDescription = useCallback( + (type: string) => + t(`${sectionPrefix}.${type}.description`, { + ns: configNamespace, + defaultValue: "", + }), + [t, sectionPrefix, configNamespace], + ); + + const shouldShowRestartForPath = useCallback( + (path: Array) => + requiresRestartForFieldPath( + path, + restartRequired, + defaultRequiresRestart, + ), + [defaultRequiresRestart, restartRequired], + ); + + const renderRestartIcon = (isRequired: boolean) => { + if (!isRequired) { + return null; + } + + return ; + }; + + const isSingleInstanceType = useCallback( + (type: string) => !multiInstanceSet.has(type), + [multiInstanceSet], + ); + + const getDetectorDefaults = useCallback( + (type: string) => { + const schema = detectorSchemaByType.get(type); + if (!schema) { + return { type }; + } + + const base = { type } as Record; + const withDefaults = applySchemaDefaults(schema, base); + return { ...withDefaults, type } as Record; + }, + [detectorSchemaByType], + ); + + const resolveDuplicateType = useCallback( + (targetType: string, excludeKey?: string) => { + return Object.entries(detectors).some(([key, value]) => { + if (excludeKey && key === excludeKey) { + return false; + } + return getInstanceType(value) === targetType; + }); + }, + [detectors], + ); + + const handleAdd = useCallback(() => { + if (!addType) { + setAddError( + t("selectItem", { + ns: "common", + defaultValue: "Select {{item}}", + item: t("detectors.type.label", { + ns: configNamespace, + defaultValue: "Type", + }), + }), + ); + return; + } + + if (isSingleInstanceType(addType) && resolveDuplicateType(addType)) { + setAddError( + t("configForm.detectors.singleType", { + ns: "views/settings", + defaultValue: "Only one {{type}} 
detector is allowed.", + type: getTypeLabel(addType), + }), + ); + return; + } + + const baseKey = addType; + let nextKey = baseKey; + let index = 2; + while (Object.prototype.hasOwnProperty.call(detectors, nextKey)) { + nextKey = `${baseKey}${index}`; + index += 1; + } + + const nextDetectors = { + ...detectors, + [nextKey]: getDetectorDefaults(addType), + } as JsonObject; + + setAddError(undefined); + setOpenKeys((prev) => { + const next = new Set(prev); + next.add(nextKey); + return next; + }); + + updateDetectors(nextDetectors); + }, [ + addType, + t, + configNamespace, + detectors, + getDetectorDefaults, + getTypeLabel, + isSingleInstanceType, + resolveDuplicateType, + updateDetectors, + ]); + + const handleRemove = useCallback( + (key: string) => { + const { [key]: _, ...rest } = detectors; + updateDetectors(rest as JsonObject); + setOpenKeys((prev) => { + const next = new Set(prev); + next.delete(key); + return next; + }); + }, + [detectors, updateDetectors], + ); + + const commitRename = useCallback( + (key: string, nextKey: string) => { + const trimmed = nextKey.trim(); + if (!trimmed) { + setRenameErrors((prev) => ({ + ...prev, + [key]: t("configForm.detectors.keyRequired", { + ns: "views/settings", + defaultValue: "Detector name is required.", + }), + })); + return; + } + + if (trimmed !== key && detectors[trimmed] !== undefined) { + setRenameErrors((prev) => ({ + ...prev, + [key]: t("configForm.detectors.keyDuplicate", { + ns: "views/settings", + defaultValue: "Detector name already exists.", + }), + })); + return; + } + + setRenameErrors((prev) => { + const { [key]: _, ...rest } = prev; + return rest; + }); + + setRenameDrafts((prev) => { + const { [key]: _, ...rest } = prev; + return rest; + }); + + if (trimmed === key) { + return; + } + + const { [key]: value, ...rest } = detectors; + const nextDetectors = { ...rest, [trimmed]: value } as JsonObject; + + setOpenKeys((prev) => { + const next = new Set(prev); + if (next.delete(key)) { + 
next.add(trimmed); + } + return next; + }); + + updateDetectors(nextDetectors); + }, + [detectors, t, updateDetectors], + ); + + const handleTypeChange = useCallback( + (key: string, nextType: string) => { + const currentType = getInstanceType(detectors[key]); + if (!nextType || nextType === currentType) { + return; + } + + if ( + isSingleInstanceType(nextType) && + resolveDuplicateType(nextType, key) + ) { + setTypeErrors((prev) => ({ + ...prev, + [key]: t("configForm.detectors.singleType", { + ns: "views/settings", + defaultValue: "Only one {{type}} detector is allowed.", + type: getTypeLabel(nextType), + }), + })); + return; + } + + setTypeErrors((prev) => { + const { [key]: _, ...rest } = prev; + return rest; + }); + + const nextDetectors = { + ...detectors, + [key]: getDetectorDefaults(nextType), + } as JsonObject; + + updateDetectors(nextDetectors); + }, + [ + detectors, + getDetectorDefaults, + getTypeLabel, + isSingleInstanceType, + resolveDuplicateType, + t, + updateDetectors, + ], + ); + + const getInstanceUiSchema = useCallback( + (type: string) => { + const baseUiSchema = + (uiSchema?.additionalProperties as UiSchema | undefined) ?? {}; + const globalHidden = buildHiddenUiSchema(globalHiddenFields); + const hiddenOverrides = buildHiddenUiSchema(hiddenByType[type] ?? 
[]); + const typeHidden = { type: { "ui:widget": "hidden" } } as UiSchema; + const nestedOverrides = { + "ui:options": { + disableNestedCard: true, + }, + } as UiSchema; + + const withGlobalHidden = mergeUiSchema(baseUiSchema, globalHidden); + const withTypeHidden = mergeUiSchema(withGlobalHidden, hiddenOverrides); + const withTypeHiddenAndOptions = mergeUiSchema( + withTypeHidden, + typeHidden, + ); + return mergeUiSchema(withTypeHiddenAndOptions, nestedOverrides); + }, + [globalHiddenFields, hiddenByType, uiSchema?.additionalProperties], + ); + + const renderInstanceForm = useCallback( + (key: string, value: unknown) => { + const SchemaField = registry.fields.SchemaField; + const type = getInstanceType(value); + const schema = type ? detectorSchemaByType.get(type) : undefined; + + if (!SchemaField || !schema || !type) { + return null; + } + + const instanceUiSchema = getInstanceUiSchema(type); + const instanceFieldPathId = toFieldPathId( + key, + registry.globalFormOptions, + fieldPathId.path, + ); + + const instanceErrorSchema = ( + errorSchema as Record | undefined + )?.[key]; + + const handleInstanceChange = ( + nextValue: unknown, + path: FieldPathList, + errors?: ErrorSchema, + id?: string, + ) => { + onChange(nextValue, path, errors, id); + }; + + return ( + + ); + }, + [ + detectorSchemaByType, + getInstanceUiSchema, + disabled, + errorSchema, + fieldPathId, + hideError, + onChange, + onBlur, + onFocus, + readonly, + registry, + ], + ); + + if (!availableTypes.length) { + return ( +

+ {t("configForm.detectors.noSchema", { + ns: "views/settings", + defaultValue: "No detector schemas are available.", + })} +

+ ); + } + + const detectorEntries = Object.entries(detectors); + const isDisabled = Boolean(disabled || readonly); + + return ( +
+ {detectorEntries.length === 0 ? ( +

+ {t("configForm.detectors.none", { + ns: "views/settings", + defaultValue: "No detector instances configured.", + })} +

+ ) : ( +
+ {detectorEntries.map(([key, value]) => { + const type = getInstanceType(value) ?? ""; + const typeLabel = type ? getTypeLabel(type) : key; + const typeDescription = type ? getTypeDescription(type) : ""; + const isOpen = openKeys.has(key); + const renameDraft = renameDrafts[key] ?? key; + const detectorPath = [...fieldPathId.path, key]; + const detectorTypePath = [...detectorPath, "type"]; + const detectorTypeRequiresRestart = + shouldShowRestartForPath(detectorTypePath); + + return ( +
+ { + setOpenKeys((prev) => { + const next = new Set(prev); + if (open) { + next.add(key); + } else { + next.delete(key); + } + return next; + }); + }} + > +
+
+ + + +
+
+ {typeLabel} + {renderRestartIcon(detectorTypeRequiresRestart)} + + {key} + +
+ {typeDescription && ( +
+ {typeDescription} +
+ )} +
+
+ +
+ +
+
+
+ + { + setRenameDrafts((prev) => ({ + ...prev, + [key]: event.target.value, + })); + }} + onBlur={(event) => + commitRename(key, event.target.value) + } + onKeyDown={(event) => { + if (event.key === "Enter") { + event.preventDefault(); + commitRename(key, renameDraft); + } + }} + /> +

+ {t("field.internalID", { + ns: "common", + defaultValue: + "The Internal ID Frigate uses in the configuration and database", + })} +

+ {renameErrors[key] && ( +

+ {renameErrors[key]} +

+ )} +
+
+ + + {typeErrors[key] && ( +

+ {typeErrors[key]} +

+ )} +
+
+ +
+ {renderInstanceForm(key, value)} +
+
+
+
+
+ ); + })} +
+ )} + +
+
+
+ {t("configForm.detectors.add", { + ns: "views/settings", + defaultValue: "Add detector", + })} +
+
+
+ + + {addError &&

{addError}

} +
+
+ +
+
+
+
+
+ ); +} diff --git a/web/src/components/config-form/theme/fields/LayoutGridField.tsx b/web/src/components/config-form/theme/fields/LayoutGridField.tsx new file mode 100644 index 000000000..9953794d0 --- /dev/null +++ b/web/src/components/config-form/theme/fields/LayoutGridField.tsx @@ -0,0 +1,587 @@ +/** + * LayoutGridField - RJSF field for responsive, semantic grid layouts + * + * Overview: + * - Apply a responsive grid to object properties using `ui:layoutGrid` while + * preserving the default `ObjectFieldTemplate` behavior (cards, nested + * collapsibles, add button, and i18n). + * - Falls back to the original template when `ui:layoutGrid` is not present. + * + * Capabilities: + * - 12-column grid logic. `ui:col` accepts a number (1-12) or a Tailwind class + * string (e.g. "col-span-12 md:col-span-4") for responsive column widths. + * - Per-row and global class overrides: + * - `ui:options.layoutGrid.rowClassName` (default: "grid-cols-12") is merged + * with the base `grid gap-4` classes. + * - `ui:options.layoutGrid.advancedRowClassName` (default: "grid-cols-12") + * controls advanced-section rows. + * - Per-row `ui:className` and per-column `ui:className`/`className` are + * supported for fine-grained layout control. + * - Optional `useGridForAdvanced` (via `ui:options.layoutGrid`) to toggle + * whether advanced fields use the grid or fall back to stacked layout. + * - Integrates with `ui:groups` to show semantic group labels (resolved via + * `config/groups` i18n). If a layout row contains fields from the same group, + * that row shows the group label above it; leftover or ungrouped fields are + * rendered after the configured rows. + * - Hidden fields (`ui:widget: "hidden"`) are ignored. + * + * Internationalization + * - Advanced collapsible labels use `label.advancedSettingsCount` and + * `label.advancedCount` in the `common` namespace. + * - Group labels are looked up in `config/groups` (uses `sectionI18nPrefix` + * when available). 
+ * + * Usage examples: + * Basic: + * { + * "ui:field": "LayoutGridField", + * "ui:layoutGrid": [ + * { "ui:row": ["field1", "field2"] }, + * { "ui:row": ["field3"] } + * ] + * } + * + * Custom columns and responsive classes: + * { + * "ui:field": "LayoutGridField", + * "ui:options": { + * "layoutGrid": { "rowClassName": "grid-cols-12 md:grid-cols-6", "useGridForAdvanced": true } + * }, + * "ui:layoutGrid": [ + * { + * "ui:row": [ + * { "field1": { "ui:col": "col-span-12 md:col-span-4", "ui:className": "md:col-start-2" } }, + * { "field2": { "ui:col": 4 } } + * ], + * "ui:className": "gap-6" + * } + * ] + * } + * + * Groups and rows: + * { + * "ui:field": "LayoutGridField", + * "ui:groups": { "resolution": ["fps","width","height"], "tracking": ["min_initialized","max_disappeared"] }, + * "ui:layoutGrid": [ + * { "ui:row": ["enabled"] }, + * { "ui:row": ["fps","width","height"] } + * ] + * } + * + * Notes: + * - `ui:layoutGrid` must be an array; non-array values are ignored and the + * default ObjectFieldTemplate is used instead. + * - This implementation adheres to RJSF patterns (use `ui:options`, + * `ui:className`, and `ui:layoutGrid` as documented) while adding a few + * Frigate-specific conveniences (defaults and Tailwind-friendly class + * handling). 
+ */ + +import type { FieldProps, ObjectFieldTemplateProps } from "@rjsf/utils"; +import { useCallback, useMemo, useState } from "react"; +import { useTranslation } from "react-i18next"; +import { cn } from "@/lib/utils"; +import { ConfigFormContext } from "@/types/configForm"; +import { + getDomainFromNamespace, + hasOverrideAtPath, + humanizeKey, +} from "../utils"; +import { AddPropertyButton, AdvancedCollapsible } from "../components"; + +type LayoutGridColumnConfig = { + "ui:col"?: number | string; + "ui:className"?: string; + className?: string; +}; + +type LayoutRow = { + "ui:row": Array>; + "ui:className"?: string; + className?: string; +}; + +type LayoutGrid = LayoutRow[]; + +type LayoutGridOptions = { + rowClassName?: string; + advancedRowClassName?: string; + useGridForAdvanced?: boolean; +}; + +interface PropertyElement { + name: string; + content: React.ReactElement; +} + +function isObjectLikeElement(item: PropertyElement) { + const fieldSchema = item.content.props?.schema as + | { type?: string | string[] } + | undefined; + return fieldSchema?.type === "object"; +} + +// Custom ObjectFieldTemplate wrapper that applies grid layout +function GridLayoutObjectFieldTemplate( + props: ObjectFieldTemplateProps, + originalObjectFieldTemplate: React.ComponentType, +) { + const { + uiSchema, + properties, + registry, + schema, + onAddProperty, + formData, + disabled, + readonly, + } = props; + const formContext = registry?.formContext as ConfigFormContext | undefined; + const { t } = useTranslation(["common", "config/groups"]); + + // Use the original ObjectFieldTemplate passed as parameter, not from registry + const ObjectFieldTemplate = originalObjectFieldTemplate; + + // Get layout configuration + const layoutGrid = Array.isArray(uiSchema?.["ui:layoutGrid"]) + ? (uiSchema?.["ui:layoutGrid"] as LayoutGrid) + : []; + const layoutGridOptions = + (uiSchema?.["ui:options"] as { layoutGrid?: LayoutGridOptions } | undefined) + ?.layoutGrid ?? 
{}; + const baseRowClassName = layoutGridOptions.rowClassName ?? "grid-cols-12"; + const advancedRowClassName = + layoutGridOptions.advancedRowClassName ?? "grid-cols-12"; + const useGridForAdvanced = layoutGridOptions.useGridForAdvanced ?? true; + const groupDefinitions = + (uiSchema?.["ui:groups"] as Record | undefined) || {}; + const overrides = formContext?.overrides; + const fieldPath = props.fieldPathId.path; + + const isPathModified = (path: Array) => + hasOverrideAtPath(overrides, path, formContext?.formData); + + // Override the properties rendering with grid layout + const isHiddenProp = (prop: (typeof properties)[number]) => + prop.content.props.uiSchema?.["ui:widget"] === "hidden"; + + const visibleProps = properties.filter((prop) => !isHiddenProp(prop)); + + // Separate regular and advanced properties + const advancedProps = visibleProps.filter( + (p) => p.content.props.uiSchema?.["ui:options"]?.advanced === true, + ); + const regularProps = visibleProps.filter( + (p) => p.content.props.uiSchema?.["ui:options"]?.advanced !== true, + ); + const hasModifiedAdvanced = advancedProps.some((prop) => + isPathModified([...fieldPath, prop.name]), + ); + const [showAdvanced, setShowAdvanced] = useState(hasModifiedAdvanced); + + // If no layout grid is defined, use the default template + if (layoutGrid.length === 0) { + return ; + } + + const domain = getDomainFromNamespace(formContext?.i18nNamespace); + const sectionI18nPrefix = formContext?.sectionI18nPrefix; + + const getGroupLabel = (groupKey: string) => { + if (domain && sectionI18nPrefix) { + return t(`${sectionI18nPrefix}.${domain}.${groupKey}`, { + ns: "config/groups", + defaultValue: humanizeKey(groupKey), + }); + } + + return t(`groups.${groupKey}`, { + ns: "config/groups", + defaultValue: humanizeKey(groupKey), + }); + }; + + // Render fields using the layout grid structure + const renderGridLayout = (items: PropertyElement[], rowClassName: string) => { + if (!items.length) { + return null; + } + + // 
Create a map for quick lookup + const itemMap = new Map(items.map((item) => [item.name, item])); + const renderedFields = new Set(); + + return ( +
+ {layoutGrid.map((rowDef, rowIndex) => { + const rowItems = rowDef["ui:row"]; + const cols: React.ReactNode[] = []; + + rowItems.forEach((colDef, colIndex) => { + let fieldName: string; + let colSpan: number | string = 12; // Default to full width + let colClassName: string | undefined; + + if (typeof colDef === "string") { + fieldName = colDef; + } else { + // Object with field name as key and ui:col as value + const entries = Object.entries(colDef); + if (entries.length === 0) return; + const [name, config] = entries[0]; + fieldName = name; + colSpan = config["ui:col"] ?? 12; + colClassName = config["ui:className"] ?? config.className; + } + + const item = itemMap.get(fieldName); + if (!item) return; + + renderedFields.add(fieldName); + + // Calculate column width class (using 12-column grid) + const colSpanClass = + typeof colSpan === "string" ? colSpan : `col-span-${colSpan}`; + const colClass = cn(colSpanClass, colClassName); + + cols.push( +
+ {item.content} +
, + ); + }); + + if (cols.length === 0) return null; + + const rowClass = cn( + "grid gap-4", + rowClassName, + rowDef["ui:className"], + rowDef.className, + ); + + return ( +
+ {cols} +
+ ); + })} + + {Array.from(itemMap.keys()) + .filter((name) => !renderedFields.has(name)) + .map((name) => { + const item = itemMap.get(name); + return item ? ( +
+ {item.content} +
+ ) : null; + })} +
+ ); + }; + + const renderGroupedGridLayout = ( + items: PropertyElement[], + rowClassName: string, + ) => { + if (!items.length) { + return null; + } + + if (Object.keys(groupDefinitions).length === 0) { + return renderGridLayout(items, rowClassName); + } + + const itemMap = new Map(items.map((item) => [item.name, item])); + const renderedFields = new Set(); + const renderedGroups = new Set(); + const groupMap = new Map(); + + Object.entries(groupDefinitions).forEach(([groupKey, fields]) => { + fields.forEach((field) => { + groupMap.set(field, groupKey); + }); + }); + + const rows = layoutGrid + .map((rowDef, rowIndex) => { + const rowItems = rowDef["ui:row"]; + const cols: React.ReactNode[] = []; + const rowFieldNames: string[] = []; + + rowItems.forEach((colDef, colIndex) => { + let fieldName: string; + let colSpan: number | string = 12; + let colClassName: string | undefined; + + if (typeof colDef === "string") { + fieldName = colDef; + } else { + const entries = Object.entries(colDef); + if (entries.length === 0) return; + const [name, config] = entries[0]; + fieldName = name; + colSpan = config["ui:col"] ?? 12; + colClassName = config["ui:className"] ?? config.className; + } + + const item = itemMap.get(fieldName); + if (!item) return; + + renderedFields.add(fieldName); + rowFieldNames.push(fieldName); + + const colSpanClass = + typeof colSpan === "string" ? colSpan : `col-span-${colSpan}`; + const colClass = cn(colSpanClass, colClassName); + + cols.push( +
+ {item.content} +
, + ); + }); + + if (cols.length === 0) return null; + + const rowClass = cn( + "grid gap-4", + rowClassName, + rowDef["ui:className"], + rowDef.className, + ); + + const rowGroupKeys = rowFieldNames + .map((name) => groupMap.get(name)) + .filter(Boolean) as string[]; + const rowGroupKey = + rowGroupKeys.length > 0 && + rowGroupKeys.length === rowFieldNames.length && + new Set(rowGroupKeys).size === 1 + ? rowGroupKeys[0] + : undefined; + const showGroupLabel = rowGroupKey && !renderedGroups.has(rowGroupKey); + + if (showGroupLabel) { + renderedGroups.add(rowGroupKey); + } + + return ( +
+ {showGroupLabel && ( +
+ {getGroupLabel(rowGroupKey)} +
+ )} +
{cols}
+
+ ); + }) + .filter(Boolean); + + const remainingItems = Array.from(itemMap.keys()) + .filter((name) => !renderedFields.has(name)) + .map((name) => itemMap.get(name)) + .filter(Boolean) as PropertyElement[]; + + const groupedLeftovers = new Map(); + const ungroupedLeftovers: PropertyElement[] = []; + + remainingItems.forEach((item) => { + const groupKey = groupMap.get(item.name); + if (groupKey) { + const existing = groupedLeftovers.get(groupKey); + if (existing) { + existing.push(item); + } else { + groupedLeftovers.set(groupKey, [item]); + } + } else { + ungroupedLeftovers.push(item); + } + }); + + const leftoverSections: React.ReactNode[] = []; + + groupedLeftovers.forEach((groupItems, groupKey) => { + const showGroupLabel = !renderedGroups.has(groupKey); + if (showGroupLabel) { + renderedGroups.add(groupKey); + } + + leftoverSections.push( +
+ {showGroupLabel && ( +
+ {getGroupLabel(groupKey)} +
+ )} +
+ {groupItems.map((item) => ( +
{item.content}
+ ))} +
+
, + ); + }); + + if (ungroupedLeftovers.length > 0) { + leftoverSections.push( +
0 || groupedLeftovers.size > 0) && "pt-2", + )} + > + {ungroupedLeftovers.map((item) => ( +
0 && + !isObjectLikeElement(item) && + "px-4", + )} + > + {item.content} +
+ ))} +
, + ); + } + + return ( +
+ {rows} + {leftoverSections} +
+ ); + }; + + const renderStackedLayout = (items: PropertyElement[]) => { + if (!items.length) { + return null; + } + + return ( +
+ {items.map((item) => ( +
{item.content}
+ ))} +
+ ); + }; + + const regularLayout = renderGroupedGridLayout(regularProps, baseRowClassName); + const advancedLayout = useGridForAdvanced + ? renderGroupedGridLayout(advancedProps, advancedRowClassName) + : renderStackedLayout(advancedProps); + + // Create modified props with custom property rendering + // Render using the original template but with our custom content + const isRoot = registry?.rootSchema === props.schema; + + if (isRoot) { + return ( +
+ {regularLayout} + + + + {advancedLayout} + +
+ ); + } + + // We need to inject our custom rendering into the template + // Since we can't directly modify the template's internal rendering, + // we'll render the full structure ourselves + return ( + +
+ {regularLayout} + + + + {advancedLayout} + +
+
+ ); +} + +export function LayoutGridField(props: FieldProps) { + const { registry, schema, uiSchema, idSchema, formData } = props; + + // Store the original ObjectFieldTemplate before any modifications + const originalObjectFieldTemplate = registry.templates.ObjectFieldTemplate; + + // Get the ObjectField component from the registry + const ObjectField = registry.fields.ObjectField; + + // Create a modified registry with our custom template + // But we'll pass the original template to it to prevent circular reference + const gridObjectFieldTemplate = useCallback( + (tProps: ObjectFieldTemplateProps) => + GridLayoutObjectFieldTemplate(tProps, originalObjectFieldTemplate), + [originalObjectFieldTemplate], + ); + + const modifiedRegistry = useMemo( + () => ({ + ...registry, + templates: { + ...registry.templates, + ObjectFieldTemplate: gridObjectFieldTemplate, + }, + }), + [registry, gridObjectFieldTemplate], + ); + + // Delegate to ObjectField with the modified registry + return ( + + ); +} diff --git a/web/src/components/config-form/theme/fields/ReplaceRulesField.tsx b/web/src/components/config-form/theme/fields/ReplaceRulesField.tsx new file mode 100644 index 000000000..6724854c9 --- /dev/null +++ b/web/src/components/config-form/theme/fields/ReplaceRulesField.tsx @@ -0,0 +1,253 @@ +import type { FieldPathList, FieldProps, RJSFSchema } from "@rjsf/utils"; +import { useCallback, useEffect, useMemo, useState } from "react"; +import { useTranslation } from "react-i18next"; +import { + Collapsible, + CollapsibleContent, + CollapsibleTrigger, +} from "@/components/ui/collapsible"; +import { Card, CardContent, CardHeader, CardTitle } from "@/components/ui/card"; +import { Button } from "@/components/ui/button"; +import { Input } from "@/components/ui/input"; +import { Label } from "@/components/ui/label"; +import { cn } from "@/lib/utils"; +import { + LuChevronDown, + LuChevronRight, + LuPlus, + LuTrash2, +} from "react-icons/lu"; +import type { ConfigFormContext } from 
"@/types/configForm"; +import get from "lodash/get"; +import { isSubtreeModified } from "../utils"; + +type ReplaceRule = { + pattern?: string; + replacement?: string; +}; + +function getItemSchema(schema: RJSFSchema): RJSFSchema | undefined { + const items = schema.items; + if (!items || typeof items !== "object" || Array.isArray(items)) { + return undefined; + } + return items as RJSFSchema; +} + +function getPropertyTitle(itemSchema: RJSFSchema | undefined, key: string) { + const props = (itemSchema as { properties?: Record }) + ?.properties; + const title = props?.[key]?.title; + return typeof title === "string" ? title : undefined; +} + +export function ReplaceRulesField(props: FieldProps) { + const { schema, formData, onChange, idSchema, disabled, readonly } = props; + const formContext = props.registry?.formContext as + | ConfigFormContext + | undefined; + + const { t } = useTranslation(["common"]); + + const rules: ReplaceRule[] = useMemo(() => { + if (!Array.isArray(formData)) { + return []; + } + return formData as ReplaceRule[]; + }, [formData]); + + const itemSchema = useMemo( + () => getItemSchema(schema as RJSFSchema), + [schema], + ); + const title = (schema as RJSFSchema).title; + const description = (schema as RJSFSchema).description; + const patternTitle = getPropertyTitle(itemSchema, "pattern"); + const replacementTitle = getPropertyTitle(itemSchema, "replacement"); + + const hasItems = rules.length > 0; + const emptyPath = useMemo(() => [] as FieldPathList, []); + const fieldPath = + (props as { fieldPathId?: { path?: FieldPathList } }).fieldPathId?.path ?? + emptyPath; + + const isModified = useMemo(() => { + const baselineRoot = formContext?.baselineFormData; + const baselineValue = baselineRoot + ? 
get(baselineRoot, fieldPath) + : undefined; + return isSubtreeModified( + rules, + baselineValue, + formContext?.overrides, + fieldPath, + formContext?.formData, + ); + }, [fieldPath, formContext, rules]); + + const [open, setOpen] = useState(hasItems || isModified); + + useEffect(() => { + if (isModified) { + setOpen(true); + } + }, [isModified]); + + useEffect(() => { + if (hasItems) { + setOpen(true); + } + }, [hasItems]); + + const handleAdd = useCallback(() => { + const next = [...rules, { pattern: "", replacement: "" }]; + onChange(next, fieldPath); + }, [fieldPath, onChange, rules]); + + const handleRemove = useCallback( + (index: number) => { + const next = rules.filter((_, i) => i !== index); + onChange(next, fieldPath); + }, + [fieldPath, onChange, rules], + ); + + const handleUpdate = useCallback( + (index: number, patch: Partial) => { + const next = rules.map((rule, i) => { + if (i !== index) { + return rule; + } + return { + ...rule, + ...patch, + }; + }); + onChange(next, fieldPath); + }, + [fieldPath, onChange, rules], + ); + + const baseId = idSchema?.$id || "replace_rules"; + const deleteLabel = t("button.delete", { + ns: "common", + defaultValue: "Delete", + }); + + return ( + + + + +
+
+ + {title} + + {description && ( +

+ {description} +

+ )} +
+ {open ? ( + + ) : ( + + )} +
+
+
+ + + + {rules.length > 0 && ( +
+
+ {patternTitle && ( + + )} +
+
+ {replacementTitle && ( + + )} +
+
+ )} + +
+ {rules.map((rule, index) => { + const patternId = `${baseId}-${index}-pattern`; + const replacementId = `${baseId}-${index}-replacement`; + + return ( +
+
+ + handleUpdate(index, { pattern: e.target.value }) + } + /> +
+
+ + handleUpdate(index, { replacement: e.target.value }) + } + /> +
+
+ +
+
+ ); + })} +
+ +
+ +
+
+
+
+
+ ); +} + +export default ReplaceRulesField; diff --git a/web/src/components/config-form/theme/fields/index.ts b/web/src/components/config-form/theme/fields/index.ts new file mode 100644 index 000000000..b6b707866 --- /dev/null +++ b/web/src/components/config-form/theme/fields/index.ts @@ -0,0 +1,4 @@ +// Custom RJSF Fields +export { LayoutGridField } from "./LayoutGridField"; +export { DetectorHardwareField } from "./DetectorHardwareField"; +export { ReplaceRulesField } from "./ReplaceRulesField"; diff --git a/web/src/components/config-form/theme/fields/nullableUtils.ts b/web/src/components/config-form/theme/fields/nullableUtils.ts new file mode 100644 index 000000000..ed3939b88 --- /dev/null +++ b/web/src/components/config-form/theme/fields/nullableUtils.ts @@ -0,0 +1,60 @@ +// Utilities for handling anyOf with null patterns +import type { StrictRJSFSchema } from "@rjsf/utils"; + +/** + * Checks if a schema is anyOf/oneOf with exactly [Type, null]. + * This indicates a nullable field in Pydantic schemas. + */ +export function isNullableUnionSchema(schema: StrictRJSFSchema): boolean { + const union = schema.anyOf ?? schema.oneOf; + if (!union || !Array.isArray(union) || union.length !== 2) { + return false; + } + + let hasNull = false; + let nonNullCount = 0; + + for (const item of union) { + if (typeof item !== "object" || item === null) { + return false; + } + + const itemSchema = item as StrictRJSFSchema; + + if (itemSchema.type === "null") { + hasNull = true; + } else { + nonNullCount += 1; + } + } + + return hasNull && nonNullCount === 1; +} + +/** + * Backwards-compatible alias for nullable fields + */ +export function isSimpleNullableField(schema: StrictRJSFSchema): boolean { + return isNullableUnionSchema(schema); +} + +/** + * Get the non-null schema from an anyOf containing [Type, null] + */ +export function getNonNullSchema( + schema: StrictRJSFSchema, +): StrictRJSFSchema | null { + const union = schema.anyOf ?? 
schema.oneOf; + if (!union || !Array.isArray(union)) { + return null; + } + + return ( + (union.find( + (item) => + typeof item === "object" && + item !== null && + (item as StrictRJSFSchema).type !== "null", + ) as StrictRJSFSchema) || null + ); +} diff --git a/web/src/components/config-form/theme/frigateTheme.ts b/web/src/components/config-form/theme/frigateTheme.ts new file mode 100644 index 000000000..3baa2f3ad --- /dev/null +++ b/web/src/components/config-form/theme/frigateTheme.ts @@ -0,0 +1,95 @@ +// Custom RJSF Theme for Frigate +// Maps RJSF templates and widgets to shadcn/ui components + +import type { + WidgetProps, + FieldTemplateProps, + RegistryWidgetsType, + RegistryFieldsType, + TemplatesType, +} from "@rjsf/utils"; + +import { SwitchWidget } from "./widgets/SwitchWidget"; +import { SelectWidget } from "./widgets/SelectWidget"; +import { TextWidget } from "./widgets/TextWidget"; +import { PasswordWidget } from "./widgets/PasswordWidget"; +import { RangeWidget } from "./widgets/RangeWidget"; +import { TagsWidget } from "./widgets/TagsWidget"; +import { ColorWidget } from "./widgets/ColorWidget"; +import { TextareaWidget } from "./widgets/TextareaWidget"; +import { SwitchesWidget } from "./widgets/SwitchesWidget"; +import { ObjectLabelSwitchesWidget } from "./widgets/ObjectLabelSwitchesWidget"; +import { AudioLabelSwitchesWidget } from "./widgets/AudioLabelSwitchesWidget"; +import { ZoneSwitchesWidget } from "./widgets/ZoneSwitchesWidget"; +import { ArrayAsTextWidget } from "./widgets/ArrayAsTextWidget"; +import { FfmpegArgsWidget } from "./widgets/FfmpegArgsWidget"; +import { InputRolesWidget } from "./widgets/InputRolesWidget"; +import { TimezoneSelectWidget } from "./widgets/TimezoneSelectWidget"; +import { CameraPathWidget } from "./widgets/CameraPathWidget"; + +import { FieldTemplate } from "./templates/FieldTemplate"; +import { ObjectFieldTemplate } from "./templates/ObjectFieldTemplate"; +import { ArrayFieldTemplate } from 
"./templates/ArrayFieldTemplate"; +import { ArrayFieldItemTemplate } from "./templates/ArrayFieldItemTemplate"; +import { BaseInputTemplate } from "./templates/BaseInputTemplate"; +import { DescriptionFieldTemplate } from "./templates/DescriptionFieldTemplate"; +import { TitleFieldTemplate } from "./templates/TitleFieldTemplate"; +import { ErrorListTemplate } from "./templates/ErrorListTemplate"; +import { MultiSchemaFieldTemplate } from "./templates/MultiSchemaFieldTemplate"; +import { WrapIfAdditionalTemplate } from "./templates/WrapIfAdditionalTemplate"; + +import { LayoutGridField } from "./fields/LayoutGridField"; +import { DetectorHardwareField } from "./fields/DetectorHardwareField"; +import { ReplaceRulesField } from "./fields/ReplaceRulesField"; +import { CameraInputsField } from "./fields/CameraInputsField"; + +export interface FrigateTheme { + widgets: RegistryWidgetsType; + templates: Partial; + fields: RegistryFieldsType; +} + +export const frigateTheme: FrigateTheme = { + widgets: { + // Override default widgets with shadcn/ui styled versions + TextWidget: TextWidget, + PasswordWidget: PasswordWidget, + SelectWidget: SelectWidget, + CheckboxWidget: SwitchWidget, + ArrayAsTextWidget: ArrayAsTextWidget, + FfmpegArgsWidget: FfmpegArgsWidget, + CameraPathWidget: CameraPathWidget, + inputRoles: InputRolesWidget, + // Custom widgets + switch: SwitchWidget, + password: PasswordWidget, + select: SelectWidget, + range: RangeWidget, + tags: TagsWidget, + color: ColorWidget, + textarea: TextareaWidget, + switches: SwitchesWidget, + objectLabels: ObjectLabelSwitchesWidget, + audioLabels: AudioLabelSwitchesWidget, + zoneNames: ZoneSwitchesWidget, + timezoneSelect: TimezoneSelectWidget, + }, + templates: { + FieldTemplate: FieldTemplate as React.ComponentType, + ObjectFieldTemplate: ObjectFieldTemplate, + ArrayFieldTemplate: ArrayFieldTemplate, + ArrayFieldItemTemplate: ArrayFieldItemTemplate, + BaseInputTemplate: BaseInputTemplate as React.ComponentType, + 
DescriptionFieldTemplate: DescriptionFieldTemplate, + TitleFieldTemplate: TitleFieldTemplate, + ErrorListTemplate: ErrorListTemplate, + MultiSchemaFieldTemplate: MultiSchemaFieldTemplate, + WrapIfAdditionalTemplate: WrapIfAdditionalTemplate, + }, + fields: { + LayoutGridField: LayoutGridField, + DetectorHardwareField: DetectorHardwareField, + ReplaceRulesField: ReplaceRulesField, + CameraInputsField: CameraInputsField, + }, +}; diff --git a/web/src/components/config-form/theme/index.ts b/web/src/components/config-form/theme/index.ts new file mode 100644 index 000000000..fbc4123c3 --- /dev/null +++ b/web/src/components/config-form/theme/index.ts @@ -0,0 +1,5 @@ +// RJSF Custom Theme +// Maps RJSF components to existing shadcn/ui components + +export { frigateTheme } from "./frigateTheme"; +export type { FrigateTheme } from "./frigateTheme"; diff --git a/web/src/components/config-form/theme/templates/ArrayFieldItemTemplate.tsx b/web/src/components/config-form/theme/templates/ArrayFieldItemTemplate.tsx new file mode 100644 index 000000000..ab2f3b272 --- /dev/null +++ b/web/src/components/config-form/theme/templates/ArrayFieldItemTemplate.tsx @@ -0,0 +1,58 @@ +import type { + ArrayFieldItemTemplateProps, + FormContextType, + RJSFSchema, + StrictRJSFSchema, +} from "@rjsf/utils"; +import { getTemplate, getUiOptions } from "@rjsf/utils"; + +/** + * Custom ArrayFieldItemTemplate to ensure array item content uses full width + * while keeping action buttons aligned to the right. 
+ */ +export function ArrayFieldItemTemplate< + T = unknown, + S extends StrictRJSFSchema = RJSFSchema, + F extends FormContextType = FormContextType, +>(props: ArrayFieldItemTemplateProps) { + const { + children, + buttonsProps, + displayLabel, + hasDescription, + hasToolbar, + uiSchema, + registry, + } = props; + + const uiOptions = getUiOptions(uiSchema); + const ArrayFieldItemButtonsTemplate = getTemplate< + "ArrayFieldItemButtonsTemplate", + T, + S, + F + >("ArrayFieldItemButtonsTemplate", registry, uiOptions); + + const margin = hasDescription ? -6 : 22; + + return ( +
+
+
{children}
+ {hasToolbar && ( +
+ +
+ )} +
+
+ ); +} + +export default ArrayFieldItemTemplate; diff --git a/web/src/components/config-form/theme/templates/ArrayFieldTemplate.tsx b/web/src/components/config-form/theme/templates/ArrayFieldTemplate.tsx new file mode 100644 index 000000000..42b0a0585 --- /dev/null +++ b/web/src/components/config-form/theme/templates/ArrayFieldTemplate.tsx @@ -0,0 +1,60 @@ +// Array Field Template - renders array fields with add/remove controls +import type { ArrayFieldTemplateProps } from "@rjsf/utils"; +import { Button } from "@/components/ui/button"; +import { LuPlus } from "react-icons/lu"; +import { useTranslation } from "react-i18next"; +import { cn } from "@/lib/utils"; + +export function ArrayFieldTemplate(props: ArrayFieldTemplateProps) { + const { items, canAdd, onAddClick, disabled, readonly, schema } = props; + + const { t } = useTranslation(["common"]); + + // Simple items (strings, numbers) render inline + const isSimpleType = + schema.items && + typeof schema.items === "object" && + "type" in schema.items && + ["string", "number", "integer", "boolean"].includes( + schema.items.type as string, + ); + + return ( +
+ {items.length === 0 && !canAdd && ( +

+ {t("no_items", { ns: "common", defaultValue: "No items" })} +

+ )} + + {items.map((element, index) => { + // RJSF items are pre-rendered React elements, render them directly + return ( +
+ {element} +
+ ); + })} + + {canAdd && ( + + )} +
+ ); +} diff --git a/web/src/components/config-form/theme/templates/BaseInputTemplate.tsx b/web/src/components/config-form/theme/templates/BaseInputTemplate.tsx new file mode 100644 index 000000000..f1636fa6b --- /dev/null +++ b/web/src/components/config-form/theme/templates/BaseInputTemplate.tsx @@ -0,0 +1,48 @@ +// Base Input Template - default input wrapper +import type { WidgetProps } from "@rjsf/utils"; +import { Input } from "@/components/ui/input"; +import { getSizedFieldClassName } from "../utils"; + +export function BaseInputTemplate(props: WidgetProps) { + const { + id, + type, + value, + disabled, + readonly, + onChange, + onBlur, + onFocus, + placeholder, + schema, + options, + } = props; + + const inputType = type || "text"; + const fieldClassName = getSizedFieldClassName(options, "xs"); + + const handleChange = (e: React.ChangeEvent) => { + const val = e.target.value; + if (inputType === "number") { + const num = parseFloat(val); + onChange(val === "" ? undefined : isNaN(num) ? undefined : num); + } else { + onChange(val === "" ? 
undefined : val); + } + }; + + return ( + onBlur(id, e.target.value)} + onFocus={(e) => onFocus(id, e.target.value)} + aria-label={schema.title} + /> + ); +} diff --git a/web/src/components/config-form/theme/templates/DescriptionFieldTemplate.tsx b/web/src/components/config-form/theme/templates/DescriptionFieldTemplate.tsx new file mode 100644 index 000000000..a57c90645 --- /dev/null +++ b/web/src/components/config-form/theme/templates/DescriptionFieldTemplate.tsx @@ -0,0 +1,37 @@ +// Description Field Template +import type { DescriptionFieldProps } from "@rjsf/utils"; +import { useTranslation } from "react-i18next"; +import { ConfigFormContext } from "@/types/configForm"; + +export function DescriptionFieldTemplate(props: DescriptionFieldProps) { + const { description, id } = props; + const formContext = ( + props as { registry?: { formContext?: ConfigFormContext } } + ).registry?.formContext; + + const isCameraLevel = formContext?.level === "camera"; + const sectionI18nPrefix = formContext?.sectionI18nPrefix; + const effectiveNamespace = isCameraLevel ? 
"config/cameras" : "config/global"; + + const { t, i18n } = useTranslation([effectiveNamespace, "common"]); + + let resolvedDescription = description; + + // Support nested keys for both camera-level and consolidated global namespace + if (sectionI18nPrefix && effectiveNamespace) { + const descriptionKey = `${sectionI18nPrefix}.description`; + if (i18n.exists(descriptionKey, { ns: effectiveNamespace })) { + resolvedDescription = t(descriptionKey, { ns: effectiveNamespace }); + } + } + + if (!resolvedDescription) { + return null; + } + + return ( + + {resolvedDescription} + + ); +} diff --git a/web/src/components/config-form/theme/templates/ErrorListTemplate.tsx b/web/src/components/config-form/theme/templates/ErrorListTemplate.tsx new file mode 100644 index 000000000..b70775157 --- /dev/null +++ b/web/src/components/config-form/theme/templates/ErrorListTemplate.tsx @@ -0,0 +1,193 @@ +// Error List Template - displays form-level errors +import type { + ErrorListProps, + RJSFSchema, + RJSFValidationError, +} from "@rjsf/utils"; +import { Alert, AlertDescription, AlertTitle } from "@/components/ui/alert"; +import { LuCircleAlert } from "react-icons/lu"; +import { useTranslation } from "react-i18next"; +import { buildTranslationPath, humanizeKey } from "../utils"; +import type { ConfigFormContext } from "@/types/configForm"; + +type ErrorSchemaNode = RJSFSchema & { + properties?: Record; + items?: RJSFSchema | RJSFSchema[]; + additionalProperties?: boolean | RJSFSchema; + title?: string; +}; + +const parsePropertyPath = (property: string): Array => { + const normalizedProperty = property.replace(/^\./, "").trim(); + if (!normalizedProperty) { + return []; + } + + return normalizedProperty + .split(".") + .filter(Boolean) + .map((segment) => { + const maybeIndex = Number(segment); + return Number.isInteger(maybeIndex) ? 
maybeIndex : segment; + }); +}; + +const resolveSchemaNodeForPath = ( + schema: RJSFSchema | undefined, + segments: Array, +): ErrorSchemaNode | undefined => { + if (!schema) { + return undefined; + } + + let currentSchema: ErrorSchemaNode | undefined = schema as ErrorSchemaNode; + + for (const segment of segments) { + if (!currentSchema) { + return undefined; + } + + if (typeof segment === "number") { + const items = currentSchema.items; + if (Array.isArray(items)) { + currentSchema = items[0] as ErrorSchemaNode | undefined; + } else { + currentSchema = items as ErrorSchemaNode | undefined; + } + continue; + } + + const nextFromProperties = currentSchema.properties?.[segment]; + if (nextFromProperties) { + currentSchema = nextFromProperties as ErrorSchemaNode; + continue; + } + + const additionalProperties = currentSchema.additionalProperties; + if ( + additionalProperties && + typeof additionalProperties === "object" && + !Array.isArray(additionalProperties) + ) { + currentSchema = additionalProperties as ErrorSchemaNode; + continue; + } + + return undefined; + } + + return currentSchema; +}; + +const resolveErrorFieldLabel = ({ + error, + schema, + formContext, + t, + i18n, +}: { + error: RJSFValidationError; + schema: RJSFSchema | undefined; + formContext?: ConfigFormContext; + t: (key: string, options?: Record) => string; + i18n: ReturnType["i18n"]; +}): string | undefined => { + const segments = parsePropertyPath(error.property || ""); + if (segments.length === 0) { + return undefined; + } + + const stringSegments = segments.filter( + (segment): segment is string => typeof segment === "string", + ); + + const sectionI18nPrefix = formContext?.sectionI18nPrefix; + const effectiveNamespace = + formContext?.level === "camera" + ? 
"config/cameras" + : formContext?.i18nNamespace; + + const translationPath = buildTranslationPath( + stringSegments, + sectionI18nPrefix, + formContext, + ); + + if (effectiveNamespace && translationPath) { + const prefixedTranslationKey = + sectionI18nPrefix && !translationPath.startsWith(`${sectionI18nPrefix}.`) + ? `${sectionI18nPrefix}.${translationPath}.label` + : undefined; + const translationKey = `${translationPath}.label`; + + if ( + prefixedTranslationKey && + i18n.exists(prefixedTranslationKey, { ns: effectiveNamespace }) + ) { + return t(prefixedTranslationKey, { ns: effectiveNamespace }); + } + + if (i18n.exists(translationKey, { ns: effectiveNamespace })) { + return t(translationKey, { ns: effectiveNamespace }); + } + } + + const schemaNode = resolveSchemaNodeForPath(schema, segments); + if (schemaNode?.title && schemaNode.title.trim().length > 0) { + return schemaNode.title; + } + + const fallbackSegment = + [...stringSegments].reverse().find((segment) => segment.length > 0) || + (typeof segments[segments.length - 1] === "string" + ? (segments[segments.length - 1] as string) + : undefined); + + return fallbackSegment ? humanizeKey(fallbackSegment) : undefined; +}; + +export function ErrorListTemplate(props: ErrorListProps) { + const { errors, schema } = props; + const formContext = ( + props as { registry?: { formContext?: ConfigFormContext } } + ).registry?.formContext; + const { t, i18n } = useTranslation([ + formContext?.level === "camera" + ? "config/cameras" + : formContext?.i18nNamespace || "config/global", + "common", + ]); + + if (!errors || errors.length === 0) { + return null; + } + + return ( + + + {t("validation_errors", { ns: "common" })} + +
    + {errors.map((error: RJSFValidationError, index: number) => { + const fieldLabel = resolveErrorFieldLabel({ + error, + schema, + formContext, + t, + i18n, + }); + + return ( +
  • + {fieldLabel && ( + {fieldLabel}: + )} + {error.message} +
  • + ); + })} +
+
+
+ ); +} diff --git a/web/src/components/config-form/theme/templates/FieldTemplate.tsx b/web/src/components/config-form/theme/templates/FieldTemplate.tsx new file mode 100644 index 000000000..becf720df --- /dev/null +++ b/web/src/components/config-form/theme/templates/FieldTemplate.tsx @@ -0,0 +1,616 @@ +// Field Template - wraps each form field with label and description +import { FieldTemplateProps, StrictRJSFSchema, UiSchema } from "@rjsf/utils"; +import { + getTemplate, + getUiOptions, + ADDITIONAL_PROPERTY_FLAG, +} from "@rjsf/utils"; +import { ComponentType, ReactNode } from "react"; +import { isValidElement } from "react"; +import { Label } from "@/components/ui/label"; +import { cn } from "@/lib/utils"; +import { useTranslation } from "react-i18next"; +import { isNullableUnionSchema } from "../fields/nullableUtils"; +import { getTranslatedLabel } from "@/utils/i18n"; +import { ConfigFormContext } from "@/types/configForm"; +import { Link } from "react-router-dom"; +import { LuExternalLink } from "react-icons/lu"; +import { useDocDomain } from "@/hooks/use-doc-domain"; +import { requiresRestartForFieldPath } from "@/utils/configUtil"; +import RestartRequiredIndicator from "@/components/indicators/RestartRequiredIndicator"; +import { + buildTranslationPath, + getFilterObjectLabel, + hasOverrideAtPath, + humanizeKey, + normalizeFieldValue, +} from "../utils"; +import { normalizeOverridePath } from "../utils/overrides"; +import get from "lodash/get"; +import isEqual from "lodash/isEqual"; +import { SPLIT_ROW_CLASS_NAME } from "@/components/card/SettingsGroupCard"; + +function _isArrayItemInAdditionalProperty( + pathSegments: Array, +): boolean { + // // If we find a numeric index, this is an array item + for (let i = 0; i < pathSegments.length; i++) { + const segment = pathSegments[i]; + if (typeof segment === "number") { + // Consider any array item as being inside additional properties if it's not at the root level + return i > 0; + } + } + return false; +} + 
+type FieldRenderSpec = + | ReactNode + | ComponentType + | { + render: string; + props?: Record; + }; + +export function FieldTemplate(props: FieldTemplateProps) { + const { + id, + label, + children, + classNames, + style, + errors, + help, + description, + hidden, + required, + displayLabel, + schema, + uiSchema, + registry, + fieldPathId, + onKeyRename, + onKeyRenameBlur, + onRemoveProperty, + rawDescription, + rawErrors, + formData: fieldFormData, + disabled, + readonly, + } = props; + + // Get i18n namespace from form context (passed through registry) + const formContext = registry?.formContext as ConfigFormContext | undefined; + const i18nNamespace = formContext?.i18nNamespace as string | undefined; + const sectionI18nPrefix = formContext?.sectionI18nPrefix as + | string + | undefined; + const isCameraLevel = formContext?.level === "camera"; + const effectiveNamespace = isCameraLevel ? "config/cameras" : i18nNamespace; + const { t, i18n } = useTranslation([ + effectiveNamespace || i18nNamespace || "common", + i18nNamespace || "common", + "views/settings", + ]); + const { getLocaleDocUrl } = useDocDomain(); + + if (hidden) { + return
{children}
; + } + + // Get UI options + const uiOptionsFromSchema = uiSchema?.["ui:options"] || {}; + + const suppressDescription = uiOptionsFromSchema.suppressDescription === true; + const showArrayItemDescription = + uiOptionsFromSchema.showArrayItemDescription === true; + + // Determine field characteristics + const isBoolean = + schema.type === "boolean" || + (Array.isArray(schema.type) && schema.type.includes("boolean")); + const isObjectField = + schema.type === "object" || + (Array.isArray(schema.type) && schema.type.includes("object")); + const isNullableUnion = isNullableUnionSchema(schema as StrictRJSFSchema); + const isAdditionalProperty = ADDITIONAL_PROPERTY_FLAG in schema; + const suppressMultiSchema = + (uiSchema?.["ui:options"] as UiSchema["ui:options"] | undefined) + ?.suppressMultiSchema === true; + const schemaTypes = Array.isArray(schema.type) + ? schema.type + : schema.type + ? [schema.type] + : []; + const nonNullSchemaTypes = schemaTypes.filter((type) => type !== "null"); + const isScalarValueField = + nonNullSchemaTypes.length === 1 && + ["string", "number", "integer"].includes(nonNullSchemaTypes[0]); + + // Only suppress labels/descriptions if this is a multi-schema field (anyOf/oneOf) with suppressMultiSchema flag + // This prevents duplicate labels while still showing the inner field's label + const isMultiSchemaWrapper = + (schema.anyOf || schema.oneOf) && (suppressMultiSchema || isNullableUnion); + const useSplitBooleanLayout = + uiOptionsFromSchema.splitLayout !== false && + isBoolean && + !isMultiSchemaWrapper && + !isObjectField && + !isAdditionalProperty; + const forceSplitLayout = uiOptionsFromSchema.forceSplitLayout === true; + const useSplitLayout = + uiOptionsFromSchema.splitLayout !== false && + (isScalarValueField || forceSplitLayout) && + !isBoolean && + !isMultiSchemaWrapper && + !isObjectField && + !isAdditionalProperty; + + // Get translation path for this field + const pathSegments = fieldPathId.path.filter( + (segment): segment is 
string => typeof segment === "string", + ); + + // Check if this is an array item inside an object with additionalProperties + const isArrayItemInAdditionalProp = _isArrayItemInAdditionalProperty( + fieldPathId.path, + ); + + // Conditions for showing descriptions/docs links + const shouldShowDescription = + !isMultiSchemaWrapper && + !isObjectField && + !isAdditionalProperty && + (!isArrayItemInAdditionalProp || showArrayItemDescription) && + !suppressDescription; + + const translationPath = buildTranslationPath( + pathSegments, + sectionI18nPrefix, + formContext, + ); + const fieldPath = fieldPathId.path; + const overrides = formContext?.overrides; + const baselineFormData = formContext?.baselineFormData; + const normalizedFieldPath = normalizeOverridePath( + fieldPath, + formContext?.formData, + ); + let baselineValue = baselineFormData + ? get(baselineFormData, normalizedFieldPath) + : undefined; + if (baselineValue === undefined || baselineValue === null) { + if (schema.default !== undefined && schema.default !== null) { + baselineValue = schema.default; + } + } + const isBaselineModified = + baselineFormData !== undefined && + !isEqual( + normalizeFieldValue(fieldFormData), + normalizeFieldValue(baselineValue), + ); + const isModified = baselineFormData + ? isBaselineModified + : hasOverrideAtPath(overrides, fieldPath, formContext?.formData); + const filterObjectLabel = getFilterObjectLabel(pathSegments); + const translatedFilterObjectLabel = filterObjectLabel + ? getTranslatedLabel(filterObjectLabel, "object") + : undefined; + const fieldDocsKey = translationPath || pathSegments.join("."); + const fieldDocsPath = fieldDocsKey + ? formContext?.fieldDocs?.[fieldDocsKey] + : undefined; + const fieldDocsUrl = fieldDocsPath + ? getLocaleDocUrl(fieldDocsPath) + : undefined; + const restartRequired = formContext?.restartRequired; + const defaultRequiresRestart = formContext?.requiresRestart ?? 
true; + const fieldRequiresRestart = requiresRestartForFieldPath( + normalizedFieldPath, + restartRequired, + defaultRequiresRestart, + ); + + // Use schema title/description as primary source (from JSON Schema) + const schemaTitle = schema.title; + const schemaDescription = schema.description; + + // Try to get translated label, falling back to schema title, then RJSF label + let finalLabel = label; + if (effectiveNamespace && translationPath) { + // Prefer camera-scoped translations when a section prefix is provided + const prefixedTranslationKey = + sectionI18nPrefix && !translationPath.startsWith(`${sectionI18nPrefix}.`) + ? `${sectionI18nPrefix}.${translationPath}.label` + : undefined; + const translationKey = `${translationPath}.label`; + + if ( + prefixedTranslationKey && + i18n.exists(prefixedTranslationKey, { ns: effectiveNamespace }) + ) { + finalLabel = t(prefixedTranslationKey, { ns: effectiveNamespace }); + } else if (i18n.exists(translationKey, { ns: effectiveNamespace })) { + finalLabel = t(translationKey, { ns: effectiveNamespace }); + } else if (schemaTitle) { + finalLabel = schemaTitle; + } else if (translatedFilterObjectLabel) { + const filtersIndex = pathSegments.indexOf("filters"); + const isFilterObjectField = + filtersIndex > -1 && pathSegments.length === filtersIndex + 2; + + if (isFilterObjectField) { + finalLabel = translatedFilterObjectLabel; + } else { + // Try to get translated field label, fall back to humanized + const fieldName = pathSegments[pathSegments.length - 1] || ""; + let fieldLabel = schemaTitle; + if (!fieldLabel) { + const fieldTranslationKey = `${fieldName}.label`; + const prefixedFieldTranslationKey = + sectionI18nPrefix && + !fieldTranslationKey.startsWith(`${sectionI18nPrefix}.`) + ? 
`${sectionI18nPrefix}.${fieldTranslationKey}` + : undefined; + + if ( + prefixedFieldTranslationKey && + effectiveNamespace && + i18n.exists(prefixedFieldTranslationKey, { ns: effectiveNamespace }) + ) { + fieldLabel = t(prefixedFieldTranslationKey, { + ns: effectiveNamespace, + }); + } else if ( + effectiveNamespace && + i18n.exists(fieldTranslationKey, { ns: effectiveNamespace }) + ) { + fieldLabel = t(fieldTranslationKey, { ns: effectiveNamespace }); + } else { + fieldLabel = humanizeKey(fieldName); + } + } + if (fieldLabel) { + finalLabel = t("configForm.filters.objectFieldLabel", { + ns: "views/settings", + field: fieldLabel, + label: translatedFilterObjectLabel, + }); + } + } + } + } else if (schemaTitle) { + finalLabel = schemaTitle; + } else if (translatedFilterObjectLabel) { + const filtersIndex = pathSegments.indexOf("filters"); + const isFilterObjectField = + filtersIndex > -1 && pathSegments.length === filtersIndex + 2; + if (isFilterObjectField) { + finalLabel = translatedFilterObjectLabel; + } else { + // Try to get translated field label, fall back to humanized + const fieldName = pathSegments[pathSegments.length - 1] || ""; + let fieldLabel = schemaTitle; + if (!fieldLabel) { + const fieldTranslationKey = `${fieldName}.label`; + const prefixedFieldTranslationKey = + sectionI18nPrefix && + !fieldTranslationKey.startsWith(`${sectionI18nPrefix}.`) + ? 
`${sectionI18nPrefix}.${fieldTranslationKey}` + : undefined; + + if ( + prefixedFieldTranslationKey && + effectiveNamespace && + i18n.exists(prefixedFieldTranslationKey, { ns: effectiveNamespace }) + ) { + fieldLabel = t(prefixedFieldTranslationKey, { + ns: effectiveNamespace, + }); + } else if ( + effectiveNamespace && + i18n.exists(fieldTranslationKey, { ns: effectiveNamespace }) + ) { + fieldLabel = t(fieldTranslationKey, { ns: effectiveNamespace }); + } else { + fieldLabel = humanizeKey(fieldName); + } + } + if (fieldLabel) { + finalLabel = t("configForm.filters.objectFieldLabel", { + ns: "views/settings", + field: fieldLabel, + label: translatedFilterObjectLabel, + }); + } + } + } + + // Try to get translated description, falling back to schema description + let finalDescription = description || ""; + if (effectiveNamespace && translationPath) { + const prefixedDescriptionKey = + sectionI18nPrefix && !translationPath.startsWith(`${sectionI18nPrefix}.`) + ? `${sectionI18nPrefix}.${translationPath}.description` + : undefined; + const descriptionKey = `${translationPath}.description`; + if ( + prefixedDescriptionKey && + i18n.exists(prefixedDescriptionKey, { ns: effectiveNamespace }) + ) { + finalDescription = t(prefixedDescriptionKey, { ns: effectiveNamespace }); + } else if (i18n.exists(descriptionKey, { ns: effectiveNamespace })) { + finalDescription = t(descriptionKey, { ns: effectiveNamespace }); + } else if (schemaDescription) { + finalDescription = schemaDescription; + } + } else if (schemaDescription) { + finalDescription = schemaDescription; + } + + const uiOptions = getUiOptions(uiSchema); + const beforeSpec = uiSchema?.["ui:before"] as FieldRenderSpec | undefined; + const afterSpec = uiSchema?.["ui:after"] as FieldRenderSpec | undefined; + + const renderCustom = (spec: FieldRenderSpec | undefined) => { + if (spec === undefined || spec === null) { + return null; + } + + if (isValidElement(spec) || typeof spec === "string") { + return spec; + } + + if 
(typeof spec === "number") { + return {spec}; + } + + if (typeof spec === "function") { + const SpecComponent = spec as ComponentType; + return ; + } + + if (typeof spec === "object" && "render" in spec) { + const renderKey = spec.render; + const renderers = formContext?.renderers; + const RenderComponent = renderers?.[renderKey]; + if (RenderComponent) { + return ( + + ); + } + } + + return null; + }; + + const beforeContent = renderCustom(beforeSpec); + const afterContent = renderCustom(afterSpec); + const WrapIfAdditionalTemplate = getTemplate( + "WrapIfAdditionalTemplate", + registry, + uiOptions, + ); + + const shouldRenderStandardLabel = + displayLabel && + finalLabel && + !isBoolean && + !useSplitLayout && + !isMultiSchemaWrapper && + !isObjectField && + !isAdditionalProperty; + + const shouldRenderSplitLabel = + displayLabel && + finalLabel && + !isMultiSchemaWrapper && + !isObjectField && + !isAdditionalProperty; + + const shouldRenderBooleanLabel = displayLabel && finalLabel; + + const renderDocsLink = (className?: string) => { + if (!fieldDocsUrl || !shouldShowDescription) { + return null; + } + + return ( +
+ + {t("readTheDocumentation", { ns: "common" })} + + +
+ ); + }; + + const renderDescription = (className?: string) => { + if (!finalDescription || !shouldShowDescription) { + return null; + } + + return ( +

+ {finalDescription} +

+ ); + }; + + const renderStandardLabel = () => { + if (!shouldRenderStandardLabel) { + return null; + } + + return ( + + ); + }; + + const renderBooleanLabel = () => { + if (!shouldRenderBooleanLabel) { + return null; + } + + return ( + + ); + }; + + const renderSplitLabel = () => { + if (!shouldRenderSplitLabel) { + return null; + } + + return ( + + ); + }; + + const renderBooleanSplitLayout = () => ( + <> +
+
+ {renderBooleanLabel()} +
{children}
+
+ {renderDescription()} + {renderDocsLink()} +
+ +
+
+ {renderBooleanLabel()} + {renderDescription()} + {renderDocsLink()} +
+
+
{children}
+
+
+ + ); + + const renderBooleanInlineLayout = () => ( +
+
+ {renderBooleanLabel()} + {renderDescription()} + {renderDocsLink()} +
+
{children}
+
+ ); + + const renderSplitValueLayout = () => ( +
+
+ {renderSplitLabel()} + {renderDescription("hidden md:block")} + {renderDocsLink("hidden md:flex")} +
+ +
+ {children} + {renderDescription("md:hidden")} + {renderDocsLink("md:hidden")} +
+
+ ); + + const renderDefaultValueLayout = () => ( + <> + {children} + {renderDescription()} + {renderDocsLink()} + + ); + + const renderFieldLayout = () => { + if (isBoolean) { + return useSplitBooleanLayout + ? renderBooleanSplitLayout() + : renderBooleanInlineLayout(); + } + + if (useSplitLayout) { + return renderSplitValueLayout(); + } + + return renderDefaultValueLayout(); + }; + + return ( + +
+ {beforeContent} +
+ {renderStandardLabel()} + {renderFieldLayout()} + + {errors} + {help} +
+ {afterContent} +
+
+ ); +} diff --git a/web/src/components/config-form/theme/templates/MultiSchemaFieldTemplate.tsx b/web/src/components/config-form/theme/templates/MultiSchemaFieldTemplate.tsx new file mode 100644 index 000000000..d845e2c61 --- /dev/null +++ b/web/src/components/config-form/theme/templates/MultiSchemaFieldTemplate.tsx @@ -0,0 +1,45 @@ +// Custom MultiSchemaFieldTemplate to handle anyOf [Type, null] fields +// Renders simple nullable types as single inputs instead of dropdowns + +import { + MultiSchemaFieldTemplateProps, + StrictRJSFSchema, + FormContextType, + UiSchema, +} from "@rjsf/utils"; +import { isNullableUnionSchema } from "../fields/nullableUtils"; + +/** + * Custom MultiSchemaFieldTemplate that: + * 1. Renders simple anyOf [Type, null] fields as single inputs + * 2. Falls back to default behavior for complex types + */ +export function MultiSchemaFieldTemplate< + // eslint-disable-next-line @typescript-eslint/no-explicit-any + T = any, + S extends StrictRJSFSchema = StrictRJSFSchema, + // eslint-disable-next-line @typescript-eslint/no-explicit-any + F extends FormContextType = any, +>(props: MultiSchemaFieldTemplateProps): JSX.Element { + const { schema, selector, optionSchemaField, uiSchema } = props; + + const uiOptions = uiSchema?.["ui:options"] as + | UiSchema["ui:options"] + | undefined; + const suppressMultiSchema = uiOptions?.suppressMultiSchema === true; + + // Check if this is a simple nullable field that should be handled specially + if (isNullableUnionSchema(schema) || suppressMultiSchema) { + // For simple nullable fields, just render the field directly without the dropdown selector + // This handles the case where empty input = null + return <>{optionSchemaField}; + } + + // For all other cases, render with both selector and field (default MultiSchemaField behavior) + return ( + <> + {selector} + {optionSchemaField} + + ); +} diff --git a/web/src/components/config-form/theme/templates/ObjectFieldTemplate.tsx 
b/web/src/components/config-form/theme/templates/ObjectFieldTemplate.tsx new file mode 100644 index 000000000..808557d46 --- /dev/null +++ b/web/src/components/config-form/theme/templates/ObjectFieldTemplate.tsx @@ -0,0 +1,503 @@ +// Object Field Template - renders nested object fields with i18n support +import type { ObjectFieldTemplateProps } from "@rjsf/utils"; +import { Card, CardContent, CardHeader, CardTitle } from "@/components/ui/card"; +import { + Collapsible, + CollapsibleContent, + CollapsibleTrigger, +} from "@/components/ui/collapsible"; +import { Children, useState, useEffect, useRef } from "react"; +import type { ReactNode } from "react"; +import RestartRequiredIndicator from "@/components/indicators/RestartRequiredIndicator"; +import { LuChevronDown, LuChevronRight } from "react-icons/lu"; +import { useTranslation } from "react-i18next"; +import { cn } from "@/lib/utils"; +import { getTranslatedLabel } from "@/utils/i18n"; +import { requiresRestartForFieldPath } from "@/utils/configUtil"; +import { ConfigFormContext } from "@/types/configForm"; +import { + buildTranslationPath, + getDomainFromNamespace, + getFilterObjectLabel, + humanizeKey, + isSubtreeModified, +} from "../utils"; +import get from "lodash/get"; +import { AddPropertyButton, AdvancedCollapsible } from "../components"; + +export function ObjectFieldTemplate(props: ObjectFieldTemplateProps) { + const { + title, + description, + properties, + uiSchema, + registry, + schema, + onAddProperty, + formData, + disabled, + readonly, + } = props; + const formContext = registry?.formContext as ConfigFormContext | undefined; + + // Check if this is a root-level object + const isRoot = registry?.rootSchema === schema; + const overrides = formContext?.overrides; + const baselineFormData = formContext?.baselineFormData; + const hiddenFields = formContext?.hiddenFields; + const fieldPath = props.fieldPathId.path; + const restartRequired = formContext?.restartRequired; + const defaultRequiresRestart = 
formContext?.requiresRestart ?? true; + + // Strip fields from an object that should be excluded from modification + // detection: fields listed in hiddenFields (stripped from baseline by + // sanitizeSectionData) and fields with ui:widget=hidden in uiSchema + // (managed by custom components, not the standard form). + const stripExcludedFields = ( + data: unknown, + path: Array, + ): unknown => { + if ( + !data || + typeof data !== "object" || + Array.isArray(data) || + data === null + ) { + return data; + } + const result = { ...(data as Record) }; + const pathStrings = path.map(String); + + // Strip hiddenFields that match the current path prefix + if (hiddenFields) { + for (const hidden of hiddenFields) { + const parts = hidden.split("."); + if ( + parts.length === pathStrings.length + 1 && + pathStrings.every((s, i) => s === parts[i]) + ) { + delete result[parts[parts.length - 1]]; + } + } + } + + // Strip ui:widget=hidden fields from uiSchema at this level + if (uiSchema) { + // Navigate to the uiSchema subtree matching the relative path + let subUiSchema = uiSchema; + const relativePath = path.slice(fieldPath.length); + for (const segment of relativePath) { + if ( + typeof segment === "string" && + subUiSchema && + typeof subUiSchema[segment] === "object" + ) { + subUiSchema = subUiSchema[segment] as typeof uiSchema; + } else { + subUiSchema = undefined as unknown as typeof uiSchema; + break; + } + } + if (subUiSchema && typeof subUiSchema === "object") { + for (const [key, propSchema] of Object.entries(subUiSchema)) { + if ( + !key.startsWith("ui:") && + typeof propSchema === "object" && + propSchema !== null && + (propSchema as Record)["ui:widget"] === "hidden" + ) { + delete result[key]; + } + } + } + } + + return result; + }; + + // Use props.formData (always up-to-date from RJSF) rather than + // formContext.formData which can be stale in parent templates. 
+ const checkSubtreeModified = (path: Array): boolean => { + // Compute relative path from this object's fieldPath to get the + // value from props.formData (which represents this object's data) + const relativePath = path.slice(fieldPath.length); + let currentValue = + relativePath.length > 0 ? get(formData, relativePath) : formData; + + // Strip hidden/excluded fields from the RJSF data before comparing + // against the baseline (which already has these stripped) + currentValue = stripExcludedFields(currentValue, path); + + let baselineValue = + path.length > 0 ? get(baselineFormData, path) : baselineFormData; + // Also strip hidden/excluded fields from the baseline so that fields + // managed by custom components (e.g. required_zones with ui:widget=hidden) + // don't cause false modification detection. + baselineValue = stripExcludedFields(baselineValue, path); + + return isSubtreeModified( + currentValue, + baselineValue, + overrides, + path, + formContext?.formData, + ); + }; + + const hasModifiedDescendants = checkSubtreeModified(fieldPath); + const [isOpen, setIsOpen] = useState(hasModifiedDescendants); + const resetKey = `${formContext?.level ?? "global"}::${ + formContext?.cameraName ?? "global" + }`; + const lastResetKeyRef = useRef(null); + + // Auto-expand collapsible when modifications are detected + useEffect(() => { + if (hasModifiedDescendants) { + setIsOpen(true); + } + }, [hasModifiedDescendants]); + + const isCameraLevel = formContext?.level === "camera"; + const effectiveNamespace = isCameraLevel ? 
"config/cameras" : "config/global"; + const sectionI18nPrefix = formContext?.sectionI18nPrefix; + + const { t, i18n } = useTranslation([ + effectiveNamespace, + "config/groups", + "views/settings", + "common", + ]); + const objectRequiresRestart = requiresRestartForFieldPath( + fieldPath, + restartRequired, + defaultRequiresRestart, + ); + + const domain = getDomainFromNamespace(formContext?.i18nNamespace); + + const groupDefinitions = + (uiSchema?.["ui:groups"] as Record | undefined) || {}; + const disableNestedCard = + uiSchema?.["ui:options"]?.disableNestedCard === true; + + const isHiddenProp = (prop: (typeof properties)[number]) => + prop.content.props.uiSchema?.["ui:widget"] === "hidden"; + + const visibleProps = properties.filter((prop) => !isHiddenProp(prop)); + + // Check for advanced section grouping + const advancedProps = visibleProps.filter( + (p) => p.content.props.uiSchema?.["ui:options"]?.advanced === true, + ); + const regularProps = visibleProps.filter( + (p) => p.content.props.uiSchema?.["ui:options"]?.advanced !== true, + ); + const hasModifiedAdvanced = advancedProps.some((prop) => + checkSubtreeModified([...fieldPath, prop.name]), + ); + const [showAdvanced, setShowAdvanced] = useState(hasModifiedAdvanced); + + // Auto-expand advanced section when modifications are detected + useEffect(() => { + if (hasModifiedAdvanced) { + setShowAdvanced(true); + } + }, [hasModifiedAdvanced]); + + useEffect(() => { + if (lastResetKeyRef.current !== resetKey) { + lastResetKeyRef.current = resetKey; + setIsOpen(hasModifiedDescendants); + setShowAdvanced(hasModifiedAdvanced); + } + }, [resetKey, hasModifiedDescendants, hasModifiedAdvanced]); + const { children } = props as ObjectFieldTemplateProps & { + children?: ReactNode; + }; + const hasCustomChildren = Children.count(children) > 0; + + // Get the full translation path from the field path + const fieldPathId = ( + props as { fieldPathId?: { path?: (string | number)[] } } + ).fieldPathId; + let propertyName: 
string | undefined; + let translationPath: string | undefined; + const path = fieldPathId?.path; + const filterObjectLabel = path ? getFilterObjectLabel(path) : undefined; + const translatedFilterLabel = filterObjectLabel + ? getTranslatedLabel(filterObjectLabel, "object") + : undefined; + if (path) { + translationPath = buildTranslationPath( + path, + sectionI18nPrefix, + formContext, + ); + // Also get the last property name for fallback label generation + for (let i = path.length - 1; i >= 0; i -= 1) { + const segment = path[i]; + if (typeof segment === "string") { + propertyName = segment; + break; + } + } + } + + // Try i18n translation, fall back to schema or original values + const i18nNs = effectiveNamespace; + + let inferredLabel: string | undefined; + if (i18nNs && translationPath) { + const prefixedLabelKey = + sectionI18nPrefix && !translationPath.startsWith(`${sectionI18nPrefix}.`) + ? `${sectionI18nPrefix}.${translationPath}.label` + : undefined; + const labelKey = `${translationPath}.label`; + if (prefixedLabelKey && i18n.exists(prefixedLabelKey, { ns: i18nNs })) { + inferredLabel = t(prefixedLabelKey, { ns: i18nNs }); + } else if (i18n.exists(labelKey, { ns: i18nNs })) { + inferredLabel = t(labelKey, { ns: i18nNs }); + } + } + if (!inferredLabel && translatedFilterLabel) { + inferredLabel = translatedFilterLabel; + } + const schemaTitle = schema?.title; + const fallbackLabel = + title || + schemaTitle || + (propertyName ? humanizeKey(propertyName) : undefined); + inferredLabel = inferredLabel ?? fallbackLabel; + + let inferredDescription: string | undefined; + if (i18nNs && translationPath) { + const prefixedDescriptionKey = + sectionI18nPrefix && !translationPath.startsWith(`${sectionI18nPrefix}.`) + ? 
`${sectionI18nPrefix}.${translationPath}.description` + : undefined; + const descriptionKey = `${translationPath}.description`; + if ( + prefixedDescriptionKey && + i18n.exists(prefixedDescriptionKey, { ns: i18nNs }) + ) { + inferredDescription = t(prefixedDescriptionKey, { ns: i18nNs }); + } else if (i18n.exists(descriptionKey, { ns: i18nNs })) { + inferredDescription = t(descriptionKey, { ns: i18nNs }); + } + } + const schemaDescription = schema?.description; + const fallbackDescription = + (typeof description === "string" ? description : undefined) || + schemaDescription; + inferredDescription = inferredDescription ?? fallbackDescription; + + const renderGroupedFields = (items: (typeof properties)[number][]) => { + if (!items.length) { + return null; + } + + const grouped = new Set(); + const groups = Object.entries(groupDefinitions) + .map(([groupKey, fields]) => { + const ordered = fields + .map((field) => items.find((item) => item.name === field)) + .filter(Boolean) as (typeof properties)[number][]; + + if (ordered.length === 0) { + return null; + } + + ordered.forEach((item) => grouped.add(item.name)); + + const label = domain + ? t(`${sectionI18nPrefix}.${domain}.${groupKey}`, { + ns: "config/groups", + defaultValue: humanizeKey(groupKey), + }) + : t(`groups.${groupKey}`, { + defaultValue: humanizeKey(groupKey), + }); + + return { + key: groupKey, + label, + items: ordered, + }; + }) + .filter(Boolean) as Array<{ + key: string; + label: string; + items: (typeof properties)[number][]; + }>; + + const ungrouped = items.filter((item) => !grouped.has(item.name)); + const isObjectLikeField = (item: (typeof properties)[number]) => { + const fieldSchema = item.content.props.schema as + | { type?: string | string[] } + | undefined; + return fieldSchema?.type === "object"; + }; + + return ( +
+ {groups.map((group) => ( +
+
+ {group.label} +
+
+ {group.items.map((element) => ( +
{element.content}
+ ))} +
+
+ ))} + + {ungrouped.length > 0 && ( +
0 && "pt-2")}> + {ungrouped.map((element) => ( +
0 && !isObjectLikeField(element) && "px-4", + )} + > + {element.content} +
+ ))} +
+ )} +
+ ); + }; + + // Root level renders children directly + if (isRoot) { + return ( +
+ {hasCustomChildren ? ( + children + ) : ( + <> + {renderGroupedFields(regularProps)} + + + + {renderGroupedFields(advancedProps)} + + + )} +
+ ); + } + + if (disableNestedCard) { + return ( +
+ {hasCustomChildren ? ( + children + ) : ( + <> + {renderGroupedFields(regularProps)} + + + + {renderGroupedFields(advancedProps)} + + + )} +
+ ); + } + + // Nested objects render as collapsible cards + return ( + + + + +
+
+ + {inferredLabel} + {objectRequiresRestart && ( + + )} + + {inferredDescription && ( +

+ {inferredDescription} +

+ )} +
+ {isOpen ? ( + + ) : ( + + )} +
+
+
+ + + {hasCustomChildren ? ( + children + ) : ( + <> + {renderGroupedFields(regularProps)} + + + + {renderGroupedFields(advancedProps)} + + + )} + + +
+
+ ); +} diff --git a/web/src/components/config-form/theme/templates/TitleFieldTemplate.tsx b/web/src/components/config-form/theme/templates/TitleFieldTemplate.tsx new file mode 100644 index 000000000..3c0ce59bc --- /dev/null +++ b/web/src/components/config-form/theme/templates/TitleFieldTemplate.tsx @@ -0,0 +1,17 @@ +// Title Field Template +import type { TitleFieldProps } from "@rjsf/utils"; + +export function TitleFieldTemplate(props: TitleFieldProps) { + const { title, id, required } = props; + + if (!title) { + return null; + } + + return ( +

+ {title} + {required && *} +

+ ); +} diff --git a/web/src/components/config-form/theme/templates/WrapIfAdditionalTemplate.tsx b/web/src/components/config-form/theme/templates/WrapIfAdditionalTemplate.tsx new file mode 100644 index 000000000..6e6a19bfd --- /dev/null +++ b/web/src/components/config-form/theme/templates/WrapIfAdditionalTemplate.tsx @@ -0,0 +1,123 @@ +import { + ADDITIONAL_PROPERTY_FLAG, + FormContextType, + getUiOptions, + RJSFSchema, + StrictRJSFSchema, + WrapIfAdditionalTemplateProps, +} from "@rjsf/utils"; +import { Input } from "@/components/ui/input"; +import { Button } from "@/components/ui/button"; +import { Label } from "@/components/ui/label"; +import { cn } from "@/lib/utils"; +import { useTranslation } from "react-i18next"; +import { LuTrash2 } from "react-icons/lu"; + +export function WrapIfAdditionalTemplate< + T = unknown, + S extends StrictRJSFSchema = RJSFSchema, + F extends FormContextType = FormContextType, +>(props: WrapIfAdditionalTemplateProps) { + const { + classNames, + style, + children, + disabled, + id, + label, + displayLabel, + onRemoveProperty, + onKeyRenameBlur, + readonly, + required, + schema, + uiSchema, + } = props; + + const { t } = useTranslation(["views/settings"]); + + const additional = ADDITIONAL_PROPERTY_FLAG in schema; + + if (!additional) { + return ( +
+ {children} +
+ ); + } + + const keyId = `${id}-key`; + const keyLabel = t("configForm.additionalProperties.keyLabel", { + ns: "views/settings", + }); + const valueLabel = t("configForm.additionalProperties.valueLabel", { + ns: "views/settings", + }); + const keyPlaceholder = t("configForm.additionalProperties.keyPlaceholder", { + ns: "views/settings", + }); + const removeLabel = t("configForm.additionalProperties.remove", { + ns: "views/settings", + }); + const uiOptions = getUiOptions(uiSchema); + const keyIsReadonly = uiOptions.additionalPropertyKeyReadonly === true; + + return ( +
+ {!keyIsReadonly && ( +
+ {displayLabel && } + {keyIsReadonly ? ( +
+ {label} +
+ ) : ( + + )} +
+ )} +
+ {!keyIsReadonly && displayLabel && ( + + )} +
{children}
+
+ {!keyIsReadonly && ( +
+ +
+ )} +
+ ); +} + +export default WrapIfAdditionalTemplate; diff --git a/web/src/components/config-form/theme/utils/fieldSizing.ts b/web/src/components/config-form/theme/utils/fieldSizing.ts new file mode 100644 index 000000000..813965e77 --- /dev/null +++ b/web/src/components/config-form/theme/utils/fieldSizing.ts @@ -0,0 +1,37 @@ +import { cn } from "@/lib/utils"; + +const FIELD_SIZE_CLASS_MAP = { + xs: "max-w-xs", + sm: "max-w-sm", + md: "max-w-md", + lg: "max-w-2xl", + full: "max-w-full", +} as const; + +export type FieldSizeOption = keyof typeof FIELD_SIZE_CLASS_MAP; + +type FieldSizingOptions = { + size?: FieldSizeOption; + maxWidthClassName?: string; + className?: string; +}; + +export function getSizedFieldClassName( + options: unknown, + defaultSize: FieldSizeOption = "lg", +) { + const sizingOptions = + typeof options === "object" && options !== null + ? (options as FieldSizingOptions) + : undefined; + + const sizeClass = + FIELD_SIZE_CLASS_MAP[sizingOptions?.size ?? defaultSize] ?? + FIELD_SIZE_CLASS_MAP[defaultSize]; + + return cn( + "w-full", + sizingOptions?.maxWidthClassName ?? sizeClass, + sizingOptions?.className, + ); +} diff --git a/web/src/components/config-form/theme/utils/i18n.ts b/web/src/components/config-form/theme/utils/i18n.ts new file mode 100644 index 000000000..5de8ba506 --- /dev/null +++ b/web/src/components/config-form/theme/utils/i18n.ts @@ -0,0 +1,182 @@ +/** + * Shared i18n utilities for config form templates and fields. + * + * These functions handle translation key path building and label normalization + * for RJSF form fields. 
+ */ + +import type { ConfigFormContext } from "@/types/configForm"; + +const isRecord = (value: unknown): value is Record => + typeof value === "object" && value !== null; + +const resolveDetectorType = ( + detectorConfig: unknown, + detectorKey?: string, +): string | undefined => { + if (!detectorKey || !isRecord(detectorConfig)) { + return undefined; + } + + const entry = detectorConfig[detectorKey]; + if (!isRecord(entry)) { + return undefined; + } + + const typeValue = entry.type; + return typeof typeValue === "string" && typeValue.length > 0 + ? typeValue + : undefined; +}; + +const resolveDetectorTypeFromContext = ( + formContext: ConfigFormContext | undefined, + detectorKey?: string, +): string | undefined => { + const formData = formContext?.formData; + if (!detectorKey || !isRecord(formData)) { + return undefined; + } + + const detectorConfig = isRecord(formData.detectors) + ? formData.detectors + : formData; + + return resolveDetectorType(detectorConfig, detectorKey); +}; + +/** + * Build the i18n translation key path for nested fields using the field path + * provided by RJSF. This avoids ambiguity with underscores in field names and + * normalizes dynamic segments like filter object names or detector names. 
+ * + * @param segments Array of path segments (strings and/or numbers) + * @param sectionI18nPrefix Optional section prefix for specialized sections + * @param formContext Optional form context for resolving detector types + * @returns Normalized translation key path as a dot-separated string + * + * @example + * buildTranslationPath(["filters", "person", "threshold"]) => "filters.threshold" + * buildTranslationPath(["detectors", "ov1", "type"]) => "detectors.openvino.type" + * buildTranslationPath(["ov1", "type"], "detectors") => "openvino.type" + */ +export function buildTranslationPath( + segments: Array, + sectionI18nPrefix?: string, + formContext?: ConfigFormContext, +): string { + // Filter out numeric indices to get string segments only + const stringSegments = segments.filter( + (segment): segment is string => typeof segment === "string", + ); + + // Handle filters section - skip the dynamic filter object name + // Example: filters.person.threshold -> filters.threshold + const filtersIndex = stringSegments.indexOf("filters"); + if (filtersIndex !== -1 && stringSegments.length > filtersIndex + 2) { + const normalized = [ + ...stringSegments.slice(0, filtersIndex + 1), + ...stringSegments.slice(filtersIndex + 2), + ]; + return normalized.join("."); + } + + // Handle detectors section - resolve the detector type when available + // Example: detectors.ov1.type -> detectors.openvino.type + const detectorsIndex = stringSegments.indexOf("detectors"); + if (detectorsIndex !== -1 && stringSegments.length > detectorsIndex + 2) { + const detectorKey = stringSegments[detectorsIndex + 1]; + const detectorType = resolveDetectorTypeFromContext( + formContext, + detectorKey, + ); + if (detectorType) { + const normalized = [ + ...stringSegments.slice(0, detectorsIndex + 1), + detectorType, + ...stringSegments.slice(detectorsIndex + 2), + ]; + return normalized.join("."); + } + + const normalized = [ + ...stringSegments.slice(0, detectorsIndex + 1), + 
...stringSegments.slice(detectorsIndex + 2), + ]; + return normalized.join("."); + } + + // Handle specialized sections like detectors where the first segment is dynamic + // Example: (sectionI18nPrefix="detectors") "ov1.type" -> "openvino.type" + if (sectionI18nPrefix === "detectors" && stringSegments.length > 1) { + const detectorKey = stringSegments[0]; + const detectorType = resolveDetectorTypeFromContext( + formContext, + detectorKey, + ); + if (detectorType) { + return [detectorType, ...stringSegments.slice(1)].join("."); + } + + return stringSegments.slice(1).join("."); + } + + return stringSegments.join("."); +} + +/** + * Extract the filter object label from a path containing "filters" segment. + * Returns the segment immediately after "filters". + * + * @param pathSegments Array of path segments + * @returns The filter object label or undefined if not found + * + * @example + * getFilterObjectLabel(["filters", "person", "threshold"]) => "person" + * getFilterObjectLabel(["detect", "enabled"]) => undefined + */ +export function getFilterObjectLabel( + pathSegments: Array, +): string | undefined { + const filtersIndex = pathSegments.indexOf("filters"); + if (filtersIndex === -1 || pathSegments.length <= filtersIndex + 1) { + return undefined; + } + const objectLabel = pathSegments[filtersIndex + 1]; + return typeof objectLabel === "string" && objectLabel.length > 0 + ? objectLabel + : undefined; +} + +/** + * Convert snake_case string to Title Case with spaces. + * Useful for generating human-readable labels from schema property names. + * + * @param value The snake_case string to convert + * @returns Title Case string + * + * @example + * humanizeKey("detect_fps") => "Detect Fps" + * humanizeKey("min_initialized") => "Min Initialized" + */ +export function humanizeKey(value: string): string { + return value + .replace(/_/g, " ") + .replace(/\b\w/g, (char) => char.toUpperCase()); +} + +/** + * Extract domain name from an i18n namespace string. 
+ * Handles config/* namespace format by stripping the prefix. + * + * @param ns The i18n namespace (e.g., "config/audio", "config/global") + * @returns The domain portion (e.g., "audio", "global") or empty string + * + * @example + * getDomainFromNamespace("config/audio") => "audio" + * getDomainFromNamespace("common") => "" + */ +export function getDomainFromNamespace(ns?: string): string { + if (!ns || !ns.startsWith("config/")) return ""; + return ns.replace("config/", ""); +} diff --git a/web/src/components/config-form/theme/utils/index.ts b/web/src/components/config-form/theme/utils/index.ts new file mode 100644 index 000000000..52e38bd43 --- /dev/null +++ b/web/src/components/config-form/theme/utils/index.ts @@ -0,0 +1,18 @@ +/** + * Config form theme utilities + */ + +export { + buildTranslationPath, + getFilterObjectLabel, + humanizeKey, + getDomainFromNamespace, +} from "./i18n"; + +export { getOverrideAtPath, hasOverrideAtPath } from "./overrides"; +export { + deepNormalizeValue, + normalizeFieldValue, + isSubtreeModified, +} from "./overrides"; +export { getSizedFieldClassName } from "./fieldSizing"; diff --git a/web/src/components/config-form/theme/utils/overrides.ts b/web/src/components/config-form/theme/utils/overrides.ts new file mode 100644 index 000000000..9e94fbdf2 --- /dev/null +++ b/web/src/components/config-form/theme/utils/overrides.ts @@ -0,0 +1,128 @@ +import get from "lodash/get"; +import isEqual from "lodash/isEqual"; +import { isJsonObject } from "@/lib/utils"; +import type { JsonValue } from "@/types/configForm"; + +export const getOverrideAtPath = ( + overrides: JsonValue | undefined, + path: Array, +) => { + if (overrides === undefined || overrides === null) { + return undefined; + } + + if (isJsonObject(overrides) || Array.isArray(overrides)) { + return get(overrides, path); + } + + return path.length === 0 ? 
overrides : undefined; +}; + +export const normalizeOverridePath = ( + path: Array, + data: JsonValue | undefined, +) => { + if (data === undefined || data === null) { + return path; + } + + const normalized: Array = []; + let cursor: JsonValue | undefined = data; + + for (const segment of path) { + if (typeof segment === "number") { + if (Array.isArray(cursor)) { + normalized.push(segment); + cursor = cursor[segment] as JsonValue | undefined; + } + continue; + } + + normalized.push(segment); + + if (isJsonObject(cursor) || Array.isArray(cursor)) { + cursor = (cursor as Record)[segment]; + } else { + cursor = undefined; + } + } + + return normalized; +}; + +export const hasOverrideAtPath = ( + overrides: JsonValue | undefined, + path: Array, + contextData?: JsonValue, +) => { + const normalizedPath = contextData + ? normalizeOverridePath(path, contextData) + : path; + const value = getOverrideAtPath(overrides, normalizedPath); + if (value !== undefined) { + return true; + } + const shouldFallback = + normalizedPath.length !== path.length || + normalizedPath.some((segment, index) => segment !== path[index]); + if (!shouldFallback) { + return false; + } + return getOverrideAtPath(overrides, path) !== undefined; +}; + +/** + * Deep normalization for form data comparison. Strips null, undefined, + * and empty-string values from objects and arrays so that RJSF-injected + * schema defaults (e.g., `mask: null`) don't cause false positives + * against a baseline that lacks those keys. 
+ */ +export const deepNormalizeValue = (value: unknown): unknown => { + if (value === null || value === undefined || value === "") return undefined; + if (Array.isArray(value)) return value.map(deepNormalizeValue); + if (typeof value === "object" && value !== null) { + const result: Record = {}; + for (const [k, v] of Object.entries(value as Record)) { + const normalized = deepNormalizeValue(v); + if (normalized !== undefined) { + result[k] = normalized; + } + } + return Object.keys(result).length > 0 ? result : undefined; + } + return value; +}; + +/** + * Shallow normalization for individual field values. + * Treats null and empty-string as equivalent to undefined. + */ +export const normalizeFieldValue = (value: unknown): unknown => + value === null || value === "" ? undefined : value; + +/** + * Check whether a subtree of form data has been modified relative to + * the baseline. Uses deep normalization to ignore RJSF-injected null/empty + * schema defaults. + * + * @param currentData - The current value at the subtree (from props.formData) + * @param baselineData - The baseline value at the subtree (from formContext.baselineFormData) + * @param overrides - Fallback: the overrides object from formContext + * @param path - The full field path for the fallback override check + * @param contextData - The full form data for normalizing the override path + */ +export const isSubtreeModified = ( + currentData: unknown, + baselineData: unknown, + overrides: JsonValue | undefined, + path: Array, + contextData?: JsonValue, +): boolean => { + if (baselineData !== undefined || currentData !== undefined) { + return !isEqual( + deepNormalizeValue(currentData), + deepNormalizeValue(baselineData), + ); + } + return hasOverrideAtPath(overrides, path, contextData); +}; diff --git a/web/src/components/config-form/theme/widgets/ArrayAsTextWidget.tsx b/web/src/components/config-form/theme/widgets/ArrayAsTextWidget.tsx new file mode 100644 index 000000000..7e8ab9c57 --- /dev/null 
+++ b/web/src/components/config-form/theme/widgets/ArrayAsTextWidget.tsx @@ -0,0 +1,36 @@ +// Widget that displays an array as a concatenated text string +import type { WidgetProps } from "@rjsf/utils"; +import { Input } from "@/components/ui/input"; +import { useCallback } from "react"; + +export function ArrayAsTextWidget(props: WidgetProps) { + const { value, onChange, disabled, readonly, placeholder } = props; + + // Convert array or string to text + let textValue = ""; + if (typeof value === "string" && value.length > 0) { + textValue = value; + } else if (Array.isArray(value) && value.length > 0) { + textValue = value.join(" "); + } + + const handleChange = useCallback( + (event: React.ChangeEvent) => { + const newText = event.target.value; + // Convert space-separated string back to array + const newArray = newText.trim() ? newText.trim().split(/\s+/) : []; + onChange(newArray); + }, + [onChange], + ); + + return ( + + ); +} diff --git a/web/src/components/config-form/theme/widgets/AudioLabelSwitchesWidget.tsx b/web/src/components/config-form/theme/widgets/AudioLabelSwitchesWidget.tsx new file mode 100644 index 000000000..13233c6df --- /dev/null +++ b/web/src/components/config-form/theme/widgets/AudioLabelSwitchesWidget.tsx @@ -0,0 +1,101 @@ +// Audio Label Switches Widget - For selecting audio labels via switches +import type { WidgetProps } from "@rjsf/utils"; +import { useCallback, useMemo } from "react"; +import useSWR from "swr"; +import { SwitchesWidget } from "./SwitchesWidget"; +import type { FormContext } from "./SwitchesWidget"; +import { getTranslatedLabel } from "@/utils/i18n"; +import { JsonObject } from "@/types/configForm"; + +function getEnabledAudioLabels(context: FormContext): string[] { + let cameraLabels: string[] = []; + let globalLabels: string[] = []; + + if (context) { + // context.cameraValue and context.globalValue should be the entire audio section + if ( + context.cameraValue && + typeof context.cameraValue === "object" && + 
!Array.isArray(context.cameraValue) + ) { + const listenValue = (context.cameraValue as JsonObject).listen; + if (Array.isArray(listenValue)) { + cameraLabels = listenValue.filter( + (item): item is string => typeof item === "string", + ); + } + } + + if ( + context.globalValue && + typeof context.globalValue === "object" && + !Array.isArray(context.globalValue) + ) { + const globalListenValue = (context.globalValue as JsonObject).listen; + if (Array.isArray(globalListenValue)) { + globalLabels = globalListenValue.filter( + (item): item is string => typeof item === "string", + ); + } + } + } + + const sourceLabels = cameraLabels.length > 0 ? cameraLabels : globalLabels; + return [...sourceLabels].sort(); +} + +function getAudioLabelDisplayName(label: string): string { + return getTranslatedLabel(label, "audio"); +} + +export function AudioLabelSwitchesWidget(props: WidgetProps) { + const { data: audioLabels } = useSWR>("/audio_labels"); + + const allLabels = useMemo(() => { + if (!audioLabels) { + return []; + } + + const labelSet = new Set(); + Object.values(audioLabels).forEach((label) => { + if (typeof label !== "string") { + return; + } + const normalized = label.trim(); + if (normalized) { + labelSet.add(normalized); + } + }); + + return [...labelSet].sort(); + }, [audioLabels]); + + const getEntities = useCallback( + (context: FormContext) => { + const enabledLabels = getEnabledAudioLabels(context); + + if (allLabels.length === 0) { + return enabledLabels; + } + + const combinedLabels = new Set([...allLabels, ...enabledLabels]); + return [...combinedLabels].sort(); + }, + [allLabels], + ); + + return ( + + ); +} diff --git a/web/src/components/config-form/theme/widgets/CameraPathWidget.tsx b/web/src/components/config-form/theme/widgets/CameraPathWidget.tsx new file mode 100644 index 000000000..b2490d2ab --- /dev/null +++ b/web/src/components/config-form/theme/widgets/CameraPathWidget.tsx @@ -0,0 +1,202 @@ +import type { WidgetProps } from "@rjsf/utils"; 
+import useSWR from "swr"; +import { useMemo, useState, type FocusEvent } from "react"; +import { useTranslation } from "react-i18next"; +import { LuEye, LuEyeOff } from "react-icons/lu"; +import { Button } from "@/components/ui/button"; +import { Input } from "@/components/ui/input"; +import type { ConfigFormContext } from "@/types/configForm"; +import { cn } from "@/lib/utils"; +import { getSizedFieldClassName } from "../utils"; + +type RawPathsResponse = { + cameras?: Record< + string, + { + ffmpeg?: { + inputs?: Array<{ + path?: string; + }>; + }; + } + >; +}; + +const MASKED_AUTH_PATTERN = /:\/\/\*:\*@/i; +const MASKED_QUERY_PATTERN = /(?:[?&])user=\*&password=\*/i; + +const getInputIndexFromWidgetId = (id: string): number | undefined => { + const match = id.match(/_inputs_(\d+)_path$/); + if (!match) { + return undefined; + } + + const index = Number(match[1]); + return Number.isNaN(index) ? undefined : index; +}; + +const isMaskedPath = (value: string): boolean => + MASKED_AUTH_PATTERN.test(value) || MASKED_QUERY_PATTERN.test(value); + +const hasCredentials = (value: string): boolean => { + if (!value) { + return false; + } + + if (isMaskedPath(value)) { + return true; + } + + try { + const parsed = new URL(value); + if (parsed.username || parsed.password) { + return true; + } + + return ( + parsed.searchParams.has("user") && parsed.searchParams.has("password") + ); + } catch { + return /:\/\/[^:@/\s]+:[^@/\s]+@/.test(value); + } +}; + +const maskCredentials = (value: string): string => { + if (!value) { + return value; + } + + const maskedAuth = value.replace(/:\/\/[^:@/\s]+:[^@/\s]*@/g, "://*:*@"); + + return maskedAuth + .replace(/([?&]user=)[^&]*/gi, "$1*") + .replace(/([?&]password=)[^&]*/gi, "$1*"); +}; + +export function CameraPathWidget(props: WidgetProps) { + const { + id, + value, + disabled, + readonly, + onChange, + onBlur, + onFocus, + placeholder, + schema, + options, + } = props; + + const { t } = useTranslation(["common", "views/settings"]); 
+ const [showCredentials, setShowCredentials] = useState(false); + + const formContext = props.registry?.formContext as + | ConfigFormContext + | undefined; + const isCameraLevel = formContext?.level === "camera"; + const cameraName = formContext?.cameraName; + const inputIndex = useMemo(() => getInputIndexFromWidgetId(id), [id]); + + const shouldFetchRawPaths = + isCameraLevel && !!cameraName && inputIndex !== undefined; + const { data: rawPaths } = useSWR( + shouldFetchRawPaths ? "config/raw_paths" : null, + ); + + const rawPath = useMemo(() => { + if (!cameraName || inputIndex === undefined) { + return undefined; + } + + const path = + rawPaths?.cameras?.[cameraName]?.ffmpeg?.inputs?.[inputIndex]?.path; + return typeof path === "string" ? path : undefined; + }, [cameraName, inputIndex, rawPaths]); + + const rawValue = typeof value === "string" ? value : ""; + const resolvedValue = + isMaskedPath(rawValue) && rawPath ? rawPath : (rawValue ?? ""); + const canReveal = + hasCredentials(resolvedValue) && !isMaskedPath(resolvedValue); + const canToggle = canReveal || isMaskedPath(rawValue); + + const isMaskedView = canToggle && !showCredentials; + const displayValue = isMaskedView + ? maskCredentials(resolvedValue) + : resolvedValue; + + const isNullable = Array.isArray(schema.type) + ? schema.type.includes("null") + : false; + + const fieldClassName = getSizedFieldClassName(options, "xs"); + const uriLabel = t("cameraWizard.step3.url", { + ns: "views/settings", + defaultValue: schema.title, + }); + const toggleLabel = showCredentials + ? 
t("label.hide", { ns: "common", item: uriLabel }) + : t("label.show", { ns: "common", item: uriLabel }); + + const handleFocus = (event: FocusEvent) => { + if (isMaskedView && canReveal) { + setShowCredentials(true); + onFocus(id, resolvedValue); + return; + } + + onFocus(id, event.target.value); + }; + + const handleBlur = (event: FocusEvent) => { + if (canToggle) { + setShowCredentials(false); + } + + onBlur(id, event.target.value); + }; + + return ( +
+ + onChange( + e.target.value === "" + ? isNullable + ? null + : undefined + : e.target.value, + ) + } + onBlur={handleBlur} + onFocus={handleFocus} + aria-label={schema.title} + /> + + {canToggle ? ( + + ) : null} +
+ ); +} diff --git a/web/src/components/config-form/theme/widgets/CheckboxWidget.tsx b/web/src/components/config-form/theme/widgets/CheckboxWidget.tsx new file mode 100644 index 000000000..29f5bf3dc --- /dev/null +++ b/web/src/components/config-form/theme/widgets/CheckboxWidget.tsx @@ -0,0 +1,17 @@ +// Checkbox Widget - maps to shadcn/ui Checkbox +import type { WidgetProps } from "@rjsf/utils"; +import { Checkbox } from "@/components/ui/checkbox"; + +export function CheckboxWidget(props: WidgetProps) { + const { id, value, disabled, readonly, onChange, label, schema } = props; + + return ( + onChange(checked)} + aria-label={label || schema.title || "Checkbox"} + /> + ); +} diff --git a/web/src/components/config-form/theme/widgets/ColorWidget.tsx b/web/src/components/config-form/theme/widgets/ColorWidget.tsx new file mode 100644 index 000000000..3e64bf62d --- /dev/null +++ b/web/src/components/config-form/theme/widgets/ColorWidget.tsx @@ -0,0 +1,53 @@ +// Color Widget - For RGB color objects +import type { WidgetProps } from "@rjsf/utils"; +import { Input } from "@/components/ui/input"; +import { Label } from "@/components/ui/label"; +import { useMemo, useCallback } from "react"; + +interface RGBColor { + red: number; + green: number; + blue: number; +} + +export function ColorWidget(props: WidgetProps) { + const { id, value, disabled, readonly, onChange } = props; + + // Convert object to hex for color picker + const hexValue = useMemo(() => { + if (!value || typeof value !== "object") { + return "#ffffff"; + } + const { red = 255, green = 255, blue = 255 } = value as RGBColor; + return `#${red.toString(16).padStart(2, "0")}${green.toString(16).padStart(2, "0")}${blue.toString(16).padStart(2, "0")}`; + }, [value]); + + const handleColorChange = useCallback( + (e: React.ChangeEvent) => { + const hex = e.target.value; + const red = parseInt(hex.slice(1, 3), 16); + const green = parseInt(hex.slice(3, 5), 16); + const blue = parseInt(hex.slice(5, 7), 16); + onChange({ 
red, green, blue }); + }, + [onChange], + ); + + return ( +
+ +
+ + + +
+
+ ); +} diff --git a/web/src/components/config-form/theme/widgets/FfmpegArgsWidget.tsx b/web/src/components/config-form/theme/widgets/FfmpegArgsWidget.tsx new file mode 100644 index 000000000..415cd2603 --- /dev/null +++ b/web/src/components/config-form/theme/widgets/FfmpegArgsWidget.tsx @@ -0,0 +1,344 @@ +import type { WidgetProps } from "@rjsf/utils"; +import useSWR from "swr"; +import { useCallback, useEffect, useMemo, useState } from "react"; +import { useTranslation } from "react-i18next"; +import { Input } from "@/components/ui/input"; +import { ConfigFormContext } from "@/types/configForm"; +import { + Select, + SelectContent, + SelectItem, + SelectTrigger, + SelectValue, +} from "@/components/ui/select"; +import { RadioGroup, RadioGroupItem } from "@/components/ui/radio-group"; + +type FfmpegPresetResponse = { + hwaccel_args: string[]; + input_args: string[]; + output_args: { + record: string[]; + detect: string[]; + }; +}; + +type FfmpegArgsMode = "preset" | "manual" | "inherit"; + +type PresetField = + | "hwaccel_args" + | "input_args" + | "output_args.record" + | "output_args.detect"; + +const getPresetOptions = ( + data: FfmpegPresetResponse | undefined, + field: PresetField | undefined, +): string[] => { + if (!data || !field) { + return []; + } + + if (field === "hwaccel_args") { + return data.hwaccel_args; + } + + if (field === "input_args") { + return data.input_args; + } + + if (field.startsWith("output_args.")) { + const key = field.split(".")[1] as "record" | "detect"; + return data.output_args?.[key] ?? 
[]; + } + + return []; +}; + +const resolveMode = ( + value: unknown, + presets: string[], + defaultMode: FfmpegArgsMode, + allowInherit: boolean, +): FfmpegArgsMode => { + if (allowInherit && (value === null || value === undefined)) { + return "inherit"; + } + + if (allowInherit && Array.isArray(value) && value.length === 0) { + return "inherit"; + } + + if (Array.isArray(value)) { + return "manual"; + } + + if (typeof value === "string") { + if (presets.length === 0) { + return defaultMode; + } + + return presets.includes(value) ? "preset" : "manual"; + } + + return defaultMode; +}; + +const normalizeManualText = (value: unknown): string => { + if (Array.isArray(value)) { + return value.join(" "); + } + + if (typeof value === "string") { + return value; + } + + return ""; +}; + +export function FfmpegArgsWidget(props: WidgetProps) { + const formContext = props.registry?.formContext as + | ConfigFormContext + | undefined; + const i18nNamespace = formContext?.i18nNamespace as string | undefined; + const isCameraLevel = formContext?.level === "camera"; + const effectiveNamespace = isCameraLevel ? "config/cameras" : i18nNamespace; + const { t, i18n } = useTranslation([ + effectiveNamespace || i18nNamespace || "common", + i18nNamespace || "common", + "views/settings", + ]); + const { + value, + onChange, + disabled, + readonly, + options, + placeholder, + schema, + id, + } = props; + const presetField = options?.ffmpegPresetField as PresetField | undefined; + const allowInherit = options?.allowInherit === true; + const hideDescription = options?.hideDescription === true; + const useSplitLayout = options?.splitLayout !== false; + + const { data } = useSWR("ffmpeg/presets"); + + const presetOptions = useMemo( + () => getPresetOptions(data, presetField), + [data, presetField], + ); + + const canUsePresets = presetOptions.length > 0; + const defaultMode: FfmpegArgsMode = canUsePresets ? 
"preset" : "manual"; + + const detectedMode = useMemo( + () => resolveMode(value, presetOptions, defaultMode, allowInherit), + [value, presetOptions, defaultMode, allowInherit], + ); + + const [mode, setMode] = useState(detectedMode); + + useEffect(() => { + if (!canUsePresets && detectedMode === "preset") { + setMode("manual"); + return; + } + + setMode(detectedMode); + }, [canUsePresets, detectedMode]); + + const handleModeChange = useCallback( + (nextMode: FfmpegArgsMode) => { + setMode(nextMode); + + if (nextMode === "inherit") { + onChange(undefined); + return; + } + + if (nextMode === "preset") { + const currentValue = typeof value === "string" ? value : undefined; + const presetValue = + currentValue && presetOptions.includes(currentValue) + ? currentValue + : presetOptions[0]; + if (presetValue) { + onChange(presetValue); + } + return; + } + + if (mode === "preset") { + onChange(""); + return; + } + + const manualText = normalizeManualText(value); + onChange(manualText); + }, + [mode, onChange, presetOptions, value], + ); + + const handlePresetChange = useCallback( + (preset: string) => { + onChange(preset); + }, + [onChange], + ); + + const handleManualChange = useCallback( + (event: React.ChangeEvent) => { + const newText = event.target.value; + onChange(newText); + }, + [onChange], + ); + + const manualValue = normalizeManualText(value); + const presetValue = + typeof value === "string" && presetOptions.includes(value) ? value : ""; + const fallbackDescriptionKey = useMemo(() => { + if (!presetField) { + return undefined; + } + + const isInputScoped = id.includes("_inputs_"); + const prefix = isInputScoped ? "ffmpeg.inputs" : "ffmpeg"; + + if (presetField === "hwaccel_args") { + return `${prefix}.hwaccel_args.description`; + } + + if (presetField === "input_args") { + return `${prefix}.input_args.description`; + } + + if (presetField === "output_args.record") { + return isInputScoped + ? 
"ffmpeg.inputs.output_args.record.description" + : "ffmpeg.output_args.record.description"; + } + + if (presetField === "output_args.detect") { + return isInputScoped + ? "ffmpeg.inputs.output_args.detect.description" + : "ffmpeg.output_args.detect.description"; + } + + return undefined; + }, [id, presetField]); + + const translatedDescription = + fallbackDescriptionKey && + effectiveNamespace && + i18n.exists(fallbackDescriptionKey, { ns: effectiveNamespace }) + ? t(fallbackDescriptionKey, { ns: effectiveNamespace }) + : ""; + const fieldDescription = + typeof schema.description === "string" && schema.description.length > 0 + ? schema.description + : translatedDescription; + + return ( +
+ handleModeChange(next as FfmpegArgsMode)} + className="gap-3" + > + {allowInherit ? ( +
+ + +
+ ) : null} +
+ + +
+
+ + +
+
+ + {mode === "inherit" ? null : mode === "preset" && canUsePresets ? ( + + ) : ( + + )} + + {!hideDescription && !useSplitLayout && fieldDescription ? ( +

{fieldDescription}

+ ) : null} +
+ ); +} diff --git a/web/src/components/config-form/theme/widgets/InputRolesWidget.tsx b/web/src/components/config-form/theme/widgets/InputRolesWidget.tsx new file mode 100644 index 000000000..c50cf7652 --- /dev/null +++ b/web/src/components/config-form/theme/widgets/InputRolesWidget.tsx @@ -0,0 +1,67 @@ +import type { WidgetProps } from "@rjsf/utils"; +import { useMemo } from "react"; +import { useTranslation } from "react-i18next"; +import { Switch } from "@/components/ui/switch"; + +const INPUT_ROLES = ["detect", "record", "audio"] as const; + +function normalizeValue(value: unknown): string[] { + if (Array.isArray(value)) { + return value.filter((item): item is string => typeof item === "string"); + } + + if (typeof value === "string" && value.trim()) { + return [value.trim()]; + } + + return []; +} + +export function InputRolesWidget(props: WidgetProps) { + const { id, value, disabled, readonly, onChange } = props; + const { t } = useTranslation(["views/settings"]); + + const selectedRoles = useMemo(() => normalizeValue(value), [value]); + + const toggleRole = (role: string, enabled: boolean) => { + if (enabled) { + if (!selectedRoles.includes(role)) { + onChange([...selectedRoles, role]); + } + return; + } + + onChange(selectedRoles.filter((item) => item !== role)); + }; + + return ( +
+
+ {INPUT_ROLES.map((role) => { + const checked = selectedRoles.includes(role); + const label = t(`configForm.inputRoles.options.${role}`, { + ns: "views/settings", + defaultValue: role, + }); + + return ( +
+ + toggleRole(role, !!enabled)} + /> +
+ ); + })} +
+
+ ); +} diff --git a/web/src/components/config-form/theme/widgets/NumberWidget.tsx b/web/src/components/config-form/theme/widgets/NumberWidget.tsx new file mode 100644 index 000000000..0000c5067 --- /dev/null +++ b/web/src/components/config-form/theme/widgets/NumberWidget.tsx @@ -0,0 +1,44 @@ +// Number Widget - Input with number type +import type { WidgetProps } from "@rjsf/utils"; +import { Input } from "@/components/ui/input"; + +export function NumberWidget(props: WidgetProps) { + const { + id, + value, + disabled, + readonly, + onChange, + onBlur, + onFocus, + schema, + options, + } = props; + + const handleChange = (e: React.ChangeEvent) => { + const val = e.target.value; + if (val === "") { + onChange(undefined); + } else { + const num = + schema.type === "integer" ? parseInt(val, 10) : parseFloat(val); + onChange(isNaN(num) ? undefined : num); + } + }; + + return ( + onBlur(id, e.target.value)} + onFocus={(e) => onFocus(id, e.target.value)} + aria-label={schema.title} + /> + ); +} diff --git a/web/src/components/config-form/theme/widgets/ObjectLabelSwitchesWidget.tsx b/web/src/components/config-form/theme/widgets/ObjectLabelSwitchesWidget.tsx new file mode 100644 index 000000000..b7b2df571 --- /dev/null +++ b/web/src/components/config-form/theme/widgets/ObjectLabelSwitchesWidget.tsx @@ -0,0 +1,101 @@ +// Object Label Switches Widget - For selecting objects via switches +import { WidgetProps } from "@rjsf/utils"; +import { SwitchesWidget } from "./SwitchesWidget"; +import { FormContext } from "./SwitchesWidget"; +import { getTranslatedLabel } from "@/utils/i18n"; +import { FrigateConfig } from "@/types/frigateConfig"; +import { JsonObject } from "@/types/configForm"; + +// Collect labelmap values (human-readable labels) from a labelmap object. 
+function collectLabelmapLabels(labelmap: unknown, labels: Set) { + if (!labelmap || typeof labelmap !== "object") { + return; + } + + Object.values(labelmap as JsonObject).forEach((value) => { + if (typeof value === "string" && value.trim().length > 0) { + labels.add(value); + } + }); +} + +// Read labelmap labels from the global model and detector models. +function getLabelmapLabels(context: FormContext): string[] { + const labels = new Set(); + const fullConfig = context.fullConfig as FrigateConfig | undefined; + + if (fullConfig?.model) { + collectLabelmapLabels(fullConfig.model.labelmap, labels); + } + + if (fullConfig?.detectors) { + // detectors is a map of detector configs; each may include a model labelmap. + Object.values(fullConfig.detectors).forEach((detector) => { + if (detector?.model?.labelmap) { + collectLabelmapLabels(detector.model.labelmap, labels); + } + }); + } + + return [...labels]; +} + +// Build the list of labels for switches (labelmap + configured track list). 
+function getObjectLabels(context: FormContext): string[] { + const labelmapLabels = getLabelmapLabels(context); + let cameraLabels: string[] = []; + let globalLabels: string[] = []; + + if (context) { + // context.cameraValue and context.globalValue should be the entire objects section + if ( + context.cameraValue && + typeof context.cameraValue === "object" && + !Array.isArray(context.cameraValue) + ) { + const trackValue = (context.cameraValue as JsonObject).track; + if (Array.isArray(trackValue)) { + cameraLabels = trackValue.filter( + (item): item is string => typeof item === "string", + ); + } + } + + if ( + context.globalValue && + typeof context.globalValue === "object" && + !Array.isArray(context.globalValue) + ) { + const globalTrackValue = (context.globalValue as JsonObject).track; + if (Array.isArray(globalTrackValue)) { + globalLabels = globalTrackValue.filter( + (item): item is string => typeof item === "string", + ); + } + } + } + + const sourceLabels = cameraLabels.length > 0 ? 
cameraLabels : globalLabels; + const combinedLabels = new Set([...labelmapLabels, ...sourceLabels]); + return [...combinedLabels].sort(); +} + +function getObjectLabelDisplayName(label: string): string { + return getTranslatedLabel(label, "object"); +} + +export function ObjectLabelSwitchesWidget(props: WidgetProps) { + return ( + + ); +} diff --git a/web/src/components/config-form/theme/widgets/PasswordWidget.tsx b/web/src/components/config-form/theme/widgets/PasswordWidget.tsx new file mode 100644 index 000000000..80a4e504e --- /dev/null +++ b/web/src/components/config-form/theme/widgets/PasswordWidget.tsx @@ -0,0 +1,59 @@ +// Password Widget - Input with password type +import type { WidgetProps } from "@rjsf/utils"; +import { Input } from "@/components/ui/input"; +import { Button } from "@/components/ui/button"; +import { useState } from "react"; +import { LuEye, LuEyeOff } from "react-icons/lu"; +import { cn } from "@/lib/utils"; +import { getSizedFieldClassName } from "../utils"; + +export function PasswordWidget(props: WidgetProps) { + const { + id, + value, + disabled, + readonly, + onChange, + onBlur, + onFocus, + placeholder, + schema, + options, + } = props; + + const [showPassword, setShowPassword] = useState(false); + const fieldClassName = getSizedFieldClassName(options, "sm"); + + return ( +
+ + onChange(e.target.value === "" ? undefined : e.target.value) + } + onBlur={(e) => onBlur(id, e.target.value)} + onFocus={(e) => onFocus(id, e.target.value)} + aria-label={schema.title} + className="w-full pr-10" + /> + +
+ ); +} diff --git a/web/src/components/config-form/theme/widgets/RangeWidget.tsx b/web/src/components/config-form/theme/widgets/RangeWidget.tsx new file mode 100644 index 000000000..bd05bfe94 --- /dev/null +++ b/web/src/components/config-form/theme/widgets/RangeWidget.tsx @@ -0,0 +1,31 @@ +// Range Widget - maps to shadcn/ui Slider +import type { WidgetProps } from "@rjsf/utils"; +import { Slider } from "@/components/ui/slider"; +import { cn } from "@/lib/utils"; + +export function RangeWidget(props: WidgetProps) { + const { id, value, disabled, readonly, onChange, schema, options } = props; + + const min = schema.minimum ?? 0; + const max = schema.maximum ?? 100; + const step = + (options.step as number) || (schema.type === "integer" ? 1 : 0.1); + + return ( +
+ onChange(vals[0])} + className={cn("flex-1", disabled && "opacity-50")} + /> + + {value ?? min} + +
+ ); +} diff --git a/web/src/components/config-form/theme/widgets/SelectWidget.tsx b/web/src/components/config-form/theme/widgets/SelectWidget.tsx new file mode 100644 index 000000000..d5047e959 --- /dev/null +++ b/web/src/components/config-form/theme/widgets/SelectWidget.tsx @@ -0,0 +1,51 @@ +// Select Widget - maps to shadcn/ui Select +import type { WidgetProps } from "@rjsf/utils"; +import { + Select, + SelectContent, + SelectItem, + SelectTrigger, + SelectValue, +} from "@/components/ui/select"; +import { getSizedFieldClassName } from "../utils"; + +export function SelectWidget(props: WidgetProps) { + const { + id, + options, + value, + disabled, + readonly, + onChange, + placeholder, + schema, + } = props; + + const { enumOptions = [] } = options; + const fieldClassName = getSizedFieldClassName(options, "sm"); + + return ( + + ); +} diff --git a/web/src/components/config-form/theme/widgets/SwitchWidget.tsx b/web/src/components/config-form/theme/widgets/SwitchWidget.tsx new file mode 100644 index 000000000..8e323e5a1 --- /dev/null +++ b/web/src/components/config-form/theme/widgets/SwitchWidget.tsx @@ -0,0 +1,17 @@ +// Switch Widget - maps to shadcn/ui Switch +import type { WidgetProps } from "@rjsf/utils"; +import { Switch } from "@/components/ui/switch"; + +export function SwitchWidget(props: WidgetProps) { + const { id, value, disabled, readonly, onChange, label, schema } = props; + + return ( + onChange(checked)} + aria-label={label || schema.title || "Toggle"} + /> + ); +} diff --git a/web/src/components/config-form/theme/widgets/SwitchesWidget.tsx b/web/src/components/config-form/theme/widgets/SwitchesWidget.tsx new file mode 100644 index 000000000..0a56c47b8 --- /dev/null +++ b/web/src/components/config-form/theme/widgets/SwitchesWidget.tsx @@ -0,0 +1,231 @@ +// Generic Switches Widget - Reusable component for selecting from any list of entities +import { WidgetProps } from "@rjsf/utils"; +import { useMemo, useState } from "react"; +import { Switch } from 
"@/components/ui/switch"; +import { Button } from "@/components/ui/button"; +import { Input } from "@/components/ui/input"; +import { + Collapsible, + CollapsibleContent, + CollapsibleTrigger, +} from "@/components/ui/collapsible"; +import { LuChevronDown, LuChevronRight } from "react-icons/lu"; +import { CameraConfig, FrigateConfig } from "@/types/frigateConfig"; +import { ConfigFormContext } from "@/types/configForm"; +import { cn } from "@/lib/utils"; + +type FormContext = Pick< + ConfigFormContext, + | "cameraValue" + | "globalValue" + | "fullCameraConfig" + | "fullConfig" + | "t" + | "level" +> & { + fullCameraConfig?: CameraConfig; + fullConfig?: FrigateConfig; +}; + +export type { FormContext }; + +export type SwitchesWidgetOptions = { + /** Function to extract available entities from context */ + getEntities: (context: FormContext) => string[]; + /** Function to get display label for an entity (e.g., translate, get friendly name) */ + getDisplayLabel?: (entity: string, context?: FormContext) => string; + /** i18n key prefix (e.g., "objectLabels", "zoneNames") */ + i18nKey: string; + /** Translation namespace (default: "views/settings") */ + namespace?: string; + /** Optional class name for the list container */ + listClassName?: string; + /** Enable search input to filter the list */ + enableSearch?: boolean; +}; + +function normalizeValue(value: unknown): string[] { + if (Array.isArray(value)) { + return value.filter((item): item is string => typeof item === "string"); + } + + if (typeof value === "string" && value.trim().length > 0) { + return [value.trim()]; + } + + return []; +} + +/** + * Generic switches widget for selecting from any list of entities (objects, zones, etc.) 
+ * + * @example + * // In uiSchema: + * "track": { + * "ui:widget": "switches", + * "ui:options": { + * "getEntities": (context) => [...], + * "i18nKey": "objectLabels" + * } + * } + */ +export function SwitchesWidget(props: WidgetProps) { + const { value, disabled, readonly, onChange, formContext, id, registry } = + props; + + // Get configuration from widget options + const i18nKey = useMemo( + () => (props.options?.i18nKey as string | undefined) || "entities", + [props.options], + ); + const namespace = useMemo( + () => (props.options?.namespace as string | undefined) || "views/settings", + [props.options], + ); + + // Try to get formContext from direct prop, options, or registry + const context = useMemo( + () => + (formContext as FormContext | undefined) || + (props.options?.formContext as FormContext | undefined) || + (registry?.formContext as FormContext | undefined), + [formContext, props.options, registry], + ); + + const availableEntities = useMemo(() => { + const getEntities = + (props.options?.getEntities as + | ((context: FormContext) => string[]) + | undefined) || (() => []); + if (context) { + return getEntities(context); + } + return []; + }, [context, props.options]); + + const getDisplayLabel = useMemo( + () => + (props.options?.getDisplayLabel as + | ((entity: string, context?: FormContext) => string) + | undefined) || ((entity: string) => entity), + [props.options], + ); + + const listClassName = useMemo( + () => props.options?.listClassName as string | undefined, + [props.options], + ); + + const enableSearch = useMemo( + () => props.options?.enableSearch as boolean | undefined, + [props.options], + ); + + const selectedEntities = useMemo(() => normalizeValue(value), [value]); + const [isOpen, setIsOpen] = useState(selectedEntities.length > 0); + const [searchTerm, setSearchTerm] = useState(""); + + const filteredEntities = useMemo(() => { + if (!enableSearch || !searchTerm.trim()) { + return availableEntities; + } + const term = 
searchTerm.toLowerCase(); + return availableEntities.filter((entity) => { + const displayLabel = getDisplayLabel(entity, context); + return displayLabel.toLowerCase().includes(term); + }); + }, [availableEntities, searchTerm, enableSearch, getDisplayLabel, context]); + + const toggleEntity = (entity: string, enabled: boolean) => { + if (enabled) { + onChange([...selectedEntities, entity]); + } else { + onChange(selectedEntities.filter((item) => item !== entity)); + } + }; + + const t = context?.t; + const summary = t + ? t(`configForm.${i18nKey}.summary`, { + ns: namespace, + defaultValue: "{{count}} selected", + count: selectedEntities.length, + }) + : `${selectedEntities.length} selected`; + + const emptyMessage = t + ? t(`configForm.${i18nKey}.empty`, { + ns: namespace, + defaultValue: "No items available", + }) + : "No items available"; + + return ( + +
+ + + + + + {availableEntities.length === 0 ? ( +
{emptyMessage}
+ ) : ( + <> + {enableSearch && ( +
+ setSearchTerm(e.target.value)} + className="mb-2" + /> +
+ )} +
+ {filteredEntities.map((entity) => { + const checked = selectedEntities.includes(entity); + const displayLabel = getDisplayLabel(entity, context); + return ( +
+ + + toggleEntity(entity, !!value) + } + /> +
+ ); + })} +
+ + )} +
+
+
+ ); +} diff --git a/web/src/components/config-form/theme/widgets/TagsWidget.tsx b/web/src/components/config-form/theme/widgets/TagsWidget.tsx new file mode 100644 index 000000000..718805b47 --- /dev/null +++ b/web/src/components/config-form/theme/widgets/TagsWidget.tsx @@ -0,0 +1,74 @@ +// Tags Widget - For array of strings input +import type { WidgetProps } from "@rjsf/utils"; +import { Input } from "@/components/ui/input"; +import { Badge } from "@/components/ui/badge"; +import { Button } from "@/components/ui/button"; +import { useState, useCallback, useMemo } from "react"; +import { LuX } from "react-icons/lu"; + +export function TagsWidget(props: WidgetProps) { + const { id, value = [], disabled, readonly, onChange, schema } = props; + + const [inputValue, setInputValue] = useState(""); + + const tags = useMemo(() => (Array.isArray(value) ? value : []), [value]); + + const addTag = useCallback(() => { + const trimmed = inputValue.trim(); + if (trimmed && !tags.includes(trimmed)) { + onChange([...tags, trimmed]); + setInputValue(""); + } + }, [inputValue, tags, onChange]); + + const removeTag = useCallback( + (tagToRemove: string) => { + onChange(tags.filter((tag: string) => tag !== tagToRemove)); + }, + [tags, onChange], + ); + + const handleKeyDown = (e: React.KeyboardEvent) => { + if (e.key === "Enter") { + e.preventDefault(); + addTag(); + } else if (e.key === "Backspace" && inputValue === "" && tags.length > 0) { + removeTag(tags[tags.length - 1]); + } + }; + + return ( +
+
+ {tags.map((tag: string, index: number) => ( + + {tag} + {!disabled && !readonly && ( + + )} + + ))} +
+ {!readonly && ( + setInputValue(e.target.value)} + onKeyDown={handleKeyDown} + onBlur={addTag} + /> + )} +
+ ); +} diff --git a/web/src/components/config-form/theme/widgets/TextWidget.tsx b/web/src/components/config-form/theme/widgets/TextWidget.tsx new file mode 100644 index 000000000..16919f14b --- /dev/null +++ b/web/src/components/config-form/theme/widgets/TextWidget.tsx @@ -0,0 +1,48 @@ +// Text Widget - maps to shadcn/ui Input +import type { WidgetProps } from "@rjsf/utils"; +import { Input } from "@/components/ui/input"; +import { cn } from "@/lib/utils"; +import { getSizedFieldClassName } from "../utils"; + +export function TextWidget(props: WidgetProps) { + const { + id, + value, + disabled, + readonly, + onChange, + onBlur, + onFocus, + placeholder, + schema, + options, + } = props; + + const isNullable = Array.isArray(schema.type) + ? schema.type.includes("null") + : false; + const fieldClassName = getSizedFieldClassName(options, "xs"); + + return ( + + onChange( + e.target.value === "" + ? isNullable + ? null + : undefined + : e.target.value, + ) + } + onBlur={(e) => onBlur(id, e.target.value)} + onFocus={(e) => onFocus(id, e.target.value)} + aria-label={schema.title} + /> + ); +} diff --git a/web/src/components/config-form/theme/widgets/TextareaWidget.tsx b/web/src/components/config-form/theme/widgets/TextareaWidget.tsx new file mode 100644 index 000000000..4e260a816 --- /dev/null +++ b/web/src/components/config-form/theme/widgets/TextareaWidget.tsx @@ -0,0 +1,48 @@ +// Textarea Widget - maps to shadcn/ui Textarea +import type { WidgetProps } from "@rjsf/utils"; +import { Textarea } from "@/components/ui/textarea"; +import { cn } from "@/lib/utils"; +import { getSizedFieldClassName } from "../utils"; + +export function TextareaWidget(props: WidgetProps) { + const { + id, + value, + disabled, + readonly, + onChange, + onBlur, + onFocus, + placeholder, + schema, + options, + } = props; + + const isNullable = Array.isArray(schema.type) + ? schema.type.includes("null") + : false; + const fieldClassName = getSizedFieldClassName(options, "md"); + + return ( +