diff --git a/.cspell/frigate-dictionary.txt b/.cspell/frigate-dictionary.txt index f2bcf417a..f5292b167 100644 --- a/.cspell/frigate-dictionary.txt +++ b/.cspell/frigate-dictionary.txt @@ -229,6 +229,7 @@ Reolink restream restreamed restreaming +RJSF rkmpp rknn rkrga diff --git a/.github/copilot-instructions.md b/.github/copilot-instructions.md index f053abe3f..0af9c249f 100644 --- a/.github/copilot-instructions.md +++ b/.github/copilot-instructions.md @@ -324,6 +324,12 @@ try: value = await sensor.read() except Exception: # ❌ Too broad logger.error("Failed") + +# Returning exceptions in JSON responses +except ValueError as e: + return JSONResponse( + content={"success": False, "message": str(e)}, + ) ``` ### ✅ Use These Instead @@ -353,6 +359,16 @@ try: value = await sensor.read() except SensorException as err: # ✅ Specific logger.exception("Failed to read sensor") + +# Safe error responses +except ValueError: + logger.exception("Invalid parameters for API request") + return JSONResponse( + content={ + "success": False, + "message": "Invalid request parameters", + }, + ) ``` ## Project-Specific Conventions diff --git a/Makefile b/Makefile index d1427b6df..3800399ea 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ default_target: local COMMIT_HASH := $(shell git log -1 --pretty=format:"%h"|tail -1) -VERSION = 0.17.0 +VERSION = 0.18.0 IMAGE_REPO ?= ghcr.io/blakeblackshear/frigate GITHUB_REF_NAME ?= $(shell git rev-parse --abbrev-ref HEAD) BOARDS= #Initialized empty @@ -49,7 +49,8 @@ push: push-boards --push run: local - docker run --rm --publish=5000:5000 --volume=${PWD}/config:/config frigate:latest + docker run --rm --publish=5000:5000 --publish=8971:8971 \ + --volume=${PWD}/config:/config frigate:latest run_tests: local docker run --rm --workdir=/opt/frigate --entrypoint= frigate:latest \ diff --git a/docker/main/Dockerfile b/docker/main/Dockerfile index 055a1458f..b14320033 100644 --- a/docker/main/Dockerfile +++ b/docker/main/Dockerfile @@ -55,7 +55,7 @@ 
RUN --mount=type=tmpfs,target=/tmp --mount=type=tmpfs,target=/var/cache/apt \ FROM scratch AS go2rtc ARG TARGETARCH WORKDIR /rootfs/usr/local/go2rtc/bin -ADD --link --chmod=755 "https://github.com/AlexxIT/go2rtc/releases/download/v1.9.10/go2rtc_linux_${TARGETARCH}" go2rtc +ADD --link --chmod=755 "https://github.com/AlexxIT/go2rtc/releases/download/v1.9.13/go2rtc_linux_${TARGETARCH}" go2rtc FROM wget AS tempio ARG TARGETARCH diff --git a/docker/main/rootfs/etc/s6-overlay/s6-rc.d/certsync/run b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/certsync/run index 4ce1c133f..b834c09bb 100755 --- a/docker/main/rootfs/etc/s6-overlay/s6-rc.d/certsync/run +++ b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/certsync/run @@ -10,7 +10,8 @@ echo "[INFO] Starting certsync..." lefile="/etc/letsencrypt/live/frigate/fullchain.pem" -tls_enabled=`python3 /usr/local/nginx/get_listen_settings.py | jq -r .tls.enabled` +tls_enabled=`python3 /usr/local/nginx/get_nginx_settings.py | jq -r .tls.enabled` +listen_external_port=`python3 /usr/local/nginx/get_nginx_settings.py | jq -r .listen.external_port` while true do @@ -34,7 +35,7 @@ do ;; esac - liveprint=`echo | openssl s_client -showcerts -connect 127.0.0.1:8971 2>&1 | openssl x509 -fingerprint 2>&1 | grep -i fingerprint || echo 'failed'` + liveprint=`echo | openssl s_client -showcerts -connect 127.0.0.1:$listen_external_port 2>&1 | openssl x509 -fingerprint 2>&1 | grep -i fingerprint || echo 'failed'` case "$liveprint" in *Fingerprint*) @@ -55,4 +56,4 @@ do done -exit 0 \ No newline at end of file +exit 0 diff --git a/docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx/run b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx/run index 8bd9b5250..a3c7b3248 100755 --- a/docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx/run +++ b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx/run @@ -80,14 +80,14 @@ if [ ! \( -f "$letsencrypt_path/privkey.pem" -a -f "$letsencrypt_path/fullchain. 
fi # build templates for optional FRIGATE_BASE_PATH environment variable -python3 /usr/local/nginx/get_base_path.py | \ +python3 /usr/local/nginx/get_nginx_settings.py | \ tempio -template /usr/local/nginx/templates/base_path.gotmpl \ - -out /usr/local/nginx/conf/base_path.conf + -out /usr/local/nginx/conf/base_path.conf -# build templates for optional TLS support -python3 /usr/local/nginx/get_listen_settings.py | \ - tempio -template /usr/local/nginx/templates/listen.gotmpl \ - -out /usr/local/nginx/conf/listen.conf +# build templates for additional network settings +python3 /usr/local/nginx/get_nginx_settings.py | \ + tempio -template /usr/local/nginx/templates/listen.gotmpl \ + -out /usr/local/nginx/conf/listen.conf # Replace the bash process with the NGINX process, redirecting stderr to stdout exec 2>&1 diff --git a/docker/main/rootfs/usr/local/nginx/get_base_path.py b/docker/main/rootfs/usr/local/nginx/get_base_path.py deleted file mode 100644 index 2e78a7de9..000000000 --- a/docker/main/rootfs/usr/local/nginx/get_base_path.py +++ /dev/null @@ -1,11 +0,0 @@ -"""Prints the base path as json to stdout.""" - -import json -import os -from typing import Any - -base_path = os.environ.get("FRIGATE_BASE_PATH", "") - -result: dict[str, Any] = {"base_path": base_path} - -print(json.dumps(result)) diff --git a/docker/main/rootfs/usr/local/nginx/get_listen_settings.py b/docker/main/rootfs/usr/local/nginx/get_listen_settings.py deleted file mode 100644 index d879db56e..000000000 --- a/docker/main/rootfs/usr/local/nginx/get_listen_settings.py +++ /dev/null @@ -1,35 +0,0 @@ -"""Prints the tls config as json to stdout.""" - -import json -import sys -from typing import Any - -from ruamel.yaml import YAML - -sys.path.insert(0, "/opt/frigate") -from frigate.util.config import find_config_file - -sys.path.remove("/opt/frigate") - -yaml = YAML() - -config_file = find_config_file() - -try: - with open(config_file) as f: - raw_config = f.read() - - if config_file.endswith((".yaml", 
".yml")): - config: dict[str, Any] = yaml.load(raw_config) - elif config_file.endswith(".json"): - config: dict[str, Any] = json.loads(raw_config) -except FileNotFoundError: - config: dict[str, Any] = {} - -tls_config: dict[str, any] = config.get("tls", {"enabled": True}) -networking_config = config.get("networking", {}) -ipv6_config = networking_config.get("ipv6", {"enabled": False}) - -output = {"tls": tls_config, "ipv6": ipv6_config} - -print(json.dumps(output)) diff --git a/docker/main/rootfs/usr/local/nginx/get_nginx_settings.py b/docker/main/rootfs/usr/local/nginx/get_nginx_settings.py new file mode 100644 index 000000000..79cda3686 --- /dev/null +++ b/docker/main/rootfs/usr/local/nginx/get_nginx_settings.py @@ -0,0 +1,62 @@ +"""Prints the nginx settings as json to stdout.""" + +import json +import os +import sys +from typing import Any + +from ruamel.yaml import YAML + +sys.path.insert(0, "/opt/frigate") +from frigate.util.config import find_config_file + +sys.path.remove("/opt/frigate") + +yaml = YAML() + +config_file = find_config_file() + +try: + with open(config_file) as f: + raw_config = f.read() + + if config_file.endswith((".yaml", ".yml")): + config: dict[str, Any] = yaml.load(raw_config) + elif config_file.endswith(".json"): + config: dict[str, Any] = json.loads(raw_config) +except FileNotFoundError: + config: dict[str, Any] = {} + +tls_config: dict[str, Any] = config.get("tls", {}) +tls_config.setdefault("enabled", True) + +networking_config: dict[str, Any] = config.get("networking", {}) +ipv6_config: dict[str, Any] = networking_config.get("ipv6", {}) +ipv6_config.setdefault("enabled", False) + +listen_config: dict[str, Any] = networking_config.get("listen", {}) +listen_config.setdefault("internal", 5000) +listen_config.setdefault("external", 8971) + +# handle case where internal port is a string with ip:port +internal_port = listen_config["internal"] +if type(internal_port) is str: + internal_port = int(internal_port.split(":")[-1]) 
+listen_config["internal_port"] = internal_port + +# handle case where external port is a string with ip:port +external_port = listen_config["external"] +if type(external_port) is str: + external_port = int(external_port.split(":")[-1]) +listen_config["external_port"] = external_port + +base_path = os.environ.get("FRIGATE_BASE_PATH", "") + +result: dict[str, Any] = { + "tls": tls_config, + "ipv6": ipv6_config, + "listen": listen_config, + "base_path": base_path, +} + +print(json.dumps(result)) diff --git a/docker/main/rootfs/usr/local/nginx/templates/base_path.gotmpl b/docker/main/rootfs/usr/local/nginx/templates/base_path.gotmpl index ace4443ee..ca945ba1f 100644 --- a/docker/main/rootfs/usr/local/nginx/templates/base_path.gotmpl +++ b/docker/main/rootfs/usr/local/nginx/templates/base_path.gotmpl @@ -7,7 +7,7 @@ location ^~ {{ .base_path }}/ { # remove base_url from the path before passing upstream rewrite ^{{ .base_path }}/(.*) /$1 break; - proxy_pass $scheme://127.0.0.1:8971; + proxy_pass $scheme://127.0.0.1:{{ .listen.external_port }}; proxy_http_version 1.1; proxy_set_header Upgrade $http_upgrade; proxy_set_header Connection "upgrade"; diff --git a/docker/main/rootfs/usr/local/nginx/templates/listen.gotmpl b/docker/main/rootfs/usr/local/nginx/templates/listen.gotmpl index 066f872cb..628784b60 100644 --- a/docker/main/rootfs/usr/local/nginx/templates/listen.gotmpl +++ b/docker/main/rootfs/usr/local/nginx/templates/listen.gotmpl @@ -1,45 +1,36 @@ - # Internal (IPv4 always; IPv6 optional) -listen 5000; -{{ if .ipv6 }}{{ if .ipv6.enabled }}listen [::]:5000;{{ end }}{{ end }} - +listen {{ .listen.internal }}; +{{ if .ipv6.enabled }}listen [::]:{{ .listen.internal_port }};{{ end }} # intended for external traffic, protected by auth -{{ if .tls }} - {{ if .tls.enabled }} - # external HTTPS (IPv4 always; IPv6 optional) - listen 8971 ssl; - {{ if .ipv6 }}{{ if .ipv6.enabled }}listen [::]:8971 ssl;{{ end }}{{ end }} +{{ if .tls.enabled }} + # external HTTPS (IPv4 always; 
IPv6 optional) + listen {{ .listen.external }} ssl; + {{ if .ipv6.enabled }}listen [::]:{{ .listen.external_port }} ssl;{{ end }} - ssl_certificate /etc/letsencrypt/live/frigate/fullchain.pem; - ssl_certificate_key /etc/letsencrypt/live/frigate/privkey.pem; + ssl_certificate /etc/letsencrypt/live/frigate/fullchain.pem; + ssl_certificate_key /etc/letsencrypt/live/frigate/privkey.pem; - # generated 2024-06-01, Mozilla Guideline v5.7, nginx 1.25.3, OpenSSL 1.1.1w, modern configuration, no OCSP - # https://ssl-config.mozilla.org/#server=nginx&version=1.25.3&config=modern&openssl=1.1.1w&ocsp=false&guideline=5.7 - ssl_session_timeout 1d; - ssl_session_cache shared:MozSSL:10m; # about 40000 sessions - ssl_session_tickets off; + # generated 2024-06-01, Mozilla Guideline v5.7, nginx 1.25.3, OpenSSL 1.1.1w, modern configuration, no OCSP + # https://ssl-config.mozilla.org/#server=nginx&version=1.25.3&config=modern&openssl=1.1.1w&ocsp=false&guideline=5.7 + ssl_session_timeout 1d; + ssl_session_cache shared:MozSSL:10m; # about 40000 sessions + ssl_session_tickets off; - # modern configuration - ssl_protocols TLSv1.3; - ssl_prefer_server_ciphers off; + # modern configuration + ssl_protocols TLSv1.3; + ssl_prefer_server_ciphers off; - # HSTS (ngx_http_headers_module is required) (63072000 seconds) - add_header Strict-Transport-Security "max-age=63072000" always; + # HSTS (ngx_http_headers_module is required) (63072000 seconds) + add_header Strict-Transport-Security "max-age=63072000" always; - # ACME challenge location - location /.well-known/acme-challenge/ { - default_type "text/plain"; - root /etc/letsencrypt/www; - } - {{ else }} - # external HTTP (IPv4 always; IPv6 optional) - listen 8971; - {{ if .ipv6 }}{{ if .ipv6.enabled }}listen [::]:8971;{{ end }}{{ end }} - {{ end }} + # ACME challenge location + location /.well-known/acme-challenge/ { + default_type "text/plain"; + root /etc/letsencrypt/www; + } {{ else }} - # (No tls section) default to HTTP (IPv4 always; IPv6 
optional) - listen 8971; - {{ if .ipv6 }}{{ if .ipv6.enabled }}listen [::]:8971;{{ end }}{{ end }} + # (No tls) default to HTTP (IPv4 always; IPv6 optional) + listen {{ .listen.external }}; + {{ if .ipv6.enabled }}listen [::]:{{ .listen.external_port }};{{ end }} {{ end }} - diff --git a/docker/rocm/Dockerfile b/docker/rocm/Dockerfile index 9edcd6058..42447a26b 100644 --- a/docker/rocm/Dockerfile +++ b/docker/rocm/Dockerfile @@ -13,7 +13,7 @@ ARG ROCM RUN apt update -qq && \ apt install -y wget gpg && \ - wget -O rocm.deb https://repo.radeon.com/amdgpu-install/7.1.1/ubuntu/jammy/amdgpu-install_7.1.1.70101-1_all.deb && \ + wget -O rocm.deb https://repo.radeon.com/amdgpu-install/7.2/ubuntu/jammy/amdgpu-install_7.2.70200-1_all.deb && \ apt install -y ./rocm.deb && \ apt update && \ apt install -qq -y rocm @@ -56,6 +56,8 @@ FROM scratch AS rocm-dist ARG ROCM +# Copy HIP headers required for MIOpen JIT (BuildHip) / HIPRTC at runtime +COPY --from=rocm /opt/rocm-${ROCM}/include/ /opt/rocm-${ROCM}/include/ COPY --from=rocm /opt/rocm-$ROCM/bin/rocminfo /opt/rocm-$ROCM/bin/migraphx-driver /opt/rocm-$ROCM/bin/ # Copy MIOpen database files for gfx10xx and gfx11xx only (RDNA2/RDNA3) COPY --from=rocm /opt/rocm-$ROCM/share/miopen/db/*gfx10* /opt/rocm-$ROCM/share/miopen/db/ diff --git a/docker/rocm/requirements-wheels-rocm.txt b/docker/rocm/requirements-wheels-rocm.txt index b6a202f93..da22f2ff6 100644 --- a/docker/rocm/requirements-wheels-rocm.txt +++ b/docker/rocm/requirements-wheels-rocm.txt @@ -1 +1 @@ -onnxruntime-migraphx @ https://github.com/NickM-27/frigate-onnxruntime-rocm/releases/download/v7.1.0/onnxruntime_migraphx-1.23.1-cp311-cp311-linux_x86_64.whl \ No newline at end of file +onnxruntime-migraphx @ https://github.com/NickM-27/frigate-onnxruntime-rocm/releases/download/v7.2.0/onnxruntime_migraphx-1.23.1-cp311-cp311-linux_x86_64.whl \ No newline at end of file diff --git a/docker/rocm/rocm.hcl b/docker/rocm/rocm.hcl index 6595066c5..710bfe995 100644 --- 
a/docker/rocm/rocm.hcl +++ b/docker/rocm/rocm.hcl @@ -1,5 +1,5 @@ variable "ROCM" { - default = "7.1.1" + default = "7.2.0" } variable "HSA_OVERRIDE_GFX_VERSION" { default = "" diff --git a/docs/docs/configuration/advanced.md b/docs/docs/configuration/advanced.md index 17eb2053d..b8dbffd62 100644 --- a/docs/docs/configuration/advanced.md +++ b/docs/docs/configuration/advanced.md @@ -155,34 +155,33 @@ services: ### Enabling IPv6 -IPv6 is disabled by default, to enable IPv6 listen.gotmpl needs to be bind mounted with IPv6 enabled. For example: +IPv6 is disabled by default, to enable IPv6 modify your Frigate configuration as follows: -``` -{{ if not .enabled }} -# intended for external traffic, protected by auth -listen 8971; -{{ else }} -# intended for external traffic, protected by auth -listen 8971 ssl; - -# intended for internal traffic, not protected by auth -listen 5000; +```yaml +networking: + ipv6: + enabled: True ``` -becomes +### Listen on different ports -``` -{{ if not .enabled }} -# intended for external traffic, protected by auth -listen [::]:8971 ipv6only=off; -{{ else }} -# intended for external traffic, protected by auth -listen [::]:8971 ipv6only=off ssl; +You can change the ports Nginx uses for listening using Frigate's configuration file. The internal port (unauthenticated) and external port (authenticated) can be changed independently. You can also specify an IP address using the format `ip:port` if you wish to bind the port to a specific interface. This may be useful for example to prevent exposing the internal port outside the container. -# intended for internal traffic, not protected by auth -listen [::]:5000 ipv6only=off; +For example: + +```yaml +networking: + listen: + internal: 127.0.0.1:5000 + external: 8971 ``` +:::warning + +This setting is for advanced users. For the majority of use cases it's recommended to change the `ports` section of your Docker compose file or use the Docker `run` `--publish` option instead, e.g. `-p 443:8971`. 
Changing Frigate's ports may break some integrations. + +::: + ## Base path By default, Frigate runs at the root path (`/`). However some setups require to run Frigate under a custom path prefix (e.g. `/frigate`), especially when Frigate is located behind a reverse proxy that requires path-based routing. @@ -234,7 +233,7 @@ To do this: ### Custom go2rtc version -Frigate currently includes go2rtc v1.9.10, there may be certain cases where you want to run a different version of go2rtc. +Frigate currently includes go2rtc v1.9.13, there may be certain cases where you want to run a different version of go2rtc. To do this: diff --git a/docs/docs/configuration/camera_specific.md b/docs/docs/configuration/camera_specific.md index 50d5c52aa..aae8c57b4 100644 --- a/docs/docs/configuration/camera_specific.md +++ b/docs/docs/configuration/camera_specific.md @@ -244,7 +244,7 @@ go2rtc: - rtspx://192.168.1.1:7441/abcdefghijk ``` -[See the go2rtc docs for more information](https://github.com/AlexxIT/go2rtc/tree/v1.9.10#source-rtsp) +[See the go2rtc docs for more information](https://github.com/AlexxIT/go2rtc/tree/v1.9.13#source-rtsp) In the Unifi 2.0 update Unifi Protect Cameras had a change in audio sample rate which causes issues for ffmpeg. The input rate needs to be set for record if used directly with unifi protect. diff --git a/docs/docs/configuration/genai/config.md b/docs/docs/configuration/genai/config.md index e1f79b744..67bbb9fe4 100644 --- a/docs/docs/configuration/genai/config.md +++ b/docs/docs/configuration/genai/config.md @@ -5,39 +5,31 @@ title: Configuring Generative AI ## Configuration -A Generative AI provider can be configured in the global config, which will make the Generative AI features available for use. There are currently 3 native providers available to integrate with Frigate. Other providers that support the OpenAI standard API can also be used. See the OpenAI section below. 
+A Generative AI provider can be configured in the global config, which will make the Generative AI features available for use. There are currently 4 native providers available to integrate with Frigate. Other providers that support the OpenAI standard API can also be used. See the OpenAI-Compatible section below. To use Generative AI, you must define a single provider at the global level of your Frigate configuration. If the provider you choose requires an API key, you may either directly paste it in your configuration, or store it in an environment variable prefixed with `FRIGATE_`. -## Ollama +## Local Providers + +Local providers run on your own hardware and keep all data processing private. These require a GPU or dedicated hardware for best performance. :::warning -Using Ollama on CPU is not recommended, high inference times make using Generative AI impractical. +Running Generative AI models on CPU is not recommended, as high inference times make using Generative AI impractical. ::: -[Ollama](https://ollama.com/) allows you to self-host large language models and keep everything running locally. It is highly recommended to host this server on a machine with an Nvidia graphics card, or on a Apple silicon Mac for best performance. +### Recommended Local Models -Most of the 7b parameter 4-bit vision models will fit inside 8GB of VRAM. There is also a [Docker container](https://hub.docker.com/r/ollama/ollama) available. +You must use a vision-capable model with Frigate. The following models are recommended for local deployment: -Parallel requests also come with some caveats. You will need to set `OLLAMA_NUM_PARALLEL=1` and choose a `OLLAMA_MAX_QUEUE` and `OLLAMA_MAX_LOADED_MODELS` values that are appropriate for your hardware and preferences. See the [Ollama documentation](https://docs.ollama.com/faq#how-does-ollama-handle-concurrent-requests). 
- -### Model Types: Instruct vs Thinking - -Most vision-language models are available as **instruct** models, which are fine-tuned to follow instructions and respond concisely to prompts. However, some models (such as certain Qwen-VL or minigpt variants) offer both **instruct** and **thinking** versions. - -- **Instruct models** are always recommended for use with Frigate. These models generate direct, relevant, actionable descriptions that best fit Frigate's object and event summary use case. -- **Thinking models** are fine-tuned for more free-form, open-ended, and speculative outputs, which are typically not concise and may not provide the practical summaries Frigate expects. For this reason, Frigate does **not** recommend or support using thinking models. - -Some models are labeled as **hybrid** (capable of both thinking and instruct tasks). In these cases, Frigate will always use instruct-style prompts and specifically disables thinking-mode behaviors to ensure concise, useful responses. - -**Recommendation:** -Always select the `-instruct` or documented instruct/tagged variant of any model you use in your Frigate configuration. If in doubt, refer to your model provider’s documentation or model library for guidance on the correct model variant to use. - -### Supported Models - -You must use a vision capable model with Frigate. Current model variants can be found [in their model library](https://ollama.com/library). Note that Frigate will not automatically download the model you specify in your config, Ollama will try to download the model but it may take longer than the timeout, it is recommended to pull the model beforehand by running `ollama pull your_model` on your Ollama server/Docker container. Note that the model specified in Frigate's config must match the downloaded model tag. 
+| Model | Notes | +| ------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `qwen3-vl` | Strong visual and situational understanding, strong ability to identify smaller objects and interactions with object. | +| `qwen3.5` | Strong situational understanding, but missing DeepStack from qwen3-vl leading to worse performance for identifying objects in people's hand and other small details. | +| `Intern3.5VL` | Relatively fast with good vision comprehension | +| `gemma3` | Slower model with good vision and temporal understanding | +| `qwen2.5-vl` | Fast but capable model with good vision comprehension | :::info @@ -45,49 +37,135 @@ Each model is available in multiple parameter sizes (3b, 4b, 8b, etc.). Larger s ::: +:::note + +You should have at least 8 GB of RAM available (or VRAM if running on GPU) to run the 7B models, 16 GB to run the 13B models, and 24 GB to run the 33B models. + +::: + +### Model Types: Instruct vs Thinking + +Most vision-language models are available as **instruct** models, which are fine-tuned to follow instructions and respond concisely to prompts. However, some models (such as certain Qwen-VL or minigpt variants) offer both **instruct** and **thinking** versions. + +- **Instruct models** are always recommended for use with Frigate. These models generate direct, relevant, actionable descriptions that best fit Frigate's object and event summary use case. +- **Reasoning / Thinking models** are fine-tuned for more free-form, open-ended, and speculative outputs, which are typically not concise and may not provide the practical summaries Frigate expects. For this reason, Frigate does **not** recommend or support using thinking models. + +Some models are labeled as **hybrid** (capable of both thinking and instruct tasks). 
In these cases, it is recommended to disable reasoning / thinking, which is generally model specific (see your models documentation). + +**Recommendation:** +Always select the `-instruct` or documented instruct/tagged variant of any model you use in your Frigate configuration. If in doubt, refer to your model provider's documentation or model library for guidance on the correct model variant to use. + +### llama.cpp + +[llama.cpp](https://github.com/ggml-org/llama.cpp) is a C++ implementation of LLaMA that provides a high-performance inference server. + +It is highly recommended to host the llama.cpp server on a machine with a discrete graphics card, or on an Apple silicon Mac for best performance. + +#### Supported Models + +You must use a vision capable model with Frigate. The llama.cpp server supports various vision models in GGUF format. + +#### Configuration + +All llama.cpp native options can be passed through `provider_options`, including `temperature`, `top_k`, `top_p`, `min_p`, `repeat_penalty`, `repeat_last_n`, `seed`, `grammar`, and more. See the [llama.cpp server documentation](https://github.com/ggml-org/llama.cpp/blob/master/tools/server/README.md) for a complete list of available parameters. + +```yaml +genai: + provider: llamacpp + base_url: http://localhost:8080 + model: your-model-name + provider_options: + context_size: 16000 # Tell Frigate your context size so it can send the appropriate amount of information. +``` + +### Ollama + +[Ollama](https://ollama.com/) allows you to self-host large language models and keep everything running locally. It is highly recommended to host this server on a machine with an Nvidia graphics card, or on a Apple silicon Mac for best performance. + +Most of the 7b parameter 4-bit vision models will fit inside 8GB of VRAM. There is also a [Docker container](https://hub.docker.com/r/ollama/ollama) available. + +Parallel requests also come with some caveats. 
You will need to set `OLLAMA_NUM_PARALLEL=1` and choose a `OLLAMA_MAX_QUEUE` and `OLLAMA_MAX_LOADED_MODELS` values that are appropriate for your hardware and preferences. See the [Ollama documentation](https://docs.ollama.com/faq#how-does-ollama-handle-concurrent-requests). + :::tip If you are trying to use a single model for Frigate and HomeAssistant, it will need to support vision and tools calling. qwen3-VL supports vision and tools simultaneously in Ollama. ::: -The following models are recommended: +Note that Frigate will not automatically download the model you specify in your config. Ollama will try to download the model but it may take longer than the timeout, so it is recommended to pull the model beforehand by running `ollama pull your_model` on your Ollama server/Docker container. The model specified in Frigate's config must match the downloaded model tag. -| Model | Notes | -| ------------- | -------------------------------------------------------------------- | -| `qwen3-vl` | Strong visual and situational understanding, higher vram requirement | -| `Intern3.5VL` | Relatively fast with good vision comprehension | -| `gemma3` | Strong frame-to-frame understanding, slower inference times | -| `qwen2.5-vl` | Fast but capable model with good vision comprehension | - -:::note - -You should have at least 8 GB of RAM available (or VRAM if running on GPU) to run the 7B models, 16 GB to run the 13B models, and 32 GB to run the 33B models. - -::: - -#### Ollama Cloud models - -Ollama also supports [cloud models](https://ollama.com/cloud), where your local Ollama instance handles requests from Frigate, but model inference is performed in the cloud. Set up Ollama locally, sign in with your Ollama account, and specify the cloud model name in your Frigate config. For more details, see the Ollama cloud model [docs](https://docs.ollama.com/cloud). 
- -### Configuration +#### Configuration ```yaml genai: provider: ollama base_url: http://localhost:11434 model: qwen3-vl:4b + provider_options: # other Ollama client options can be defined + keep_alive: -1 + options: + num_ctx: 8192 # make sure the context matches other services that are using ollama ``` -## Google Gemini +### OpenAI-Compatible + +Frigate supports any provider that implements the OpenAI API standard. This includes self-hosted solutions like [vLLM](https://docs.vllm.ai/), [LocalAI](https://localai.io/), and other OpenAI-compatible servers. + +:::tip + +For OpenAI-compatible servers (such as llama.cpp) that don't expose the configured context size in the API response, you can manually specify the context size in `provider_options`: + +```yaml +genai: + provider: openai + base_url: http://your-llama-server + model: your-model-name + provider_options: + context_size: 8192 # Specify the configured context size +``` + +This ensures Frigate uses the correct context window size when generating prompts. + +::: + +#### Configuration + +```yaml +genai: + provider: openai + base_url: http://your-server:port + api_key: your-api-key # May not be required for local servers + model: your-model-name +``` + +To use a different OpenAI-compatible API endpoint, set the `OPENAI_BASE_URL` environment variable to your provider's API URL. + +## Cloud Providers + +Cloud providers run on remote infrastructure and require an API key for authentication. These services handle all model inference on their servers. + +### Ollama Cloud + +Ollama also supports [cloud models](https://ollama.com/cloud), where your local Ollama instance handles requests from Frigate, but model inference is performed in the cloud. Set up Ollama locally, sign in with your Ollama account, and specify the cloud model name in your Frigate config. For more details, see the Ollama cloud model [docs](https://docs.ollama.com/cloud). 
+ +#### Configuration + +```yaml +genai: + provider: ollama + base_url: http://localhost:11434 + model: cloud-model-name +``` + +### Google Gemini Google Gemini has a [free tier](https://ai.google.dev/pricing) for the API, however the limits may not be sufficient for standard Frigate usage. Choose a plan appropriate for your installation. -### Supported Models +#### Supported Models You must use a vision capable model with Frigate. Current model variants can be found [in their documentation](https://ai.google.dev/gemini-api/docs/models/gemini). -### Get API Key +#### Get API Key To start using Gemini, you must first get an API key from [Google AI Studio](https://aistudio.google.com). @@ -96,7 +174,7 @@ To start using Gemini, you must first get an API key from [Google AI Studio](htt 3. Click "Create API key in new project" 4. Copy the API key for use in your config -### Configuration +#### Configuration ```yaml genai: @@ -121,19 +199,19 @@ Other HTTP options are available, see the [python-genai documentation](https://g ::: -## OpenAI +### OpenAI OpenAI does not have a free tier for their API. With the release of gpt-4o, pricing has been reduced and each generation should cost fractions of a cent if you choose to go this route. -### Supported Models +#### Supported Models You must use a vision capable model with Frigate. Current model variants can be found [in their documentation](https://platform.openai.com/docs/models). -### Get API Key +#### Get API Key To start using OpenAI, you must first [create an API key](https://platform.openai.com/api-keys) and [configure billing](https://platform.openai.com/settings/organization/billing/overview). -### Configuration +#### Configuration ```yaml genai: @@ -142,42 +220,19 @@ genai: model: gpt-4o ``` -:::note - -To use a different OpenAI-compatible API endpoint, set the `OPENAI_BASE_URL` environment variable to your provider's API URL. 
- -::: - -:::tip - -For OpenAI-compatible servers (such as llama.cpp) that don't expose the configured context size in the API response, you can manually specify the context size in `provider_options`: - -```yaml -genai: - provider: openai - base_url: http://your-llama-server - model: your-model-name - provider_options: - context_size: 8192 # Specify the configured context size -``` - -This ensures Frigate uses the correct context window size when generating prompts. - -::: - -## Azure OpenAI +### Azure OpenAI Microsoft offers several vision models through Azure OpenAI. A subscription is required. -### Supported Models +#### Supported Models You must use a vision capable model with Frigate. Current model variants can be found [in their documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models). -### Create Resource and Get API Key +#### Create Resource and Get API Key To start using Azure OpenAI, you must first [create a resource](https://learn.microsoft.com/azure/cognitive-services/openai/how-to/create-resource?pivots=web-portal#create-a-resource). You'll need your API key, model name, and resource URL, which must include the `api-version` parameter (see the example below). -### Configuration +#### Configuration ```yaml genai: diff --git a/docs/docs/configuration/genai/objects.md b/docs/docs/configuration/genai/objects.md index e3ae31393..3ed826d21 100644 --- a/docs/docs/configuration/genai/objects.md +++ b/docs/docs/configuration/genai/objects.md @@ -11,7 +11,7 @@ By default, descriptions will be generated for all tracked objects and all zones Optionally, you can generate the description using a snapshot (if enabled) by setting `use_snapshot` to `True`. By default, this is set to `False`, which sends the uncompressed images from the `detect` stream collected over the object's lifetime to the model. Once the object lifecycle ends, only a single compressed and cropped thumbnail is saved with the tracked object. 
Using a snapshot might be useful when you want to _regenerate_ a tracked object's description as it will provide the AI with a higher-quality image (typically downscaled by the AI itself) than the cropped/compressed thumbnail. Using a snapshot otherwise has a trade-off in that only a single image is sent to your provider, which will limit the model's ability to determine object movement or direction.
 
-Generative AI object descriptions can also be toggled dynamically for a camera via MQTT with the topic `frigate/<camera_name>/object_descriptions/set`. See the [MQTT documentation](/integrations/mqtt/#frigatecamera_nameobjectdescriptionsset).
+Generative AI object descriptions can also be toggled dynamically for a camera via MQTT with the topic `frigate/<camera_name>/object_descriptions/set`. See the [MQTT documentation](/integrations/mqtt#frigatecamera_nameobject_descriptionsset).
 
 ## Usage and Best Practices
diff --git a/docs/docs/configuration/genai/review_summaries.md b/docs/docs/configuration/genai/review_summaries.md
index df287446c..c6f5e53ec 100644
--- a/docs/docs/configuration/genai/review_summaries.md
+++ b/docs/docs/configuration/genai/review_summaries.md
@@ -7,7 +7,7 @@ Generative AI can be used to automatically generate structured summaries of revi
 
 Requests for a summary are requested automatically to your AI provider for alert review items when the activity has ended, they can also be optionally enabled for detections as well.
 
-Generative AI review summaries can also be toggled dynamically for a [camera via MQTT](/integrations/mqtt/#frigatecamera_namereviewdescriptionsset).
+Generative AI review summaries can also be toggled dynamically for a [camera via MQTT](/integrations/mqtt#frigatecamera_namereview_descriptionsset).
## Review Summary Usage and Best Practices diff --git a/docs/docs/configuration/index.md b/docs/docs/configuration/index.md index 2144ef7ea..ac5247000 100644 --- a/docs/docs/configuration/index.md +++ b/docs/docs/configuration/index.md @@ -138,7 +138,10 @@ cameras: - detect motion: mask: - - 0.000,0.427,0.002,0.000,0.999,0.000,0.999,0.781,0.885,0.456,0.700,0.424,0.701,0.311,0.507,0.294,0.453,0.347,0.451,0.400 + timestamp: + friendly_name: "Camera timestamp" + enabled: true + coordinates: "0.000,0.427,0.002,0.000,0.999,0.000,0.999,0.781,0.885,0.456,0.700,0.424,0.701,0.311,0.507,0.294,0.453,0.347,0.451,0.400" ``` ### Standalone Intel Mini PC with USB Coral @@ -195,7 +198,10 @@ cameras: - detect motion: mask: - - 0.000,0.427,0.002,0.000,0.999,0.000,0.999,0.781,0.885,0.456,0.700,0.424,0.701,0.311,0.507,0.294,0.453,0.347,0.451,0.400 + timestamp: + friendly_name: "Camera timestamp" + enabled: true + coordinates: "0.000,0.427,0.002,0.000,0.999,0.000,0.999,0.781,0.885,0.456,0.700,0.424,0.701,0.311,0.507,0.294,0.453,0.347,0.451,0.400" ``` ### Home Assistant integrated Intel Mini PC with OpenVino @@ -262,5 +268,8 @@ cameras: - detect motion: mask: - - 0.000,0.427,0.002,0.000,0.999,0.000,0.999,0.781,0.885,0.456,0.700,0.424,0.701,0.311,0.507,0.294,0.453,0.347,0.451,0.400 + timestamp: + friendly_name: "Camera timestamp" + enabled: true + coordinates: "0.000,0.427,0.002,0.000,0.999,0.000,0.999,0.781,0.885,0.456,0.700,0.424,0.701,0.311,0.507,0.294,0.453,0.347,0.451,0.400" ``` diff --git a/docs/docs/configuration/masks.md b/docs/docs/configuration/masks.md index 4a4722586..32280531d 100644 --- a/docs/docs/configuration/masks.md +++ b/docs/docs/configuration/masks.md @@ -33,18 +33,55 @@ Your config file will be updated with the relative coordinates of the mask/zone: ```yaml motion: - mask: "0.000,0.427,0.002,0.000,0.999,0.000,0.999,0.781,0.885,0.456,0.700,0.424,0.701,0.311,0.507,0.294,0.453,0.347,0.451,0.400" + mask: + # Motion mask name (required) + mask1: + # Optional: A friendly 
name for the mask + friendly_name: "Timestamp area" + # Optional: Whether this mask is active (default: true) + enabled: true + # Required: Coordinates polygon for the mask + coordinates: "0.000,0.427,0.002,0.000,0.999,0.000,0.999,0.781,0.885,0.456,0.700,0.424,0.701,0.311,0.507,0.294,0.453,0.347,0.451,0.400" ``` -Multiple masks can be listed in your config. +Multiple motion masks can be listed in your config: ```yaml motion: mask: - - 0.239,1.246,0.175,0.901,0.165,0.805,0.195,0.802 - - 0.000,0.427,0.002,0.000,0.999,0.000,0.999,0.781,0.885,0.456 + mask1: + friendly_name: "Timestamp area" + enabled: true + coordinates: "0.239,1.246,0.175,0.901,0.165,0.805,0.195,0.802" + mask2: + friendly_name: "Tree area" + enabled: true + coordinates: "0.000,0.427,0.002,0.000,0.999,0.000,0.999,0.781,0.885,0.456" ``` +Object filter masks can also be created through the UI or manually in the config. They are configured under the object filters section for each object type: + +```yaml +objects: + filters: + person: + mask: + person_filter1: + friendly_name: "Roof area" + enabled: true + coordinates: "0.000,0.000,1.000,0.000,1.000,0.400,0.000,0.400" + car: + mask: + car_filter1: + friendly_name: "Sidewalk area" + enabled: true + coordinates: "0.000,0.700,1.000,0.700,1.000,1.000,0.000,1.000" +``` + +## Enabling/Disabling Masks + +Both motion masks and object filter masks can be toggled on or off without removing them from the configuration. Disabled masks are completely ignored at runtime - they will not affect motion detection or object filtering. This is useful for temporarily disabling a mask during certain seasons or times of day without modifying the configuration. 
+ ### Further Clarification This is a response to a [question posed on reddit](https://www.reddit.com/r/homeautomation/comments/ppxdve/replacing_my_doorbell_with_a_security_camera_a_6/hd876w4?utm_source=share&utm_medium=web2x&context=3): diff --git a/docs/docs/configuration/motion_detection.md b/docs/docs/configuration/motion_detection.md index c22491fd0..53e63272a 100644 --- a/docs/docs/configuration/motion_detection.md +++ b/docs/docs/configuration/motion_detection.md @@ -38,7 +38,6 @@ Remember that motion detection is just used to determine when object detection s The threshold value dictates how much of a change in a pixels luminance is required to be considered motion. ```yaml -# default threshold value motion: # Optional: The threshold passed to cv2.threshold to determine if a pixel is different enough to be counted as motion. (default: shown below) # Increasing this value will make motion detection less sensitive and decreasing it will make motion detection more sensitive. @@ -53,7 +52,6 @@ Watching the motion boxes in the debug view, increase the threshold until you on ### Contour Area ```yaml -# default contour_area value motion: # Optional: Minimum size in pixels in the resized motion image that counts as motion (default: shown below) # Increasing this value will prevent smaller areas of motion from being detected. Decreasing will @@ -81,27 +79,49 @@ However, if the preferred day settings do not work well at night it is recommend ## Tuning For Large Changes In Motion +### Lightning Threshold + ```yaml -# default lightning_threshold: motion: - # Optional: The percentage of the image used to detect lightning or other substantial changes where motion detection - # needs to recalibrate. (default: shown below) - # Increasing this value will make motion detection more likely to consider lightning or ir mode changes as valid motion. 
- # Decreasing this value will make motion detection more likely to ignore large amounts of motion such as a person approaching - # a doorbell camera. + # Optional: The percentage of the image used to detect lightning or + # other substantial changes where motion detection needs to + # recalibrate. (default: shown below) + # Increasing this value will make motion detection more likely + # to consider lightning or IR mode changes as valid motion. + # Decreasing this value will make motion detection more likely + # to ignore large amounts of motion such as a person + # approaching a doorbell camera. lightning_threshold: 0.8 ``` +Large changes in motion like PTZ moves and camera switches between Color and IR mode should result in a pause in object detection. `lightning_threshold` defines the percentage of the image used to detect these substantial changes. Increasing this value makes motion detection more likely to treat large changes (like IR mode switches) as valid motion. Decreasing it makes motion detection more likely to ignore large amounts of motion, such as a person approaching a doorbell camera. + +Note that `lightning_threshold` does **not** stop motion-based recordings from being saved — it only prevents additional motion analysis after the threshold is exceeded, reducing false positive object detections during high-motion periods (e.g. storms or PTZ sweeps) without interfering with recordings. + :::warning -Some cameras like doorbell cameras may have missed detections when someone walks directly in front of the camera and the lightning_threshold causes motion detection to be re-calibrated. In this case, it may be desirable to increase the `lightning_threshold` to ensure these objects are not missed. +Some cameras, like doorbell cameras, may have missed detections when someone walks directly in front of the camera and the `lightning_threshold` causes motion detection to recalibrate. 
In this case, it may be desirable to increase the `lightning_threshold` to ensure these objects are not missed. ::: -:::note +### Skip Motion On Large Scene Changes -Lightning threshold does not stop motion based recordings from being saved. +```yaml +motion: + # Optional: Fraction of the frame that must change in a single update + # before Frigate will completely ignore any motion in that frame. + # Values range between 0.0 and 1.0, leave unset (null) to disable. + # Setting this to 0.7 would cause Frigate to **skip** reporting + # motion boxes when more than 70% of the image appears to change + # (e.g. during lightning storms, IR/color mode switches, or other + # sudden lighting events). + skip_motion_threshold: 0.7 +``` + +This option is handy when you want to prevent large transient changes from triggering recordings or object detection. It differs from `lightning_threshold` because it completely suppresses motion instead of just forcing a recalibration. + +:::warning + +When the skip threshold is exceeded, **no motion is reported** for that frame, meaning **nothing is recorded** for that frame. That means you can miss something important, like a PTZ camera auto-tracking an object or activity while the camera is moving. If you prefer to guarantee that every frame is saved, leave this unset and accept occasional recordings containing scene noise — they typically only take up a few megabytes and are quick to scan in the timeline UI. ::: - -Large changes in motion like PTZ moves and camera switches between Color and IR mode should result in a pause in object detection. This is done via the `lightning_threshold` configuration. It is defined as the percentage of the image used to detect lightning or other substantial changes where motion detection needs to recalibrate. Increasing this value will make motion detection more likely to consider lightning or IR mode changes as valid motion. 
Decreasing this value will make motion detection more likely to ignore large amounts of motion such as a person approaching a doorbell camera. diff --git a/docs/docs/configuration/record.md b/docs/docs/configuration/record.md index 4dfd8b77c..eb5d736e4 100644 --- a/docs/docs/configuration/record.md +++ b/docs/docs/configuration/record.md @@ -139,7 +139,13 @@ record: :::tip -When using `hwaccel_args` globally hardware encoding is used for time lapse generation. The encoder determines its own behavior so the resulting file size may be undesirably large. +When using `hwaccel_args`, hardware encoding is used for timelapse generation. This setting can be overridden for a specific camera (e.g., when camera resolution exceeds hardware encoder limits); set `cameras..record.export.hwaccel_args` with the appropriate settings. Using an unrecognized value or empty string will fall back to software encoding (libx264). + +::: + +:::tip + +The encoder determines its own behavior so the resulting file size may be undesirably large. To reduce the output file size the ffmpeg parameter `-qp n` can be utilized (where `n` stands for the value of the quantisation parameter). The value can be adjusted to get an acceptable tradeoff between quality and file size for the given scenario. ::: @@ -148,19 +154,16 @@ To reduce the output file size the ffmpeg parameter `-qp n` can be utilized (whe Apple devices running the Safari browser may fail to playback h.265 recordings. The [apple compatibility option](../configuration/camera_specific.md#h265-cameras-via-safari) should be used to ensure seamless playback on Apple devices. -## Syncing Recordings With Disk +## Syncing Media Files With Disk -In some cases the recordings files may be deleted but Frigate will not know this has happened. Recordings sync can be enabled which will tell Frigate to check the file system and delete any db entries for files which don't exist. 
+Media files (event snapshots, event thumbnails, review thumbnails, previews, exports, and recordings) can become orphaned when database entries are deleted but the corresponding files remain on disk. -```yaml -record: - sync_recordings: True -``` +Normal operation may leave small numbers of orphaned files until Frigate's scheduled cleanup, but crashes, configuration changes, or upgrades may cause more orphaned files that Frigate does not clean up. This feature checks the file system for media files and removes any that are not referenced in the database. -This feature is meant to fix variations in files, not completely delete entries in the database. If you delete all of your media, don't use `sync_recordings`, just stop Frigate, delete the `frigate.db` database, and restart. +The Maintenance pane in the Frigate UI or an API endpoint `POST /api/media/sync` can be used to trigger a media sync. When using the API, a job ID is returned and the operation continues on the server. Status can be checked with the `/api/media/sync/status/{job_id}` endpoint. :::warning -The sync operation uses considerable CPU resources and in most cases is not needed, only enable when necessary. +This operation uses considerable CPU resources and includes a safety threshold that aborts if more than 50% of files would be deleted. Only run when necessary. If you set `force: true` the safety threshold will be bypassed; do not use `force` unless you are certain the deletions are intended. 
::: diff --git a/docs/docs/configuration/reference.md b/docs/docs/configuration/reference.md index 206d7012e..cac508195 100644 --- a/docs/docs/configuration/reference.md +++ b/docs/docs/configuration/reference.md @@ -73,11 +73,19 @@ tls: # Optional: Enable TLS for port 8971 (default: shown below) enabled: True -# Optional: IPv6 configuration +# Optional: Networking configuration networking: # Optional: Enable IPv6 on 5000, and 8971 if tls is configured (default: shown below) ipv6: enabled: False + # Optional: Override ports Frigate uses for listening (defaults: shown below) + # An IP address may also be provided to bind to a specific interface, e.g. ip:port + # NOTE: This setting is for advanced users and may break some integrations. The majority + # of users should change ports in the docker compose file + # or use the docker run `--publish` option to select a different port. + listen: + internal: 5000 + external: 8971 # Optional: Proxy configuration proxy: @@ -337,7 +345,15 @@ objects: # Optional: mask to prevent all object types from being detected in certain areas (default: no mask) # Checks based on the bottom center of the bounding box of the object. 
# NOTE: This mask is COMBINED with the object type specific mask below - mask: 0.000,0.000,0.781,0.000,0.781,0.278,0.000,0.278 + mask: + # Object filter mask name (required) + mask1: + # Optional: A friendly name for the mask + friendly_name: "Object filter mask area" + # Optional: Whether this mask is active (default: true) + enabled: true + # Required: Coordinates polygon for the mask + coordinates: "0.000,0.000,0.781,0.000,0.781,0.278,0.000,0.278" # Optional: filters to reduce false positives for specific object types filters: person: @@ -357,7 +373,15 @@ objects: threshold: 0.7 # Optional: mask to prevent this object type from being detected in certain areas (default: no mask) # Checks based on the bottom center of the bounding box of the object - mask: 0.000,0.000,0.781,0.000,0.781,0.278,0.000,0.278 + mask: + # Object filter mask name (required) + mask1: + # Optional: A friendly name for the mask + friendly_name: "Object filter mask area" + # Optional: Whether this mask is active (default: true) + enabled: true + # Required: Coordinates polygon for the mask + coordinates: "0.000,0.000,0.781,0.000,0.781,0.278,0.000,0.278" # Optional: Configuration for AI generated tracked object descriptions genai: # Optional: Enable AI object description generation (default: shown below) @@ -456,12 +480,16 @@ motion: # Increasing this value will make motion detection less sensitive and decreasing it will make motion detection more sensitive. # The value should be between 1 and 255. threshold: 30 - # Optional: The percentage of the image used to detect lightning or other substantial changes where motion detection - # needs to recalibrate. (default: shown below) + # Optional: The percentage of the image used to detect lightning or other substantial changes where motion detection needs + # to recalibrate and motion checks stop for that frame. Recordings are unaffected. 
(default: shown below) # Increasing this value will make motion detection more likely to consider lightning or ir mode changes as valid motion. - # Decreasing this value will make motion detection more likely to ignore large amounts of motion such as a person approaching - # a doorbell camera. + # Decreasing this value will make motion detection more likely to ignore large amounts of motion such as a person approaching a doorbell camera. lightning_threshold: 0.8 + # Optional: Fraction of the frame that must change in a single update before motion boxes are completely + # ignored. Values range between 0.0 and 1.0. When exceeded, no motion boxes are reported and **no motion + # recording** is created for that frame. Leave unset (null) to disable this feature. Use with care on PTZ + # cameras or other situations where you require guaranteed frame capture. + skip_motion_threshold: None # Optional: Minimum size in pixels in the resized motion image that counts as motion (default: shown below) # Increasing this value will prevent smaller areas of motion from being detected. Decreasing will # make motion detection more sensitive to smaller moving objects. @@ -481,7 +509,15 @@ motion: frame_height: 100 # Optional: motion mask # NOTE: see docs for more detailed info on creating masks - mask: 0.000,0.469,1.000,0.469,1.000,1.000,0.000,1.000 + mask: + # Motion mask name (required) + mask1: + # Optional: A friendly name for the mask + friendly_name: "Motion mask area" + # Optional: Whether this mask is active (default: true) + enabled: true + # Required: Coordinates polygon for the mask + coordinates: "0.000,0.469,1.000,0.469,1.000,1.000,0.000,1.000" # Optional: improve contrast (default: shown below) # Enables dynamic contrast improvement. This should help improve night detections at the cost of making motion detection more sensitive # for daytime. 
@@ -510,8 +546,6 @@ record: # Optional: Number of minutes to wait between cleanup runs (default: shown below) # This can be used to reduce the frequency of deleting recording segments from disk if you want to minimize i/o expire_interval: 60 - # Optional: Two-way sync recordings database with disk on startup and once a day (default: shown below). - sync_recordings: False # Optional: Continuous retention settings continuous: # Optional: Number of days to retain recordings regardless of tracked objects or motion (default: shown below) @@ -534,6 +568,8 @@ record: # The -r (framerate) dictates how smooth the output video is. # So the args would be -vf setpts=0.02*PTS -r 30 in that case. timelapse_args: "-vf setpts=0.04*PTS -r 30" + # Optional: Global hardware acceleration settings for timelapse exports. (default: inherit) + hwaccel_args: auto # Optional: Recording Preview Settings preview: # Optional: Quality of recording preview (default: shown below). @@ -752,7 +788,7 @@ classification: interval: None # Optional: Restream configuration -# Uses https://github.com/AlexxIT/go2rtc (v1.9.10) +# Uses https://github.com/AlexxIT/go2rtc (v1.9.13) # NOTE: The default go2rtc API port (1984) must be used, # changing this port for the integrated go2rtc instance is not supported. go2rtc: @@ -838,6 +874,11 @@ cameras: # Optional: camera specific output args (default: inherit) # output_args: + # Optional: camera specific hwaccel args for timelapse export (default: inherit) + # record: + # export: + # hwaccel_args: + # Optional: timeout for highest scoring image before allowing it # to be replaced by a newer image. 
(default: shown below) best_image_timeout: 60 @@ -853,6 +894,9 @@ cameras: front_steps: # Optional: A friendly name or descriptive text for the zones friendly_name: "" + # Optional: Whether this zone is active (default: shown below) + # Disabled zones are completely ignored at runtime - no object tracking or debug drawing + enabled: True # Required: List of x,y coordinates to define the polygon of the zone. # NOTE: Presence in a zone is evaluated only based on the bottom center of the objects bounding box. coordinates: 0.033,0.306,0.324,0.138,0.439,0.185,0.042,0.428 diff --git a/docs/docs/configuration/restream.md b/docs/docs/configuration/restream.md index ebd506294..a3c11f2d0 100644 --- a/docs/docs/configuration/restream.md +++ b/docs/docs/configuration/restream.md @@ -7,7 +7,7 @@ title: Restream Frigate can restream your video feed as an RTSP feed for other applications such as Home Assistant to utilize it at `rtsp://:8554/`. Port 8554 must be open. [This allows you to use a video feed for detection in Frigate and Home Assistant live view at the same time without having to make two separate connections to the camera](#reduce-connections-to-camera). The video feed is copied from the original video feed directly to avoid re-encoding. This feed does not include any annotation by Frigate. -Frigate uses [go2rtc](https://github.com/AlexxIT/go2rtc/tree/v1.9.10) to provide its restream and MSE/WebRTC capabilities. The go2rtc config is hosted at the `go2rtc` in the config, see [go2rtc docs](https://github.com/AlexxIT/go2rtc/tree/v1.9.10#configuration) for more advanced configurations and features. +Frigate uses [go2rtc](https://github.com/AlexxIT/go2rtc/tree/v1.9.13) to provide its restream and MSE/WebRTC capabilities. The go2rtc config is hosted at the `go2rtc` in the config, see [go2rtc docs](https://github.com/AlexxIT/go2rtc/tree/v1.9.13#configuration) for more advanced configurations and features. 
:::note @@ -206,7 +206,7 @@ Enabling arbitrary exec sources allows execution of arbitrary commands through g ## Advanced Restream Configurations -The [exec](https://github.com/AlexxIT/go2rtc/tree/v1.9.10#source-exec) source in go2rtc can be used for custom ffmpeg commands. An example is below: +The [exec](https://github.com/AlexxIT/go2rtc/tree/v1.9.13#source-exec) source in go2rtc can be used for custom ffmpeg commands. An example is below: :::warning diff --git a/docs/docs/configuration/zones.md b/docs/docs/configuration/zones.md index c0a11d4f6..bf846c3a7 100644 --- a/docs/docs/configuration/zones.md +++ b/docs/docs/configuration/zones.md @@ -10,6 +10,10 @@ For example, the cat in this image is currently in Zone 1, but **not** Zone 2. Zones cannot have the same name as a camera. If desired, a single zone can include multiple cameras if you have multiple cameras covering the same area by configuring zones with the same name for each camera. +## Enabling/Disabling Zones + +Zones can be toggled on or off without removing them from the configuration. Disabled zones are completely ignored at runtime - objects will not be tracked for zone presence, and zones will not appear in the debug view. This is useful for temporarily disabling a zone during certain seasons or times of day without modifying the configuration. + During testing, enable the Zones option for the Debug view of your camera (Settings --> Debug) so you can adjust as needed. The zone line will increase in thickness when any object enters the zone. To create a zone, follow [the steps for a "Motion mask"](masks.md), but use the section of the web UI for creating a zone instead. @@ -86,7 +90,6 @@ cameras: Only car objects can trigger the `front_yard_street` zone and only person can trigger the `entire_yard`. Objects will be tracked for any `person` that enter anywhere in the yard, and for cars only if they enter the street. 
- ### Zone Loitering Sometimes objects are expected to be passing through a zone, but an object loitering in an area is unexpected. Zones can be configured to have a minimum loitering time after which the object will be considered in the zone. @@ -94,6 +97,7 @@ Sometimes objects are expected to be passing through a zone, but an object loite :::note When using loitering zones, a review item will behave in the following way: + - When a person is in a loitering zone, the review item will remain active until the person leaves the loitering zone, regardless of if they are stationary. - When any other object is in a loitering zone, the review item will remain active until the loitering time is met. Then if the object is stationary the review item will end. diff --git a/docs/docs/guides/configuring_go2rtc.md b/docs/docs/guides/configuring_go2rtc.md index ca50a90d3..8b01de3e7 100644 --- a/docs/docs/guides/configuring_go2rtc.md +++ b/docs/docs/guides/configuring_go2rtc.md @@ -11,7 +11,7 @@ Use of the bundled go2rtc is optional. You can still configure FFmpeg to connect ## Setup a go2rtc stream -First, you will want to configure go2rtc to connect to your camera stream by adding the stream you want to use for live view in your Frigate config file. Avoid changing any other parts of your config at this step. Note that go2rtc supports [many different stream types](https://github.com/AlexxIT/go2rtc/tree/v1.9.10#module-streams), not just rtsp. +First, you will want to configure go2rtc to connect to your camera stream by adding the stream you want to use for live view in your Frigate config file. Avoid changing any other parts of your config at this step. Note that go2rtc supports [many different stream types](https://github.com/AlexxIT/go2rtc/tree/v1.9.13#module-streams), not just rtsp. 
:::tip @@ -47,8 +47,8 @@ After adding this to the config, restart Frigate and try to watch the live strea - Check Video Codec: - If the camera stream works in go2rtc but not in your browser, the video codec might be unsupported. - - If using H265, switch to H264. Refer to [video codec compatibility](https://github.com/AlexxIT/go2rtc/tree/v1.9.10#codecs-madness) in go2rtc documentation. - - If unable to switch from H265 to H264, or if the stream format is different (e.g., MJPEG), re-encode the video using [FFmpeg parameters](https://github.com/AlexxIT/go2rtc/tree/v1.9.10#source-ffmpeg). It supports rotating and resizing video feeds and hardware acceleration. Keep in mind that transcoding video from one format to another is a resource intensive task and you may be better off using the built-in jsmpeg view. + - If using H265, switch to H264. Refer to [video codec compatibility](https://github.com/AlexxIT/go2rtc/tree/v1.9.13#codecs-madness) in go2rtc documentation. + - If unable to switch from H265 to H264, or if the stream format is different (e.g., MJPEG), re-encode the video using [FFmpeg parameters](https://github.com/AlexxIT/go2rtc/tree/v1.9.13#source-ffmpeg). It supports rotating and resizing video feeds and hardware acceleration. Keep in mind that transcoding video from one format to another is a resource intensive task and you may be better off using the built-in jsmpeg view. 
 ```yaml
 go2rtc:
   streams:
diff --git a/docs/docs/guides/getting_started.md b/docs/docs/guides/getting_started.md
index f0f2f0f98..92f485333 100644
--- a/docs/docs/guides/getting_started.md
+++ b/docs/docs/guides/getting_started.md
@@ -240,7 +240,10 @@ cameras:
       - detect
     motion:
       mask:
-        - 0,461,3,0,1919,0,1919,843,1699,492,1344,458,1346,336,973,317,869,375,866,432
+        motion_area:
+          friendly_name: "Motion mask"
+          enabled: true
+          coordinates: "0,461,3,0,1919,0,1919,843,1699,492,1344,458,1346,336,973,317,869,375,866,432"
 ```
 
 ### Step 6: Enable recordings
diff --git a/docs/docs/integrations/mqtt.md b/docs/docs/integrations/mqtt.md
index 66775a473..ca3589df1 100644
--- a/docs/docs/integrations/mqtt.md
+++ b/docs/docs/integrations/mqtt.md
@@ -429,6 +429,30 @@ Topic to adjust motion contour area for a camera. Expected value is an integer.
 
 Topic with current motion contour area for a camera. Published value is an integer.
 
+### `frigate/<camera_name>/motion_mask/<mask_name>/set`
+
+Topic to turn a specific motion mask for a camera on and off. Expected values are `ON` and `OFF`.
+
+### `frigate/<camera_name>/motion_mask/<mask_name>/state`
+
+Topic with current state of a specific motion mask for a camera. Published values are `ON` and `OFF`.
+
+### `frigate/<camera_name>/object_mask/<mask_name>/set`
+
+Topic to turn a specific object mask for a camera on and off. Expected values are `ON` and `OFF`.
+
+### `frigate/<camera_name>/object_mask/<mask_name>/state`
+
+Topic with current state of a specific object mask for a camera. Published values are `ON` and `OFF`.
+
+### `frigate/<camera_name>/zone/<zone_name>/set`
+
+Topic to turn a specific zone for a camera on and off. Expected values are `ON` and `OFF`.
+
+### `frigate/<camera_name>/zone/<zone_name>/state`
+
+Topic with current state of a specific zone for a camera. Published values are `ON` and `OFF`.
+
 ### `frigate/<camera_name>/review_status`
 
 Topic with current activity status of the camera. Possible values are `NONE`, `DETECTION`, or `ALERT`.
diff --git a/docs/sidebars.ts b/docs/sidebars.ts index ea0d2f5c8..a4c1bca9d 100644 --- a/docs/sidebars.ts +++ b/docs/sidebars.ts @@ -28,7 +28,7 @@ const sidebars: SidebarsConfig = { { type: "link", label: "Go2RTC Configuration Reference", - href: "https://github.com/AlexxIT/go2rtc/tree/v1.9.10#configuration", + href: "https://github.com/AlexxIT/go2rtc/tree/v1.9.13#configuration", } as PropSidebarItemLink, ], Detectors: [ diff --git a/docs/static/frigate-api.yaml b/docs/static/frigate-api.yaml index f1a00fe61..2063514ac 100644 --- a/docs/static/frigate-api.yaml +++ b/docs/static/frigate-api.yaml @@ -331,6 +331,59 @@ paths: application/json: schema: $ref: "#/components/schemas/HTTPValidationError" + /media/sync: + post: + tags: + - App + summary: Start media sync job + description: |- + Start an asynchronous media sync job to find and (optionally) remove orphaned media files. + Returns 202 with job details when queued, or 409 if a job is already running. + operationId: sync_media_media_sync_post + requestBody: + required: true + content: + application/json: + responses: + "202": + description: Accepted - Job queued + "409": + description: Conflict - Job already running + "422": + description: Validation Error + + /media/sync/current: + get: + tags: + - App + summary: Get current media sync job + description: |- + Retrieve the current running media sync job, if any. Returns the job details or null when no job is active. + operationId: get_media_sync_current_media_sync_current_get + responses: + "200": + description: Successful Response + "422": + description: Validation Error + + /media/sync/status/{job_id}: + get: + tags: + - App + summary: Get media sync job status + description: |- + Get status and results for the specified media sync job id. Returns 200 with job details including results, or 404 if the job is not found. 
+ operationId: get_media_sync_status_media_sync_status__job_id__get + parameters: + - name: job_id + in: path + responses: + "200": + description: Successful Response + "404": + description: Not Found - Job not found + "422": + description: Validation Error /faces/train/{name}/classify: post: tags: @@ -3147,6 +3200,7 @@ paths: duration: 30 include_recording: true draw: {} + pre_capture: null responses: "200": description: Successful Response @@ -4949,6 +5003,12 @@ components: - type: "null" title: Draw default: {} + pre_capture: + anyOf: + - type: integer + - type: "null" + title: Pre Capture Seconds + default: null type: object title: EventsCreateBody EventsDeleteBody: diff --git a/frigate/api/app.py b/frigate/api/app.py index 440adfce4..a28f174de 100644 --- a/frigate/api/app.py +++ b/frigate/api/app.py @@ -19,6 +19,7 @@ from fastapi import APIRouter, Body, Path, Request, Response from fastapi.encoders import jsonable_encoder from fastapi.params import Depends from fastapi.responses import JSONResponse, PlainTextResponse, StreamingResponse +from filelock import FileLock, Timeout from markupsafe import escape from peewee import SQL, fn, operator from pydantic import ValidationError @@ -30,22 +31,32 @@ from frigate.api.auth import ( require_role, ) from frigate.api.defs.query.app_query_parameters import AppTimelineHourlyQueryParameters -from frigate.api.defs.request.app_body import AppConfigSetBody +from frigate.api.defs.request.app_body import AppConfigSetBody, MediaSyncBody from frigate.api.defs.tags import Tags from frigate.config import FrigateConfig from frigate.config.camera.updater import ( CameraConfigUpdateEnum, CameraConfigUpdateTopic, ) +from frigate.ffmpeg_presets import FFMPEG_HWACCEL_VAAPI, _gpu_selector +from frigate.jobs.media_sync import ( + get_current_media_sync_job, + get_media_sync_job_by_id, + start_media_sync_job, +) from frigate.models import Event, Timeline from frigate.stats.prometheus import get_metrics, update_metrics +from frigate.types 
import JobStatusTypesEnum from frigate.util.builtin import ( clean_camera_user_pass, + deep_merge, flatten_config_data, + load_labels, process_config_query_string, update_yaml_file_bulk, ) -from frigate.util.config import find_config_file +from frigate.util.config import apply_section_update, find_config_file +from frigate.util.schema import get_config_schema from frigate.util.services import ( get_nvidia_driver_info, process_logs, @@ -70,9 +81,7 @@ def is_healthy(): @router.get("/config/schema.json", dependencies=[Depends(allow_public())]) def config_schema(request: Request): - return Response( - content=request.app.frigate_config.schema_json(), media_type="application/json" - ) + return JSONResponse(content=get_config_schema(FrigateConfig)) @router.get( @@ -118,6 +127,10 @@ def config(request: Request): config: dict[str, dict[str, Any]] = config_obj.model_dump( mode="json", warnings="none", exclude_none=True ) + config["detectors"] = { + name: detector.model_dump(mode="json", warnings="none", exclude_none=True) + for name, detector in config_obj.detectors.items() + } # remove the mqtt password config["mqtt"].pop("password", None) @@ -188,6 +201,54 @@ def config(request: Request): return JSONResponse(content=config) +@router.get("/ffmpeg/presets", dependencies=[Depends(allow_any_authenticated())]) +def ffmpeg_presets(): + """Return available ffmpeg preset keys for config UI usage.""" + + # Whitelist based on documented presets in ffmpeg_presets.md + hwaccel_presets = [ + "preset-rpi-64-h264", + "preset-rpi-64-h265", + "preset-vaapi", + "preset-intel-qsv-h264", + "preset-intel-qsv-h265", + "preset-nvidia", + "preset-jetson-h264", + "preset-jetson-h265", + "preset-rkmpp", + ] + input_presets = [ + "preset-http-jpeg-generic", + "preset-http-mjpeg-generic", + "preset-http-reolink", + "preset-rtmp-generic", + "preset-rtsp-generic", + "preset-rtsp-restream", + "preset-rtsp-restream-low-latency", + "preset-rtsp-udp", + "preset-rtsp-blue-iris", + ] + record_output_presets 
= [ + "preset-record-generic", + "preset-record-generic-audio-copy", + "preset-record-generic-audio-aac", + "preset-record-mjpeg", + "preset-record-jpeg", + "preset-record-ubiquiti", + ] + + return JSONResponse( + content={ + "hwaccel_args": hwaccel_presets, + "input_args": input_presets, + "output_args": { + "record": record_output_presets, + "detect": [], + }, + } + ) + + @router.get("/config/raw_paths", dependencies=[Depends(require_role(["admin"]))]) def config_raw_paths(request: Request): """Admin-only endpoint that returns camera paths and go2rtc streams without credential masking.""" @@ -362,108 +423,230 @@ def config_save(save_option: str, body: Any = Body(media_type="text/plain")): ) -@router.put("/config/set", dependencies=[Depends(require_role(["admin"]))]) -def config_set(request: Request, body: AppConfigSetBody): - config_file = find_config_file() - - with open(config_file, "r") as f: - old_raw_config = f.read() +def _config_set_in_memory(request: Request, body: AppConfigSetBody) -> JSONResponse: + """Apply config changes in-memory only, without writing to YAML. + Used for temporary config changes like debug replay camera tuning. + Updates the in-memory Pydantic config and publishes ZMQ updates, + bypassing YAML parsing entirely. 
+ """ try: updates = {} - - # process query string parameters (takes precedence over body.config_data) - parsed_url = urllib.parse.urlparse(str(request.url)) - query_string = urllib.parse.parse_qs(parsed_url.query, keep_blank_values=True) - - # Filter out empty keys but keep blank values for non-empty keys - query_string = {k: v for k, v in query_string.items() if k} - - if query_string: - updates = process_config_query_string(query_string) - elif body.config_data: + if body.config_data: updates = flatten_config_data(body.config_data) + updates = {k: ("" if v is None else v) for k, v in updates.items()} if not updates: return JSONResponse( - content=( - {"success": False, "message": "No configuration data provided"} - ), + content={"success": False, "message": "No configuration data provided"}, status_code=400, ) - # apply all updates in a single operation - update_yaml_file_bulk(config_file, updates) + config: FrigateConfig = request.app.frigate_config - # validate the updated config - with open(config_file, "r") as f: - new_raw_config = f.read() + # Group flat key paths into nested per-camera, per-section dicts + grouped: dict[str, dict[str, dict]] = {} + for key_path, value in updates.items(): + parts = key_path.split(".") + if len(parts) < 3 or parts[0] != "cameras": + continue - try: - config = FrigateConfig.parse(new_raw_config) - except Exception: - with open(config_file, "w") as f: - f.write(old_raw_config) - f.close() - logger.error(f"\nConfig Error:\n\n{str(traceback.format_exc())}") - return JSONResponse( - content=( - { + cam, section = parts[1], parts[2] + grouped.setdefault(cam, {}).setdefault(section, {}) + + # Build nested dict from remaining path (e.g. 
"filters.person.threshold") + target = grouped[cam][section] + for part in parts[3:-1]: + target = target.setdefault(part, {}) + if len(parts) > 3: + target[parts[-1]] = value + elif isinstance(value, dict): + grouped[cam][section] = deep_merge( + grouped[cam][section], value, override=True + ) + else: + grouped[cam][section] = value + + # Apply each section update + for cam_name, sections in grouped.items(): + camera_config = config.cameras.get(cam_name) + if not camera_config: + return JSONResponse( + content={ "success": False, - "message": "Error parsing config. Check logs for error message.", - } - ), - status_code=400, - ) - except Exception as e: - logging.error(f"Error updating config: {e}") - return JSONResponse( - content=({"success": False, "message": "Error updating config"}), - status_code=500, - ) + "message": f"Camera '{cam_name}' not found", + }, + status_code=400, + ) - if body.requires_restart == 0 or body.update_topic: - old_config: FrigateConfig = request.app.frigate_config - request.app.frigate_config = config + for section_name, update in sections.items(): + err = apply_section_update(camera_config, section_name, update) + if err is not None: + return JSONResponse( + content={"success": False, "message": err}, + status_code=400, + ) - if body.update_topic: - if body.update_topic.startswith("config/cameras/"): - _, _, camera, field = body.update_topic.split("/") - - if field == "add": - settings = config.cameras[camera] - elif field == "remove": - settings = old_config.cameras[camera] - else: - settings = config.get_nested_object(body.update_topic) + # Publish ZMQ updates so processing threads pick up changes + if body.update_topic and body.update_topic.startswith("config/cameras/"): + _, _, camera, field = body.update_topic.split("/") + settings = getattr(config.cameras.get(camera, None), field, None) + if settings is not None: request.app.config_publisher.publish_update( CameraConfigUpdateTopic(CameraConfigUpdateEnum[field], camera), 
settings, ) - else: - # Generic handling for global config updates - settings = config.get_nested_object(body.update_topic) - # Publish None for removal, actual config for add/update - request.app.config_publisher.publisher.publish( - body.update_topic, settings + return JSONResponse( + content={"success": True, "message": "Config applied in-memory"}, + status_code=200, + ) + except Exception as e: + logger.error(f"Error applying config in-memory: {e}") + return JSONResponse( + content={"success": False, "message": "Error applying config"}, + status_code=500, + ) + + +@router.put("/config/set", dependencies=[Depends(require_role(["admin"]))]) +def config_set(request: Request, body: AppConfigSetBody): + config_file = find_config_file() + + if body.skip_save: + return _config_set_in_memory(request, body) + + lock = FileLock(f"{config_file}.lock", timeout=5) + + try: + with lock: + with open(config_file, "r") as f: + old_raw_config = f.read() + + try: + updates = {} + + # process query string parameters (takes precedence over body.config_data) + parsed_url = urllib.parse.urlparse(str(request.url)) + query_string = urllib.parse.parse_qs( + parsed_url.query, keep_blank_values=True ) - return JSONResponse( - content=( - { - "success": True, - "message": "Config successfully updated, restart to apply", - } - ), - status_code=200, - ) + # Filter out empty keys but keep blank values for non-empty keys + query_string = {k: v for k, v in query_string.items() if k} + + if query_string: + updates = process_config_query_string(query_string) + elif body.config_data: + updates = flatten_config_data(body.config_data) + # Convert None values to empty strings for deletion (e.g., when deleting masks) + updates = {k: ("" if v is None else v) for k, v in updates.items()} + + if not updates: + return JSONResponse( + content=( + { + "success": False, + "message": "No configuration data provided", + } + ), + status_code=400, + ) + + # apply all updates in a single operation + 
update_yaml_file_bulk(config_file, updates) + + # validate the updated config + with open(config_file, "r") as f: + new_raw_config = f.read() + + try: + config = FrigateConfig.parse(new_raw_config) + except Exception: + with open(config_file, "w") as f: + f.write(old_raw_config) + f.close() + logger.error(f"\nConfig Error:\n\n{str(traceback.format_exc())}") + return JSONResponse( + content=( + { + "success": False, + "message": "Error parsing config. Check logs for error message.", + } + ), + status_code=400, + ) + except Exception as e: + logging.error(f"Error updating config: {e}") + return JSONResponse( + content=({"success": False, "message": "Error updating config"}), + status_code=500, + ) + + if body.requires_restart == 0 or body.update_topic: + old_config: FrigateConfig = request.app.frigate_config + request.app.frigate_config = config + request.app.genai_manager.update_config(config) + + if body.update_topic: + if body.update_topic.startswith("config/cameras/"): + _, _, camera, field = body.update_topic.split("/") + + if field == "add": + settings = config.cameras[camera] + elif field == "remove": + settings = old_config.cameras[camera] + else: + settings = config.get_nested_object(body.update_topic) + + request.app.config_publisher.publish_update( + CameraConfigUpdateTopic( + CameraConfigUpdateEnum[field], camera + ), + settings, + ) + else: + # Generic handling for global config updates + settings = config.get_nested_object(body.update_topic) + + # Publish None for removal, actual config for add/update + request.app.config_publisher.publisher.publish( + body.update_topic, settings + ) + + return JSONResponse( + content=( + { + "success": True, + "message": "Config successfully updated, restart to apply", + } + ), + status_code=200, + ) + except Timeout: + return JSONResponse( + content=( + { + "success": False, + "message": "Another process is currently updating the config. 
Please try again in a few seconds.", + } + ), + status_code=503, + ) @router.get("/vainfo", dependencies=[Depends(allow_any_authenticated())]) def vainfo(): - vainfo = vainfo_hwaccel() + # Use LibvaGpuSelector to pick an appropriate libva device (if available) + selected_gpu = "" + try: + selected_gpu = _gpu_selector.get_gpu_arg(FFMPEG_HWACCEL_VAAPI, 0) or "" + except Exception: + selected_gpu = "" + + # If selected_gpu is empty, pass None to vainfo_hwaccel to run plain `vainfo`. + vainfo = vainfo_hwaccel(device_name=selected_gpu or None) return JSONResponse( content={ "return_code": vainfo.returncode, @@ -598,6 +781,98 @@ def restart(): ) +@router.post( + "/media/sync", + dependencies=[Depends(require_role(["admin"]))], + summary="Start media sync job", + description="""Start an asynchronous media sync job to find and (optionally) remove orphaned media files. + Returns 202 with job details when queued, or 409 if a job is already running.""", +) +def sync_media(body: MediaSyncBody = Body(...)): + """Start async media sync job - remove orphaned files. + + Syncs specified media types: event snapshots, event thumbnails, review thumbnails, + previews, exports, and/or recordings. Job runs in background; use /media/sync/current + or /media/sync/status/{job_id} to check status. + + Args: + body: MediaSyncBody with dry_run flag and media_types list. + media_types can include: 'all', 'event_snapshots', 'event_thumbnails', + 'review_thumbnails', 'previews', 'exports', 'recordings' + + Returns: + 202 Accepted with job_id, or 409 Conflict if job already running. 
+ """ + job_id = start_media_sync_job( + dry_run=body.dry_run, media_types=body.media_types, force=body.force + ) + + if job_id is None: + # A job is already running + current = get_current_media_sync_job() + return JSONResponse( + content={ + "error": "A media sync job is already running", + "current_job_id": current.id if current else None, + }, + status_code=409, + ) + + return JSONResponse( + content={ + "job": { + "job_type": "media_sync", + "status": JobStatusTypesEnum.queued, + "id": job_id, + } + }, + status_code=202, + ) + + +@router.get( + "/media/sync/current", + dependencies=[Depends(require_role(["admin"]))], + summary="Get current media sync job", + description="""Retrieve the current running media sync job, if any. Returns the job details + or null when no job is active.""", +) +def get_media_sync_current(): + """Get the current running media sync job, if any.""" + job = get_current_media_sync_job() + + if job is None: + return JSONResponse(content={"job": None}, status_code=200) + + return JSONResponse( + content={"job": job.to_dict()}, + status_code=200, + ) + + +@router.get( + "/media/sync/status/{job_id}", + dependencies=[Depends(require_role(["admin"]))], + summary="Get media sync job status", + description="""Get status and results for the specified media sync job id. 
Returns 200 with + job details including results, or 404 if the job is not found.""", +) +def get_media_sync_status(job_id: str): + """Get the status of a specific media sync job.""" + job = get_media_sync_job_by_id(job_id) + + if job is None: + return JSONResponse( + content={"error": "Job not found"}, + status_code=404, + ) + + return JSONResponse( + content={"job": job.to_dict()}, + status_code=200, + ) + + @router.get("/labels", dependencies=[Depends(allow_any_authenticated())]) def get_labels(camera: str = ""): try: @@ -647,6 +922,12 @@ def get_sub_labels(split_joined: Optional[int] = None): return JSONResponse(content=sub_labels) +@router.get("/audio_labels", dependencies=[Depends(allow_any_authenticated())]) +def get_audio_labels(): + labels = load_labels("/audio-labelmap.txt", prefill=521) + return JSONResponse(content=labels) + + @router.get("/plus/models", dependencies=[Depends(allow_any_authenticated())]) def plusModels(request: Request, filterByCurrentModelDetector: bool = False): if not request.app.frigate_config.plus_api.is_active(): diff --git a/frigate/api/auth.py b/frigate/api/auth.py index e0a6ec924..39089b583 100644 --- a/frigate/api/auth.py +++ b/frigate/api/auth.py @@ -26,12 +26,18 @@ from frigate.api.defs.request.app_body import ( AppPutRoleBody, ) from frigate.api.defs.tags import Tags -from frigate.config import AuthConfig, ProxyConfig +from frigate.config import AuthConfig, NetworkingConfig, ProxyConfig from frigate.const import CONFIG_DIR, JWT_SECRET_ENV_VAR, PASSWORD_HASH_ALGORITHM from frigate.models import User logger = logging.getLogger(__name__) +# In-memory cache to track which clients we've logged for an anonymous access event. +# Keyed by a hashed value combining remote address + user-agent. The value is +# an expiration timestamp (float). 
+FIRST_LOAD_TTL_SECONDS = 60 * 60 * 24 * 7 # 7 days +_first_load_seen: dict[str, float] = {} + def require_admin_by_default(): """ @@ -41,7 +47,7 @@ def require_admin_by_default(): endpoints require admin access unless explicitly overridden with allow_public(), allow_any_authenticated(), or require_role(). - Port 5000 (internal) always has admin role set by the /auth endpoint, + Internal port always has admin role set by the /auth endpoint, so this check passes automatically for internal requests. Certain paths are exempted from the global admin check because they must @@ -130,7 +136,7 @@ def require_admin_by_default(): pass # For all other paths, require admin role - # Port 5000 (internal) requests have admin role set automatically + # Internal port requests have admin role set automatically role = request.headers.get("remote-role") if role == "admin": return @@ -143,6 +149,17 @@ def require_admin_by_default(): return admin_checker +def _is_authenticated(request: Request) -> bool: + """ + Helper to determine if a request is from an authenticated user. + + Returns True if the request has a valid authenticated user (not anonymous). + Internal port requests are considered anonymous despite having admin role. + """ + username = request.headers.get("remote-user") + return username is not None and username != "anonymous" + + def allow_public(): """ Override dependency to allow unauthenticated access to an endpoint. 
@@ -171,6 +188,7 @@ def allow_any_authenticated(): Rejects: - Requests with no remote-user header (did not pass through /auth endpoint) + - External port requests with anonymous user (auth disabled, no proxy auth) Example: @router.get("/authenticated-endpoint", dependencies=[Depends(allow_any_authenticated())]) @@ -179,8 +197,14 @@ def allow_any_authenticated(): async def auth_checker(request: Request): # Ensure a remote-user has been set by the /auth endpoint username = request.headers.get("remote-user") - if username is None: - raise HTTPException(status_code=401, detail="Authentication required") + + # Internal port requests have admin role and should be allowed + role = request.headers.get("remote-role") + + if role != "admin": + if username is None or not _is_authenticated(request): + raise HTTPException(status_code=401, detail="Authentication required") + return return auth_checker @@ -266,6 +290,15 @@ def get_remote_addr(request: Request): return remote_addr or "127.0.0.1" +def _cleanup_first_load_seen() -> None: + """Cleanup expired entries in the in-memory first-load cache.""" + now = time.time() + # Build list for removal to avoid mutating dict during iteration + expired = [k for k, exp in _first_load_seen.items() if exp <= now] + for k in expired: + del _first_load_seen[k] + + def get_jwt_secret() -> str: jwt_secret = None # check env var @@ -570,12 +603,18 @@ def resolve_role( def auth(request: Request): auth_config: AuthConfig = request.app.frigate_config.auth proxy_config: ProxyConfig = request.app.frigate_config.proxy + networking_config: NetworkingConfig = request.app.frigate_config.networking success_response = Response("", status_code=202) + # handle case where internal port is a string with ip:port + internal_port = networking_config.listen.internal + if type(internal_port) is str: + internal_port = int(internal_port.split(":")[-1]) + # dont require auth if the request is on the internal port # this header is set by Frigate's nginx proxy, so it 
cant be spoofed - if int(request.headers.get("x-server-port", default=0)) == 5000: + if int(request.headers.get("x-server-port", default=0)) == internal_port: success_response.headers["remote-user"] = "anonymous" success_response.headers["remote-role"] = "admin" return success_response @@ -720,10 +759,30 @@ def profile(request: Request): roles_dict = request.app.frigate_config.auth.roles allowed_cameras = User.get_allowed_cameras(role, roles_dict, all_camera_names) - return JSONResponse( + response = JSONResponse( content={"username": username, "role": role, "allowed_cameras": allowed_cameras} ) + if username == "anonymous": + try: + remote_addr = get_remote_addr(request) + except Exception: + remote_addr = ( + request.client.host if hasattr(request, "client") else "unknown" + ) + + ua = request.headers.get("user-agent", "") + key_material = f"{remote_addr}|{ua}" + cache_key = hashlib.sha256(key_material.encode()).hexdigest() + + _cleanup_first_load_seen() + now = time.time() + if cache_key not in _first_load_seen: + _first_load_seen[cache_key] = now + FIRST_LOAD_TTL_SECONDS + logger.info(f"Anonymous user access from {remote_addr} ua={ua[:200]}") + + return response + @router.get( "/logout", diff --git a/frigate/api/chat.py b/frigate/api/chat.py new file mode 100644 index 000000000..7957ab7af --- /dev/null +++ b/frigate/api/chat.py @@ -0,0 +1,821 @@ +"""Chat and LLM tool calling APIs.""" + +import base64 +import json +import logging +import time +from datetime import datetime +from typing import Any, Dict, Generator, List, Optional + +import cv2 +from fastapi import APIRouter, Body, Depends, Request +from fastapi.responses import JSONResponse, StreamingResponse +from pydantic import BaseModel + +from frigate.api.auth import ( + allow_any_authenticated, + get_allowed_cameras_for_filter, +) +from frigate.api.defs.query.events_query_parameters import EventsQueryParams +from frigate.api.defs.request.chat_body import ChatCompletionRequest +from 
frigate.api.defs.response.chat_response import ( + ChatCompletionResponse, + ChatMessageResponse, + ToolCall, +) +from frigate.api.defs.tags import Tags +from frigate.api.event import events +from frigate.genai.utils import build_assistant_message_for_conversation + +logger = logging.getLogger(__name__) + +router = APIRouter(tags=[Tags.chat]) + + +def _chunk_content(content: str, chunk_size: int = 80) -> Generator[str, None, None]: + """Yield content in word-aware chunks for streaming.""" + if not content: + return + words = content.split(" ") + current: List[str] = [] + current_len = 0 + for w in words: + current.append(w) + current_len += len(w) + 1 + if current_len >= chunk_size: + yield " ".join(current) + " " + current = [] + current_len = 0 + if current: + yield " ".join(current) + + +def _format_events_with_local_time( + events_list: List[Dict[str, Any]], +) -> List[Dict[str, Any]]: + """Add human-readable local start/end times to each event for the LLM.""" + result = [] + for evt in events_list: + if not isinstance(evt, dict): + result.append(evt) + continue + copy_evt = dict(evt) + try: + start_ts = evt.get("start_time") + end_ts = evt.get("end_time") + if start_ts is not None: + dt_start = datetime.fromtimestamp(start_ts) + copy_evt["start_time_local"] = dt_start.strftime("%Y-%m-%d %I:%M:%S %p") + if end_ts is not None: + dt_end = datetime.fromtimestamp(end_ts) + copy_evt["end_time_local"] = dt_end.strftime("%Y-%m-%d %I:%M:%S %p") + except (TypeError, ValueError, OSError): + pass + result.append(copy_evt) + return result + + +class ToolExecuteRequest(BaseModel): + """Request model for tool execution.""" + + tool_name: str + arguments: Dict[str, Any] + + +def get_tool_definitions() -> List[Dict[str, Any]]: + """ + Get OpenAI-compatible tool definitions for Frigate. + + Returns a list of tool definitions that can be used with OpenAI-compatible + function calling APIs. 
+ """ + return [ + { + "type": "function", + "function": { + "name": "search_objects", + "description": ( + "Search for detected objects in Frigate by camera, object label, time range, " + "zones, and other filters. Use this to answer questions about when " + "objects were detected, what objects appeared, or to find specific object detections. " + "An 'object' in Frigate represents a tracked detection (e.g., a person, package, car). " + "When the user asks about a specific name (person, delivery company, animal, etc.), " + "filter by sub_label only and do not set label." + ), + "parameters": { + "type": "object", + "properties": { + "camera": { + "type": "string", + "description": "Camera name to filter by (optional).", + }, + "label": { + "type": "string", + "description": "Object label to filter by (e.g., 'person', 'package', 'car').", + }, + "sub_label": { + "type": "string", + "description": "Name of a person, delivery company, animal, etc. When filtering by a specific name, use only sub_label; do not set label.", + }, + "after": { + "type": "string", + "description": "Start time in ISO 8601 format (e.g., '2024-01-01T00:00:00Z').", + }, + "before": { + "type": "string", + "description": "End time in ISO 8601 format (e.g., '2024-01-01T23:59:59Z').", + }, + "zones": { + "type": "array", + "items": {"type": "string"}, + "description": "List of zone names to filter by.", + }, + "limit": { + "type": "integer", + "description": "Maximum number of objects to return (default: 25).", + "default": 25, + }, + }, + }, + "required": [], + }, + }, + { + "type": "function", + "function": { + "name": "get_live_context", + "description": ( + "Get the current detection information for a camera: objects being tracked, " + "zones, timestamps. Use this to understand what is visible in the live view. " + "Call this when the user has included a live image (via include_live_image) or " + "when answering questions about what is happening right now on a specific camera." 
+ ), + "parameters": { + "type": "object", + "properties": { + "camera": { + "type": "string", + "description": "Camera name to get live context for.", + }, + }, + "required": ["camera"], + }, + }, + }, + ] + + +@router.get( + "/chat/tools", + dependencies=[Depends(allow_any_authenticated())], + summary="Get available tools", + description="Returns OpenAI-compatible tool definitions for function calling.", +) +def get_tools() -> JSONResponse: + """Get list of available tools for LLM function calling.""" + tools = get_tool_definitions() + return JSONResponse(content={"tools": tools}) + + +async def _execute_search_objects( + arguments: Dict[str, Any], + allowed_cameras: List[str], +) -> JSONResponse: + """ + Execute the search_objects tool. + + This searches for detected objects (events) in Frigate using the same + logic as the events API endpoint. + """ + # Parse after/before as server local time; convert to Unix timestamp + after = arguments.get("after") + before = arguments.get("before") + + def _parse_as_local_timestamp(s: str): + s = s.replace("Z", "").strip()[:19] + dt = datetime.strptime(s, "%Y-%m-%dT%H:%M:%S") + return time.mktime(dt.timetuple()) + + if after: + try: + after = _parse_as_local_timestamp(after) + except (ValueError, AttributeError, TypeError): + logger.warning(f"Invalid 'after' timestamp format: {after}") + after = None + + if before: + try: + before = _parse_as_local_timestamp(before) + except (ValueError, AttributeError, TypeError): + logger.warning(f"Invalid 'before' timestamp format: {before}") + before = None + + # Convert zones array to comma-separated string if provided + zones = arguments.get("zones") + if isinstance(zones, list): + zones = ",".join(zones) + elif zones is None: + zones = "all" + + # Build query parameters compatible with EventsQueryParams + query_params = EventsQueryParams( + cameras=arguments.get("camera", "all"), + labels=arguments.get("label", "all"), + sub_labels=arguments.get("sub_label", "all").lower(), + 
zones=zones, + zone=zones, + after=after, + before=before, + limit=arguments.get("limit", 25), + ) + + try: + # Call the events endpoint function directly + # The events function is synchronous and takes params and allowed_cameras + response = events(query_params, allowed_cameras) + + # The response is already a JSONResponse with event data + # Return it as-is for the LLM + return response + except Exception as e: + logger.error(f"Error executing search_objects: {e}", exc_info=True) + return JSONResponse( + content={ + "success": False, + "message": "Error searching objects", + }, + status_code=500, + ) + + +@router.post( + "/chat/execute", + dependencies=[Depends(allow_any_authenticated())], + summary="Execute a tool", + description="Execute a tool function call from an LLM.", +) +async def execute_tool( + body: ToolExecuteRequest = Body(...), + allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter), +) -> JSONResponse: + """ + Execute a tool function call. + + This endpoint receives tool calls from LLMs and executes the corresponding + Frigate operations, returning results in a format the LLM can understand. 
+ """ + tool_name = body.tool_name + arguments = body.arguments + + logger.debug(f"Executing tool: {tool_name} with arguments: {arguments}") + + if tool_name == "search_objects": + return await _execute_search_objects(arguments, allowed_cameras) + + return JSONResponse( + content={ + "success": False, + "message": f"Unknown tool: {tool_name}", + "tool": tool_name, + }, + status_code=400, + ) + + +async def _execute_get_live_context( + request: Request, + camera: str, + allowed_cameras: List[str], +) -> Dict[str, Any]: + if camera not in allowed_cameras: + return { + "error": f"Camera '{camera}' not found or access denied", + } + + if camera not in request.app.frigate_config.cameras: + return { + "error": f"Camera '{camera}' not found", + } + + try: + frame_processor = request.app.detected_frames_processor + camera_state = frame_processor.camera_states.get(camera) + + if camera_state is None: + return { + "error": f"Camera '{camera}' state not available", + } + + tracked_objects_dict = {} + with camera_state.current_frame_lock: + tracked_objects = camera_state.tracked_objects.copy() + frame_time = camera_state.current_frame_time + + for obj_id, tracked_obj in tracked_objects.items(): + obj_dict = tracked_obj.to_dict() + if obj_dict.get("frame_time") == frame_time: + tracked_objects_dict[obj_id] = { + "label": obj_dict.get("label"), + "zones": obj_dict.get("current_zones", []), + "sub_label": obj_dict.get("sub_label"), + "stationary": obj_dict.get("stationary", False), + } + + return { + "camera": camera, + "timestamp": frame_time, + "detections": list(tracked_objects_dict.values()), + } + + except Exception as e: + logger.error(f"Error executing get_live_context: {e}", exc_info=True) + return { + "error": "Error getting live context", + } + + +async def _get_live_frame_image_url( + request: Request, + camera: str, + allowed_cameras: List[str], +) -> Optional[str]: + """ + Fetch the current live frame for a camera as a base64 data URL. 
async def _execute_tool_internal(
    tool_name: str,
    arguments: Dict[str, Any],
    request: Request,
    allowed_cameras: List[str],
) -> Dict[str, Any]:
    """
    Internal helper to execute a tool and return the result as a dict.

    This is used by the chat completion endpoint to execute tools.

    NOTE(review): despite the annotation, the "search_objects" branch may
    return a list (the decoded JSON body of the search response) or whatever
    ``response.content`` holds; _execute_pending_tools relies on the list case.
    """
    if tool_name == "search_objects":
        response = await _execute_search_objects(arguments, allowed_cameras)
        try:
            # The search helper may return a Response-like object: prefer the
            # raw JSON body, fall back to a .content attribute, else empty.
            if hasattr(response, "body"):
                body_str = response.body.decode("utf-8")
                return json.loads(body_str)
            elif hasattr(response, "content"):
                return response.content
            else:
                return {}
        except (json.JSONDecodeError, AttributeError) as e:
            logger.warning(f"Failed to extract tool result: {e}")
            return {"error": "Failed to parse tool result"}
    elif tool_name == "get_live_context":
        camera = arguments.get("camera")
        if not camera:
            logger.error(
                "Tool get_live_context failed: camera parameter is required. "
                "Arguments: %s",
                json.dumps(arguments),
            )
            return {"error": "Camera parameter is required"}
        return await _execute_get_live_context(request, camera, allowed_cameras)
    else:
        logger.error(
            "Tool call failed: unknown tool %r. Expected one of: search_objects, get_live_context. "
            "Arguments received: %s",
            tool_name,
            json.dumps(arguments),
        )
        return {"error": f"Unknown tool: {tool_name}"}


async def _execute_pending_tools(
    pending_tool_calls: List[Dict[str, Any]],
    request: Request,
    allowed_cameras: List[str],
) -> tuple[List[ToolCall], List[Dict[str, Any]]]:
    """
    Execute a list of tool calls; return (ToolCall list for API response, tool result dicts for conversation).

    A failing tool never aborts the batch: its error is serialized and fed
    back to the LLM as that tool's result instead.
    """
    tool_calls_out: List[ToolCall] = []
    tool_results: List[Dict[str, Any]] = []
    for tool_call in pending_tool_calls:
        tool_name = tool_call["name"]
        # Arguments may be missing or explicitly None; normalize to a dict.
        tool_args = tool_call.get("arguments") or {}
        tool_call_id = tool_call["id"]
        logger.debug(
            f"Executing tool: {tool_name} (id: {tool_call_id}) with arguments: {json.dumps(tool_args, indent=2)}"
        )
        try:
            tool_result = await _execute_tool_internal(
                tool_name, tool_args, request, allowed_cameras
            )
            if isinstance(tool_result, dict) and tool_result.get("error"):
                logger.error(
                    "Tool call %s (id: %s) returned error: %s. Arguments: %s",
                    tool_name,
                    tool_call_id,
                    tool_result.get("error"),
                    json.dumps(tool_args),
                )
            if tool_name == "search_objects" and isinstance(tool_result, list):
                # Attach server-local timestamps, then strip each event down
                # to the fields the LLM needs to keep the prompt small.
                tool_result = _format_events_with_local_time(tool_result)
                _keys = {
                    "id",
                    "camera",
                    "label",
                    "zones",
                    "start_time_local",
                    "end_time_local",
                    "sub_label",
                    "event_count",
                }
                tool_result = [
                    {k: evt[k] for k in _keys if k in evt}
                    for evt in tool_result
                    if isinstance(evt, dict)
                ]
            # Tool results travel back to the LLM as strings.
            result_content = (
                json.dumps(tool_result)
                if isinstance(tool_result, (dict, list))
                else (tool_result if isinstance(tool_result, str) else str(tool_result))
            )
            tool_calls_out.append(
                ToolCall(name=tool_name, arguments=tool_args, response=result_content)
            )
            tool_results.append(
                {
                    "role": "tool",
                    "tool_call_id": tool_call_id,
                    "content": result_content,
                }
            )
        except Exception as e:
            # Report the failure as the tool's result rather than raising.
            logger.error(
                "Error executing tool %s (id: %s): %s. Arguments: %s",
                tool_name,
                tool_call_id,
                e,
                json.dumps(tool_args),
                exc_info=True,
            )
            error_content = json.dumps({"error": f"Tool execution failed: {str(e)}"})
            tool_calls_out.append(
                ToolCall(name=tool_name, arguments=tool_args, response=error_content)
            )
            tool_results.append(
                {
                    "role": "tool",
                    "tool_call_id": tool_call_id,
                    "content": error_content,
                }
            )
    return (tool_calls_out, tool_results)
@router.post(
    "/chat/completion",
    dependencies=[Depends(allow_any_authenticated())],
    summary="Chat completion with tool calling",
    description=(
        "Send a chat message to the configured GenAI provider with tool calling support. "
        "The LLM can call Frigate tools to answer questions about your cameras and events."
    ),
)
async def chat_completion(
    request: Request,
    body: ChatCompletionRequest = Body(...),
    allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter),
):
    """
    Chat completion endpoint with tool calling support.

    This endpoint:
    1. Gets the configured GenAI client
    2. Gets tool definitions
    3. Sends messages + tools to LLM
    4. Handles tool_calls if present
    5. Executes tools and sends results back to LLM
    6. Repeats until final answer
    7. Returns response to user

    Fix: when the streaming path exhausts max_tool_iterations, it now emits
    the same explanatory message as the non-streaming path before the
    terminal "done" event, instead of ending the stream with a bare "done".
    """
    genai_client = request.app.genai_manager.tool_client
    if not genai_client:
        return JSONResponse(
            content={
                "error": "GenAI is not configured. Please configure a GenAI provider in your Frigate config.",
            },
            status_code=400,
        )

    tools = get_tool_definitions()
    conversation = []

    # Anchor relative-time reasoning ("today", "yesterday") to server time.
    current_datetime = datetime.now()
    current_date_str = current_datetime.strftime("%Y-%m-%d")
    current_time_str = current_datetime.strftime("%I:%M:%S %p")

    # Build a camera list so the LLM can map friendly names to camera IDs.
    cameras_info = []
    config = request.app.frigate_config
    for camera_id in allowed_cameras:
        if camera_id not in config.cameras:
            continue
        camera_config = config.cameras[camera_id]
        friendly_name = (
            camera_config.friendly_name
            if camera_config.friendly_name
            else camera_id.replace("_", " ").title()
        )
        cameras_info.append(f" - {friendly_name} (ID: {camera_id})")

    cameras_section = ""
    if cameras_info:
        cameras_section = (
            "\n\nAvailable cameras:\n"
            + "\n".join(cameras_info)
            + "\n\nWhen users refer to cameras by their friendly name (e.g., 'Back Deck Camera'), use the corresponding camera ID (e.g., 'back_deck_cam') in tool calls."
        )

    live_image_note = ""
    if body.include_live_image:
        live_image_note = (
            f"\n\nThe first user message includes a live image from camera "
            f"'{body.include_live_image}'. Use get_live_context for that camera to get "
            "current detection details (objects, zones) to aid in understanding the image."
        )

    system_prompt = f"""You are a helpful assistant for Frigate, a security camera NVR system. You help users answer questions about their cameras, detected objects, and events.

Current server local date and time: {current_date_str} at {current_time_str}

Do not start your response with phrases like "I will check...", "Let me see...", or "Let me look...". Answer directly.

Always present times to the user in the server's local timezone. When tool results include start_time_local and end_time_local, use those exact strings when listing or describing detection times—do not convert or invent timestamps. Do not use UTC or ISO format with Z for the user-facing answer unless the tool result only provides Unix timestamps without local time fields.
When users ask about "today", "yesterday", "this week", etc., use the current date above as reference.
When searching for objects or events, use ISO 8601 format for dates (e.g., {current_date_str}T00:00:00Z for the start of today).
Always be accurate with time calculations based on the current date provided.{cameras_section}{live_image_note}"""

    conversation.append(
        {
            "role": "system",
            "content": system_prompt,
        }
    )

    # Copy client messages, attaching the live frame (if requested) to the
    # first user message as multimodal content.
    first_user_message_seen = False
    for msg in body.messages:
        msg_dict = {
            "role": msg.role,
            "content": msg.content,
        }
        if msg.tool_call_id:
            msg_dict["tool_call_id"] = msg.tool_call_id
        if msg.name:
            msg_dict["name"] = msg.name

        if (
            msg.role == "user"
            and not first_user_message_seen
            and body.include_live_image
        ):
            first_user_message_seen = True
            image_url = await _get_live_frame_image_url(
                request, body.include_live_image, allowed_cameras
            )
            if image_url:
                msg_dict["content"] = [
                    {"type": "text", "text": msg.content},
                    {"type": "image_url", "image_url": {"url": image_url}},
                ]

        conversation.append(msg_dict)

    tool_iterations = 0
    tool_calls: List[ToolCall] = []
    max_iterations = body.max_tool_iterations

    logger.debug(
        f"Starting chat completion with {len(conversation)} message(s), "
        f"{len(tools)} tool(s) available, max_iterations={max_iterations}"
    )

    # True LLM streaming when client supports it and stream requested
    if body.stream and hasattr(genai_client, "chat_with_tools_stream"):
        stream_tool_calls: List[ToolCall] = []
        stream_iterations = 0

        async def stream_body_llm():
            nonlocal conversation, stream_tool_calls, stream_iterations
            while stream_iterations < max_iterations:
                logger.debug(
                    f"Streaming LLM (iteration {stream_iterations + 1}/{max_iterations}) "
                    f"with {len(conversation)} message(s)"
                )
                async for event in genai_client.chat_with_tools_stream(
                    messages=conversation,
                    tools=tools if tools else None,
                    tool_choice="auto",
                ):
                    kind, value = event
                    if kind == "content_delta":
                        yield (
                            json.dumps({"type": "content", "delta": value}).encode(
                                "utf-8"
                            )
                            + b"\n"
                        )
                    elif kind == "message":
                        msg = value
                        if msg.get("finish_reason") == "error":
                            yield (
                                json.dumps(
                                    {
                                        "type": "error",
                                        "error": "An error occurred while processing your request.",
                                    }
                                ).encode("utf-8")
                                + b"\n"
                            )
                            return
                        pending = msg.get("tool_calls")
                        if pending:
                            stream_iterations += 1
                            conversation.append(
                                build_assistant_message_for_conversation(
                                    msg.get("content"), pending
                                )
                            )
                            executed_calls, tool_results = await _execute_pending_tools(
                                pending, request, allowed_cameras
                            )
                            stream_tool_calls.extend(executed_calls)
                            conversation.extend(tool_results)
                            yield (
                                json.dumps(
                                    {
                                        "type": "tool_calls",
                                        "tool_calls": [
                                            tc.model_dump() for tc in stream_tool_calls
                                        ],
                                    }
                                ).encode("utf-8")
                                + b"\n"
                            )
                            # Break the async-for; the while loop re-enters
                            # with the tool results appended.
                            break
                        else:
                            yield (json.dumps({"type": "done"}).encode("utf-8") + b"\n")
                            return
            else:
                # while/else: the iteration budget was exhausted without a
                # final answer. Mirror the non-streaming path's message so
                # NDJSON clients see an explanation, not just a bare "done".
                yield (
                    json.dumps(
                        {
                            "type": "content",
                            "delta": "I reached the maximum number of tool call iterations. Please try rephrasing your question.",
                        }
                    ).encode("utf-8")
                    + b"\n"
                )
                yield json.dumps({"type": "done"}).encode("utf-8") + b"\n"

        return StreamingResponse(
            stream_body_llm(),
            media_type="application/x-ndjson",
            headers={"X-Accel-Buffering": "no"},
        )

    try:
        while tool_iterations < max_iterations:
            logger.debug(
                f"Calling LLM (iteration {tool_iterations + 1}/{max_iterations}) "
                f"with {len(conversation)} message(s) in conversation"
            )
            response = genai_client.chat_with_tools(
                messages=conversation,
                tools=tools if tools else None,
                tool_choice="auto",
            )

            if response.get("finish_reason") == "error":
                logger.error("GenAI client returned an error")
                return JSONResponse(
                    content={
                        "error": "An error occurred while processing your request.",
                    },
                    status_code=500,
                )

            conversation.append(
                build_assistant_message_for_conversation(
                    response.get("content"), response.get("tool_calls")
                )
            )

            pending_tool_calls = response.get("tool_calls")
            if not pending_tool_calls:
                logger.debug(
                    f"Chat completion finished with final answer (iterations: {tool_iterations})"
                )
                final_content = response.get("content") or ""

                if body.stream:

                    async def stream_body() -> Any:
                        if tool_calls:
                            yield (
                                json.dumps(
                                    {
                                        "type": "tool_calls",
                                        "tool_calls": [
                                            tc.model_dump() for tc in tool_calls
                                        ],
                                    }
                                ).encode("utf-8")
                                + b"\n"
                            )
                        # Stream content in word-sized chunks for smooth UX
                        for part in _chunk_content(final_content):
                            yield (
                                json.dumps({"type": "content", "delta": part}).encode(
                                    "utf-8"
                                )
                                + b"\n"
                            )
                        yield json.dumps({"type": "done"}).encode("utf-8") + b"\n"

                    return StreamingResponse(
                        stream_body(),
                        media_type="application/x-ndjson",
                    )

                return JSONResponse(
                    content=ChatCompletionResponse(
                        message=ChatMessageResponse(
                            role="assistant",
                            content=final_content,
                            tool_calls=None,
                        ),
                        finish_reason=response.get("finish_reason", "stop"),
                        tool_iterations=tool_iterations,
                        tool_calls=tool_calls,
                    ).model_dump(),
                )

            tool_iterations += 1
            logger.debug(
                f"Tool calls detected (iteration {tool_iterations}/{max_iterations}): "
                f"{len(pending_tool_calls)} tool(s) to execute"
            )
            executed_calls, tool_results = await _execute_pending_tools(
                pending_tool_calls, request, allowed_cameras
            )
            tool_calls.extend(executed_calls)
            conversation.extend(tool_results)
            logger.debug(
                f"Added {len(tool_results)} tool result(s) to conversation. "
                f"Continuing with next LLM call..."
            )

        logger.warning(
            f"Max tool iterations ({max_iterations}) reached. Returning partial response."
        )
        return JSONResponse(
            content=ChatCompletionResponse(
                message=ChatMessageResponse(
                    role="assistant",
                    content="I reached the maximum number of tool call iterations. Please try rephrasing your question.",
                    tool_calls=None,
                ),
                finish_reason="length",
                tool_iterations=tool_iterations,
                tool_calls=tool_calls,
            ).model_dump(),
        )

    except Exception as e:
        logger.error(f"Error in chat completion: {e}", exc_info=True)
        return JSONResponse(
            content={
                "error": "An error occurred while processing your request.",
            },
            status_code=500,
        )
class DebugReplayStopResponse(BaseModel):
    """Response for stopping a debug replay session."""

    # True when the session was stopped and its artifacts cleaned up.
    success: bool


@router.post(
    "/debug_replay/start",
    response_model=DebugReplayStartResponse,
    dependencies=[Depends(require_role(["admin"]))],
    summary="Start debug replay",
    description="Start a debug replay session from camera recordings.",
)
async def start_debug_replay(request: Request, body: DebugReplayStartBody):
    """Start a debug replay session.

    Returns 409 if a session is already active, 400 for invalid
    parameters, 500 for internal start failures.
    """
    replay_manager = request.app.replay_manager

    # Only one replay session may run at a time.
    if replay_manager.active:
        return JSONResponse(
            content={
                "success": False,
                "message": "A replay session is already active",
            },
            status_code=409,
        )

    try:
        # start() does blocking work, so run it in a worker thread to keep
        # the event loop responsive.
        replay_camera = await asyncio.to_thread(
            replay_manager.start,
            source_camera=body.camera,
            start_ts=body.start_time,
            end_ts=body.end_time,
            frigate_config=request.app.frigate_config,
            config_publisher=request.app.config_publisher,
        )
    except ValueError:
        # Bad caller input; log details server-side, return a generic message.
        logger.exception("Invalid parameters for debug replay start request")
        return JSONResponse(
            content={
                "success": False,
                "message": "Invalid debug replay request parameters",
            },
            status_code=400,
        )
    except RuntimeError:
        logger.exception("Error while starting debug replay session")
        return JSONResponse(
            content={
                "success": False,
                "message": "An internal error occurred while starting debug replay",
            },
            status_code=500,
        )

    return DebugReplayStartResponse(
        success=True,
        replay_camera=replay_camera,
    )


@router.get(
    "/debug_replay/status",
    response_model=DebugReplayStatusResponse,
    dependencies=[Depends(require_role(["admin"]))],
    summary="Get debug replay status",
    description="Get the status of the current debug replay session.",
)
def get_debug_replay_status(request: Request):
    """Get the current replay session status.

    live_ready is True when the replay camera has produced a frame no older
    than the camera's ffmpeg retry interval (default 10s).
    """
    replay_manager = request.app.replay_manager

    live_ready = False
    replay_camera = replay_manager.replay_camera_name

    if replay_manager.active and replay_camera:
        frame_processor = request.app.detected_frames_processor
        frame = frame_processor.get_current_frame(replay_camera)

        if frame is not None:
            frame_time = frame_processor.get_current_frame_time(replay_camera)
            camera_config = request.app.frigate_config.cameras.get(replay_camera)
            retry_interval = 10

            if camera_config is not None:
                # Fall back to 10s if the config value is unset/zero.
                retry_interval = float(camera_config.ffmpeg.retry_interval or 10)

            # Frame is "live" if it arrived within the last retry_interval.
            live_ready = datetime.now().timestamp() <= frame_time + retry_interval

    return DebugReplayStatusResponse(
        active=replay_manager.active,
        replay_camera=replay_camera,
        source_camera=replay_manager.source_camera,
        start_time=replay_manager.start_ts,
        end_time=replay_manager.end_ts,
        live_ready=live_ready,
    )


@router.post(
    "/debug_replay/stop",
    response_model=DebugReplayStopResponse,
    dependencies=[Depends(require_role(["admin"]))],
    summary="Stop debug replay",
    description="Stop the active debug replay session and clean up all artifacts.",
)
async def stop_debug_replay(request: Request):
    """Stop the active replay session.

    Returns 400 if no session is active, 500 if cleanup fails.
    """
    replay_manager = request.app.replay_manager

    if not replay_manager.active:
        return JSONResponse(
            content={"success": False, "message": "No active replay session"},
            status_code=400,
        )

    try:
        # stop() does blocking cleanup; offload it from the event loop.
        await asyncio.to_thread(
            replay_manager.stop,
            frigate_config=request.app.frigate_config,
            config_publisher=request.app.config_publisher,
        )
    except (ValueError, RuntimeError, OSError) as e:
        logger.error("Error stopping replay: %s", e)
        return JSONResponse(
            content={
                "success": False,
                "message": "Failed to stop replay session due to an internal error.",
            },
            status_code=500,
        )

    return DebugReplayStopResponse(success=True)
class MediaRecordingsSummaryQueryParams(BaseModel):
    """Query parameters for the recordings summary endpoint."""

    # Timezone name used for day bucketing; defaults to UTC.
    timezone: str = "utc"
    # Comma-separated camera names, or "all".
    cameras: Optional[str] = "all"


class MediaRecordingsAvailabilityQueryParams(BaseModel):
    """Query parameters for the recordings availability endpoint."""

    # Comma-separated camera names, or "all".
    cameras: str = "all"
    # Optional unix-timestamp bounds for the availability window.
    before: Union[float, SkipJsonSchema[None]] = None
    after: Union[float, SkipJsonSchema[None]] = None
    # Bucket size in seconds for the availability data.
    scale: int = 30


class RecordingsDeleteQueryParams(BaseModel):
    """Query parameters for deleting recordings."""

    # NOTE(review): semantics of `keep` (likely a comma-separated list of
    # segments/ids to retain) are defined by the endpoint — confirm there.
    keep: Optional[str] = None
    # Comma-separated camera names, or "all".
    cameras: Optional[str] = "all"


class AppConfigSetBody(BaseModel):
    """Request body for updating the app configuration."""

    # 1 when the change requires a Frigate restart to take effect.
    requires_restart: int = 1
    update_topic: str | None = None
    config_data: Optional[Dict[str, Any]] = None
    # When True, apply the change without persisting it to the config file.
    skip_save: bool = False


class MediaSyncBody(BaseModel):
    """Request body for media file synchronization (orphan cleanup)."""

    dry_run: bool = Field(
        default=True, description="If True, only report orphans without deleting them"
    )
    media_types: List[str] = Field(
        default=["all"],
        description="Types of media to sync: 'all', 'event_snapshots', 'event_thumbnails', 'review_thumbnails', 'previews', 'exports', 'recordings'",
    )
    force: bool = Field(
        default=False, description="If True, bypass safety threshold checks"
    )


class ChatMessage(BaseModel):
    """A single message in a chat conversation."""

    role: str = Field(
        description="Message role: 'user', 'assistant', 'system', or 'tool'"
    )
    content: str = Field(description="Message content")
    tool_call_id: Optional[str] = Field(
        default=None, description="For tool messages, the ID of the tool call"
    )
    name: Optional[str] = Field(
        default=None, description="For tool messages, the tool name"
    )


class ChatCompletionRequest(BaseModel):
    """Request for chat completion with tool calling."""

    messages: list[ChatMessage] = Field(
        description="List of messages in the conversation"
    )
    # Bounded to 1..10 to prevent unbounded LLM/tool loops.
    max_tool_iterations: int = Field(
        default=5,
        ge=1,
        le=10,
        description="Maximum number of tool call iterations (default: 5)",
    )
    include_live_image: Optional[str] = Field(
        default=None,
        description=(
            "If set, the current live frame from this camera is attached to the first "
            "user message as multimodal content. Use with get_live_context for detection info."
        ),
    )
    stream: bool = Field(
        default=False,
        description="If true, stream the final assistant response in the body as newline-delimited JSON.",
    )


class ExportCaseCreateBody(BaseModel):
    """Request body for creating a new export case."""

    name: str = Field(max_length=100, description="Friendly name of the export case")
    description: Optional[str] = Field(
        default=None, description="Optional description of the export case"
    )


class ExportCaseUpdateBody(BaseModel):
    """Request body for updating an existing export case."""

    # All fields optional: only provided fields are updated (partial update).
    name: Optional[str] = Field(
        default=None,
        max_length=100,
        description="Updated friendly name of the export case",
    )
    description: Optional[str] = Field(
        default=None, description="Updated description of the export case"
    )


class ExportCaseAssignBody(BaseModel):
    """Request body for assigning or unassigning an export to a case."""

    export_case_id: Optional[str] = Field(
        default=None,
        max_length=30,
        description="Case ID to assign to the export, or null to unassign",
    )
class ExportRecordingsBody(BaseModel):
    """Request body for starting a recordings export."""

    source: PlaybackSourceEnum = Field(
        default=PlaybackSourceEnum.recordings, title="Playback source"
    )
    name: Optional[str] = Field(title="Friendly name", default=None, max_length=256)
    image_path: Union[str, SkipJsonSchema[None]] = None
    export_case_id: Optional[str] = Field(
        default=None,
        title="Export case ID",
        max_length=30,
        description="ID of the export case to assign this export to",
    )


class ExportRecordingsCustomBody(BaseModel):
    """Request body for a custom export with user-supplied ffmpeg arguments."""

    source: PlaybackSourceEnum = Field(
        default=PlaybackSourceEnum.recordings, title="Playback source"
    )
    # FIX: was annotated `str` with default=None — the annotation and default
    # contradicted each other (and the JSON schema lied about nullability).
    # Optional[str] matches the default and is consistent with
    # ExportRecordingsBody.name.
    name: Optional[str] = Field(title="Friendly name", default=None, max_length=256)
    image_path: Union[str, SkipJsonSchema[None]] = None
    export_case_id: Optional[str] = Field(
        default=None,
        title="Export case ID",
        max_length=30,
        description="ID of the export case to assign this export to",
    )
    ffmpeg_input_args: Optional[str] = Field(
        default=None,
        title="FFmpeg input arguments",
        description="Custom FFmpeg input arguments. If not provided, defaults to timelapse input args.",
    )
    ffmpeg_output_args: Optional[str] = Field(
        default=None,
        title="FFmpeg output arguments",
        description="Custom FFmpeg output arguments. If not provided, defaults to timelapse output args.",
    )
    cpu_fallback: bool = Field(
        default=False,
        title="CPU Fallback",
        description="If true, retry export without hardware acceleration if the initial export fails.",
    )


class ToolCallInvocation(BaseModel):
    """A tool call requested by the LLM (before execution)."""

    id: str = Field(description="Unique identifier for this tool call")
    name: str = Field(description="Tool name to call")
    arguments: dict[str, Any] = Field(description="Arguments for the tool call")


class ChatMessageResponse(BaseModel):
    """A message in the chat response."""

    role: str = Field(description="Message role")
    content: Optional[str] = Field(
        default=None, description="Message content (None if tool calls present)"
    )
    tool_calls: Optional[list[ToolCallInvocation]] = Field(
        default=None, description="Tool calls if LLM wants to call tools"
    )


class ToolCall(BaseModel):
    """A tool that was executed during the completion, with its response."""

    name: str = Field(description="Tool name that was called")
    arguments: dict[str, Any] = Field(
        default_factory=dict, description="Arguments passed to the tool"
    )
    response: str = Field(
        default="",
        description="The response or result returned from the tool execution",
    )


class ChatCompletionResponse(BaseModel):
    """Response from chat completion."""

    message: ChatMessageResponse = Field(description="The assistant's message")
    finish_reason: str = Field(
        description="Reason generation stopped: 'stop', 'tool_calls', 'length', 'error'"
    )
    tool_iterations: int = Field(
        default=0, description="Number of tool call iterations performed"
    )
    tool_calls: list[ToolCall] = Field(
        default_factory=list,
        description="List of tool calls that were executed during this completion",
    )


class ExportCaseModel(BaseModel):
    """Model representing a single export case."""

    id: str = Field(description="Unique identifier for the export case")
    name: str = Field(description="Friendly name of the export case")
    description: Optional[str] = Field(
        default=None, description="Optional description of the export case"
    )
    created_at: float = Field(
        description="Unix timestamp when the export case was created"
    )
    updated_at: float = Field(
        description="Unix timestamp when the export case was last updated"
    )


# Convenience alias: the list-cases endpoint returns a bare JSON array.
ExportCasesResponse = List[ExportCaseModel]
"Classification" logs = "Logs" media = "Media" + motion_search = "Motion Search" notifications = "Notifications" + preview = "Preview" + recordings = "Recordings" review = "Review" - export = "Export" - events = "Events" - classification = "Classification" - auth = "Auth" diff --git a/frigate/api/event.py b/frigate/api/event.py index c03cfb431..b0a749018 100644 --- a/frigate/api/event.py +++ b/frigate/api/event.py @@ -1782,6 +1782,7 @@ def create_event( body.duration, "api", body.draw, + body.pre_capture, ), EventMetadataTypeEnum.manual_event_create.value, ) diff --git a/frigate/api/export.py b/frigate/api/export.py index 24fed93b0..23f975618 100644 --- a/frigate/api/export.py +++ b/frigate/api/export.py @@ -4,10 +4,10 @@ import logging import random import string from pathlib import Path -from typing import List +from typing import List, Optional import psutil -from fastapi import APIRouter, Depends, Request +from fastapi import APIRouter, Depends, Query, Request from fastapi.responses import JSONResponse from pathvalidate import sanitize_filepath from peewee import DoesNotExist @@ -19,8 +19,20 @@ from frigate.api.auth import ( require_camera_access, require_role, ) -from frigate.api.defs.request.export_recordings_body import ExportRecordingsBody +from frigate.api.defs.request.export_case_body import ( + ExportCaseAssignBody, + ExportCaseCreateBody, + ExportCaseUpdateBody, +) +from frigate.api.defs.request.export_recordings_body import ( + ExportRecordingsBody, + ExportRecordingsCustomBody, +) from frigate.api.defs.request.export_rename_body import ExportRenameBody +from frigate.api.defs.response.export_case_response import ( + ExportCaseModel, + ExportCasesResponse, +) from frigate.api.defs.response.export_response import ( ExportModel, ExportsResponse, @@ -29,9 +41,9 @@ from frigate.api.defs.response.export_response import ( from frigate.api.defs.response.generic_response import GenericResponse from frigate.api.defs.tags import Tags from frigate.const import 
CLIPS_DIR, EXPORT_DIR -from frigate.models import Export, Previews, Recordings +from frigate.models import Export, ExportCase, Previews, Recordings from frigate.record.export import ( - PlaybackFactorEnum, + DEFAULT_TIME_LAPSE_FFMPEG_ARGS, PlaybackSourceEnum, RecordingExporter, ) @@ -52,17 +64,182 @@ router = APIRouter(tags=[Tags.export]) ) def get_exports( allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter), + export_case_id: Optional[str] = None, + cameras: Optional[str] = Query(default="all"), + start_date: Optional[float] = None, + end_date: Optional[float] = None, ): - exports = ( - Export.select() - .where(Export.camera << allowed_cameras) - .order_by(Export.date.desc()) - .dicts() - .iterator() - ) + query = Export.select().where(Export.camera << allowed_cameras) + + if export_case_id is not None: + if export_case_id == "unassigned": + query = query.where(Export.export_case.is_null(True)) + else: + query = query.where(Export.export_case == export_case_id) + + if cameras and cameras != "all": + requested = set(cameras.split(",")) + filtered_cameras = list(requested.intersection(allowed_cameras)) + if not filtered_cameras: + return JSONResponse(content=[]) + query = query.where(Export.camera << filtered_cameras) + + if start_date is not None: + query = query.where(Export.date >= start_date) + + if end_date is not None: + query = query.where(Export.date <= end_date) + + exports = query.order_by(Export.date.desc()).dicts().iterator() return JSONResponse(content=[e for e in exports]) +@router.get( + "/cases", + response_model=ExportCasesResponse, + dependencies=[Depends(allow_any_authenticated())], + summary="Get export cases", + description="Gets all export cases from the database.", +) +def get_export_cases(): + cases = ( + ExportCase.select().order_by(ExportCase.created_at.desc()).dicts().iterator() + ) + return JSONResponse(content=[c for c in cases]) + + +@router.post( + "/cases", + response_model=ExportCaseModel, + 
@router.post(
    "/cases",
    response_model=ExportCaseModel,
    dependencies=[Depends(require_role(["admin"]))],
    summary="Create export case",
    description="Creates a new export case.",
)
def create_export_case(body: ExportCaseCreateBody):
    """Create a new export case with a random 12-character id.

    FIX: the previous implementation stamped created_at/updated_at with
    Path().stat().st_mtime — the modification time of the process working
    directory — instead of the current time.
    """
    # Local import: datetime is not in this module's header.
    from datetime import datetime

    now = datetime.now().timestamp()
    case = ExportCase.create(
        id="".join(random.choices(string.ascii_lowercase + string.digits, k=12)),
        name=body.name,
        description=body.description,
        created_at=now,
        updated_at=now,
    )
    return JSONResponse(content=model_to_dict(case))


@router.get(
    "/cases/{case_id}",
    response_model=ExportCaseModel,
    dependencies=[Depends(allow_any_authenticated())],
    summary="Get a single export case",
    description="Gets a specific export case by ID.",
)
def get_export_case(case_id: str):
    """Return a single export case, or 404 if it does not exist."""
    try:
        case = ExportCase.get(ExportCase.id == case_id)
        return JSONResponse(content=model_to_dict(case))
    except DoesNotExist:
        return JSONResponse(
            content={"success": False, "message": "Export case not found"},
            status_code=404,
        )


@router.patch(
    "/cases/{case_id}",
    response_model=GenericResponse,
    dependencies=[Depends(require_role(["admin"]))],
    summary="Update export case",
    description="Updates an existing export case.",
)
def update_export_case(case_id: str, body: ExportCaseUpdateBody):
    """Partially update an export case (only provided fields change).

    FIX: now also bumps updated_at, which previously stayed frozen at the
    creation value despite being documented as "last updated".
    """
    # Local import: datetime is not in this module's header.
    from datetime import datetime

    try:
        case = ExportCase.get(ExportCase.id == case_id)
    except DoesNotExist:
        return JSONResponse(
            content={"success": False, "message": "Export case not found"},
            status_code=404,
        )

    if body.name is not None:
        case.name = body.name
    if body.description is not None:
        case.description = body.description

    # Record when the case was last modified.
    case.updated_at = datetime.now().timestamp()
    case.save()

    return JSONResponse(
        content={"success": True, "message": "Successfully updated export case."}
    )


@router.delete(
    "/cases/{case_id}",
    response_model=GenericResponse,
    dependencies=[Depends(require_role(["admin"]))],
    summary="Delete export case",
    description="""Deletes an export case.\n    Exports that reference this case will have their export_case set to null.\n    """,
)
def delete_export_case(case_id: str):
    """Delete an export case, detaching (but keeping) its exports."""
    try:
        case = ExportCase.get(ExportCase.id == case_id)
    except DoesNotExist:
        return JSONResponse(
            content={"success": False, "message": "Export case not found"},
            status_code=404,
        )

    # Unassign exports from this case but keep the exports themselves
    Export.update(export_case=None).where(Export.export_case == case).execute()

    case.delete_instance()

    return JSONResponse(
        content={"success": True, "message": "Successfully deleted export case."}
    )


@router.patch(
    "/export/{export_id}/case",
    response_model=GenericResponse,
    dependencies=[Depends(require_role(["admin"]))],
    summary="Assign export to case",
    description=(
        "Assigns an export to a case, or unassigns it if export_case_id is null."
    ),
)
async def assign_export_case(
    export_id: str,
    body: ExportCaseAssignBody,
    request: Request,
):
    """Assign or unassign an export to/from an export case.

    Verifies the caller has access to the export's camera, and that the
    target case exists when assigning.
    """
    try:
        export: Export = Export.get(Export.id == export_id)
        # Enforce per-camera access before touching the record.
        await require_camera_access(export.camera, request=request)
    except DoesNotExist:
        return JSONResponse(
            content={"success": False, "message": "Export not found."},
            status_code=404,
        )

    if body.export_case_id is not None:
        # Validate the target case exists before assigning.
        try:
            ExportCase.get(ExportCase.id == body.export_case_id)
        except DoesNotExist:
            return JSONResponse(
                content={"success": False, "message": "Export case not found."},
                status_code=404,
            )
        export.export_case = body.export_case_id
    else:
        export.export_case = None

    export.save()

    return JSONResponse(
        content={"success": True, "message": "Successfully updated export case."}
    )
ExportCase.get(ExportCase.id == export_case_id) + except DoesNotExist: + return JSONResponse( + content={"success": False, "message": "Export case not found"}, + status_code=404, + ) + # Ensure that existing_image is a valid path if existing_image and not existing_image.startswith(CLIPS_DIR): return JSONResponse( @@ -151,16 +337,12 @@ def export_recording( existing_image, int(start_time), int(end_time), - ( - PlaybackFactorEnum[playback_factor] - if playback_factor in PlaybackFactorEnum.__members__.values() - else PlaybackFactorEnum.realtime - ), ( PlaybackSourceEnum[playback_source] if playback_source in PlaybackSourceEnum.__members__.values() else PlaybackSourceEnum.recordings ), + export_case_id, ) exporter.start() return JSONResponse( @@ -271,6 +453,138 @@ async def export_delete(event_id: str, request: Request): ) +@router.post( + "/export/custom/{camera_name}/start/{start_time}/end/{end_time}", + response_model=StartExportResponse, + dependencies=[Depends(require_camera_access)], + summary="Start custom recording export", + description="""Starts an export of a recording for the specified time range using custom FFmpeg arguments. + The export can be from recordings or preview footage. Returns the export ID if + successful, or an error message if the camera is invalid or no recordings/previews + are found for the time range. 
If ffmpeg_input_args and ffmpeg_output_args are not provided, + defaults to timelapse export settings.""", +) +def export_recording_custom( + request: Request, + camera_name: str, + start_time: float, + end_time: float, + body: ExportRecordingsCustomBody, +): + if not camera_name or not request.app.frigate_config.cameras.get(camera_name): + return JSONResponse( + content=( + {"success": False, "message": f"{camera_name} is not a valid camera."} + ), + status_code=404, + ) + + playback_source = body.source + friendly_name = body.name + existing_image = sanitize_filepath(body.image_path) if body.image_path else None + ffmpeg_input_args = body.ffmpeg_input_args + ffmpeg_output_args = body.ffmpeg_output_args + cpu_fallback = body.cpu_fallback + + export_case_id = body.export_case_id + if export_case_id is not None: + try: + ExportCase.get(ExportCase.id == export_case_id) + except DoesNotExist: + return JSONResponse( + content={"success": False, "message": "Export case not found"}, + status_code=404, + ) + + # Ensure that existing_image is a valid path + if existing_image and not existing_image.startswith(CLIPS_DIR): + return JSONResponse( + content=({"success": False, "message": "Invalid image path"}), + status_code=400, + ) + + if playback_source == "recordings": + recordings_count = ( + Recordings.select() + .where( + Recordings.start_time.between(start_time, end_time) + | Recordings.end_time.between(start_time, end_time) + | ( + (start_time > Recordings.start_time) + & (end_time < Recordings.end_time) + ) + ) + .where(Recordings.camera == camera_name) + .count() + ) + + if recordings_count <= 0: + return JSONResponse( + content=( + {"success": False, "message": "No recordings found for time range"} + ), + status_code=400, + ) + else: + previews_count = ( + Previews.select() + .where( + Previews.start_time.between(start_time, end_time) + | Previews.end_time.between(start_time, end_time) + | ((start_time > Previews.start_time) & (end_time < Previews.end_time)) + ) + 
.where(Previews.camera == camera_name) + .count() + ) + + if not is_current_hour(start_time) and previews_count <= 0: + return JSONResponse( + content=( + {"success": False, "message": "No previews found for time range"} + ), + status_code=400, + ) + + export_id = f"{camera_name}_{''.join(random.choices(string.ascii_lowercase + string.digits, k=6))}" + + # Set default values if not provided (timelapse defaults) + if ffmpeg_input_args is None: + ffmpeg_input_args = "" + + if ffmpeg_output_args is None: + ffmpeg_output_args = DEFAULT_TIME_LAPSE_FFMPEG_ARGS + + exporter = RecordingExporter( + request.app.frigate_config, + export_id, + camera_name, + friendly_name, + existing_image, + int(start_time), + int(end_time), + ( + PlaybackSourceEnum[playback_source] + if playback_source in PlaybackSourceEnum.__members__.values() + else PlaybackSourceEnum.recordings + ), + export_case_id, + ffmpeg_input_args, + ffmpeg_output_args, + cpu_fallback, + ) + exporter.start() + return JSONResponse( + content=( + { + "success": True, + "message": "Starting export of recording.", + "export_id": export_id, + } + ), + status_code=200, + ) + + @router.get( "/exports/{export_id}", response_model=ExportModel, diff --git a/frigate/api/fastapi_app.py b/frigate/api/fastapi_app.py index 48c97dfaf..0a731bcee 100644 --- a/frigate/api/fastapi_app.py +++ b/frigate/api/fastapi_app.py @@ -16,12 +16,16 @@ from frigate.api import app as main_app from frigate.api import ( auth, camera, + chat, classification, + debug_replay, event, export, media, + motion_search, notification, preview, + record, review, ) from frigate.api.auth import get_jwt_secret, limiter, require_admin_by_default @@ -30,7 +34,9 @@ from frigate.comms.event_metadata_updater import ( ) from frigate.config import FrigateConfig from frigate.config.camera.updater import CameraConfigUpdatePublisher +from frigate.debug_replay import DebugReplayManager from frigate.embeddings import EmbeddingsContext +from frigate.genai import 
GenAIClientManager from frigate.ptz.onvif import OnvifController from frigate.stats.emitter import StatsEmitter from frigate.storage import StorageMaintainer @@ -62,6 +68,7 @@ def create_fastapi_app( stats_emitter: StatsEmitter, event_metadata_updater: EventMetadataPublisher, config_publisher: CameraConfigUpdatePublisher, + replay_manager: DebugReplayManager, enforce_default_admin: bool = True, ): logger.info("Starting FastAPI app") @@ -120,6 +127,7 @@ def create_fastapi_app( # Order of include_router matters: https://fastapi.tiangolo.com/tutorial/path-params/#order-matters app.include_router(auth.router) app.include_router(camera.router) + app.include_router(chat.router) app.include_router(classification.router) app.include_router(review.router) app.include_router(main_app.router) @@ -128,8 +136,12 @@ def create_fastapi_app( app.include_router(export.router) app.include_router(event.router) app.include_router(media.router) + app.include_router(motion_search.router) + app.include_router(record.router) + app.include_router(debug_replay.router) # App Properties app.frigate_config = frigate_config + app.genai_manager = GenAIClientManager(frigate_config) app.embeddings = embeddings app.detected_frames_processor = detected_frames_processor app.storage_maintainer = storage_maintainer @@ -138,6 +150,7 @@ def create_fastapi_app( app.stats_emitter = stats_emitter app.event_metadata_updater = event_metadata_updater app.config_publisher = config_publisher + app.replay_manager = replay_manager if frigate_config.auth.enabled: secret = get_jwt_secret() diff --git a/frigate/api/media.py b/frigate/api/media.py index 971bfef83..2ddabc631 100644 --- a/frigate/api/media.py +++ b/frigate/api/media.py @@ -8,9 +8,8 @@ import os import subprocess as sp import time from datetime import datetime, timedelta, timezone -from functools import reduce from pathlib import Path as FilePath -from typing import Any, List +from typing import Any from urllib.parse import unquote import cv2 @@ -19,21 
+18,19 @@ import pytz from fastapi import APIRouter, Depends, Path, Query, Request, Response from fastapi.responses import FileResponse, JSONResponse, StreamingResponse from pathvalidate import sanitize_filename -from peewee import DoesNotExist, fn, operator +from peewee import DoesNotExist, fn from tzlocal import get_localzone_name from frigate.api.auth import ( allow_any_authenticated, - get_allowed_cameras_for_filter, require_camera_access, + require_role, ) from frigate.api.defs.query.media_query_parameters import ( Extension, MediaEventsSnapshotQueryParams, MediaLatestFrameQueryParams, MediaMjpegFeedQueryParams, - MediaRecordingsAvailabilityQueryParams, - MediaRecordingsSummaryQueryParams, ) from frigate.api.defs.tags import Tags from frigate.camera.state import CameraState @@ -44,13 +41,12 @@ from frigate.const import ( INSTALL_DIR, MAX_SEGMENT_DURATION, PREVIEW_FRAME_TYPE, - RECORD_DIR, ) from frigate.models import Event, Previews, Recordings, Regions, ReviewSegment +from frigate.output.preview import get_most_recent_preview_frame from frigate.track.object_processing import TrackedObjectProcessor from frigate.util.file import get_event_thumbnail_bytes from frigate.util.image import get_image_from_recording -from frigate.util.time import get_dst_transitions logger = logging.getLogger(__name__) @@ -131,7 +127,9 @@ async def camera_ptz_info(request: Request, camera_name: str): @router.get( - "/{camera_name}/latest.{extension}", dependencies=[Depends(require_camera_access)] + "/{camera_name}/latest.{extension}", + dependencies=[Depends(require_camera_access)], + description="Returns the latest frame from the specified camera in the requested format (jpg, png, webp). 
Falls back to preview frames if the camera is offline.", ) async def latest_frame( request: Request, @@ -165,20 +163,37 @@ async def latest_frame( or 10 ) + is_offline = False if frame is None or datetime.now().timestamp() > ( frame_processor.get_current_frame_time(camera_name) + retry_interval ): - if request.app.camera_error_image is None: - error_image = glob.glob( - os.path.join(INSTALL_DIR, "frigate/images/camera-error.jpg") - ) + last_frame_time = frame_processor.get_current_frame_time(camera_name) + preview_path = get_most_recent_preview_frame( + camera_name, before=last_frame_time + ) - if len(error_image) > 0: - request.app.camera_error_image = cv2.imread( - error_image[0], cv2.IMREAD_UNCHANGED + if preview_path: + logger.debug(f"Using most recent preview frame for {camera_name}") + frame = cv2.imread(preview_path, cv2.IMREAD_UNCHANGED) + + if frame is not None: + is_offline = True + + if frame is None or not is_offline: + logger.debug( + f"No live or preview frame available for {camera_name}. Using error image." 
+ ) + if request.app.camera_error_image is None: + error_image = glob.glob( + os.path.join(INSTALL_DIR, "frigate/images/camera-error.jpg") ) - frame = request.app.camera_error_image + if len(error_image) > 0: + request.app.camera_error_image = cv2.imread( + error_image[0], cv2.IMREAD_UNCHANGED + ) + + frame = request.app.camera_error_image height = int(params.height or str(frame.shape[0])) width = int(height * frame.shape[1] / frame.shape[0]) @@ -200,14 +215,18 @@ async def latest_frame( frame = cv2.resize(frame, dsize=(width, height), interpolation=cv2.INTER_AREA) _, img = cv2.imencode(f".{extension.value}", frame, quality_params) + + headers = { + "Cache-Control": "no-store" if not params.store else "private, max-age=60", + } + + if is_offline: + headers["X-Frigate-Offline"] = "true" + return Response( content=img.tobytes(), media_type=extension.get_mime_type(), - headers={ - "Cache-Control": "no-store" - if not params.store - else "private, max-age=60", - }, + headers=headers, ) elif ( camera_name == "birdseye" @@ -397,333 +416,6 @@ async def submit_recording_snapshot_to_plus( ) -@router.get("/recordings/storage", dependencies=[Depends(allow_any_authenticated())]) -def get_recordings_storage_usage(request: Request): - recording_stats = request.app.stats_emitter.get_latest_stats()["service"][ - "storage" - ][RECORD_DIR] - - if not recording_stats: - return JSONResponse({}) - - total_mb = recording_stats["total"] - - camera_usages: dict[str, dict] = ( - request.app.storage_maintainer.calculate_camera_usages() - ) - - for camera_name in camera_usages.keys(): - if camera_usages.get(camera_name, {}).get("usage"): - camera_usages[camera_name]["usage_percent"] = ( - camera_usages.get(camera_name, {}).get("usage", 0) / total_mb - ) * 100 - - return JSONResponse(content=camera_usages) - - -@router.get("/recordings/summary", dependencies=[Depends(allow_any_authenticated())]) -def all_recordings_summary( - request: Request, - params: MediaRecordingsSummaryQueryParams = 
Depends(), - allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter), -): - """Returns true/false by day indicating if recordings exist""" - - cameras = params.cameras - if cameras != "all": - requested = set(unquote(cameras).split(",")) - filtered = requested.intersection(allowed_cameras) - if not filtered: - return JSONResponse(content={}) - camera_list = list(filtered) - else: - camera_list = allowed_cameras - - time_range_query = ( - Recordings.select( - fn.MIN(Recordings.start_time).alias("min_time"), - fn.MAX(Recordings.start_time).alias("max_time"), - ) - .where(Recordings.camera << camera_list) - .dicts() - .get() - ) - - min_time = time_range_query.get("min_time") - max_time = time_range_query.get("max_time") - - if min_time is None or max_time is None: - return JSONResponse(content={}) - - dst_periods = get_dst_transitions(params.timezone, min_time, max_time) - - days: dict[str, bool] = {} - - for period_start, period_end, period_offset in dst_periods: - hours_offset = int(period_offset / 60 / 60) - minutes_offset = int(period_offset / 60 - hours_offset * 60) - period_hour_modifier = f"{hours_offset} hour" - period_minute_modifier = f"{minutes_offset} minute" - - period_query = ( - Recordings.select( - fn.strftime( - "%Y-%m-%d", - fn.datetime( - Recordings.start_time, - "unixepoch", - period_hour_modifier, - period_minute_modifier, - ), - ).alias("day") - ) - .where( - (Recordings.camera << camera_list) - & (Recordings.end_time >= period_start) - & (Recordings.start_time <= period_end) - ) - .group_by( - fn.strftime( - "%Y-%m-%d", - fn.datetime( - Recordings.start_time, - "unixepoch", - period_hour_modifier, - period_minute_modifier, - ), - ) - ) - .order_by(Recordings.start_time.desc()) - .namedtuples() - ) - - for g in period_query: - days[g.day] = True - - return JSONResponse(content=dict(sorted(days.items()))) - - -@router.get( - "/{camera_name}/recordings/summary", dependencies=[Depends(require_camera_access)] -) -async def 
recordings_summary(camera_name: str, timezone: str = "utc"): - """Returns hourly summary for recordings of given camera""" - - time_range_query = ( - Recordings.select( - fn.MIN(Recordings.start_time).alias("min_time"), - fn.MAX(Recordings.start_time).alias("max_time"), - ) - .where(Recordings.camera == camera_name) - .dicts() - .get() - ) - - min_time = time_range_query.get("min_time") - max_time = time_range_query.get("max_time") - - days: dict[str, dict] = {} - - if min_time is None or max_time is None: - return JSONResponse(content=list(days.values())) - - dst_periods = get_dst_transitions(timezone, min_time, max_time) - - for period_start, period_end, period_offset in dst_periods: - hours_offset = int(period_offset / 60 / 60) - minutes_offset = int(period_offset / 60 - hours_offset * 60) - period_hour_modifier = f"{hours_offset} hour" - period_minute_modifier = f"{minutes_offset} minute" - - recording_groups = ( - Recordings.select( - fn.strftime( - "%Y-%m-%d %H", - fn.datetime( - Recordings.start_time, - "unixepoch", - period_hour_modifier, - period_minute_modifier, - ), - ).alias("hour"), - fn.SUM(Recordings.duration).alias("duration"), - fn.SUM(Recordings.motion).alias("motion"), - fn.SUM(Recordings.objects).alias("objects"), - ) - .where( - (Recordings.camera == camera_name) - & (Recordings.end_time >= period_start) - & (Recordings.start_time <= period_end) - ) - .group_by((Recordings.start_time + period_offset).cast("int") / 3600) - .order_by(Recordings.start_time.desc()) - .namedtuples() - ) - - event_groups = ( - Event.select( - fn.strftime( - "%Y-%m-%d %H", - fn.datetime( - Event.start_time, - "unixepoch", - period_hour_modifier, - period_minute_modifier, - ), - ).alias("hour"), - fn.COUNT(Event.id).alias("count"), - ) - .where(Event.camera == camera_name, Event.has_clip) - .where( - (Event.start_time >= period_start) & (Event.start_time <= period_end) - ) - .group_by((Event.start_time + period_offset).cast("int") / 3600) - .namedtuples() - ) - - 
event_map = {g.hour: g.count for g in event_groups} - - for recording_group in recording_groups: - parts = recording_group.hour.split() - hour = parts[1] - day = parts[0] - events_count = event_map.get(recording_group.hour, 0) - hour_data = { - "hour": hour, - "events": events_count, - "motion": recording_group.motion, - "objects": recording_group.objects, - "duration": round(recording_group.duration), - } - if day in days: - # merge counts if already present (edge-case at DST boundary) - days[day]["events"] += events_count or 0 - days[day]["hours"].append(hour_data) - else: - days[day] = { - "events": events_count or 0, - "hours": [hour_data], - "day": day, - } - - return JSONResponse(content=list(days.values())) - - -@router.get("/{camera_name}/recordings", dependencies=[Depends(require_camera_access)]) -async def recordings( - camera_name: str, - after: float = (datetime.now() - timedelta(hours=1)).timestamp(), - before: float = datetime.now().timestamp(), -): - """Return specific camera recordings between the given 'after'/'end' times. 
If not provided the last hour will be used""" - recordings = ( - Recordings.select( - Recordings.id, - Recordings.start_time, - Recordings.end_time, - Recordings.segment_size, - Recordings.motion, - Recordings.objects, - Recordings.duration, - ) - .where( - Recordings.camera == camera_name, - Recordings.end_time >= after, - Recordings.start_time <= before, - ) - .order_by(Recordings.start_time) - .dicts() - .iterator() - ) - - return JSONResponse(content=list(recordings)) - - -@router.get( - "/recordings/unavailable", - response_model=list[dict], - dependencies=[Depends(allow_any_authenticated())], -) -async def no_recordings( - request: Request, - params: MediaRecordingsAvailabilityQueryParams = Depends(), - allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter), -): - """Get time ranges with no recordings.""" - cameras = params.cameras - if cameras != "all": - requested = set(unquote(cameras).split(",")) - filtered = requested.intersection(allowed_cameras) - if not filtered: - return JSONResponse(content=[]) - cameras = ",".join(filtered) - else: - cameras = allowed_cameras - - before = params.before or datetime.datetime.now().timestamp() - after = ( - params.after - or (datetime.datetime.now() - datetime.timedelta(hours=1)).timestamp() - ) - scale = params.scale - - clauses = [(Recordings.end_time >= after) & (Recordings.start_time <= before)] - if cameras != "all": - camera_list = cameras.split(",") - clauses.append((Recordings.camera << camera_list)) - else: - camera_list = allowed_cameras - - # Get recording start times - data: list[Recordings] = ( - Recordings.select(Recordings.start_time, Recordings.end_time) - .where(reduce(operator.and_, clauses)) - .order_by(Recordings.start_time.asc()) - .dicts() - .iterator() - ) - - # Convert recordings to list of (start, end) tuples - recordings = [(r["start_time"], r["end_time"]) for r in data] - - # Iterate through time segments and check if each has any recording - no_recording_segments = [] - 
current = after - current_gap_start = None - - while current < before: - segment_end = min(current + scale, before) - - # Check if this segment overlaps with any recording - has_recording = any( - rec_start < segment_end and rec_end > current - for rec_start, rec_end in recordings - ) - - if not has_recording: - # This segment has no recordings - if current_gap_start is None: - current_gap_start = current # Start a new gap - else: - # This segment has recordings - if current_gap_start is not None: - # End the current gap and append it - no_recording_segments.append( - {"start_time": int(current_gap_start), "end_time": int(current)} - ) - current_gap_start = None - - current = segment_end - - # Append the last gap if it exists - if current_gap_start is not None: - no_recording_segments.append( - {"start_time": int(current_gap_start), "end_time": int(before)} - ) - - return JSONResponse(content=no_recording_segments) - - @router.get( "/{camera_name}/start/{start_ts}/end/{end_ts}/clip.mp4", dependencies=[Depends(require_camera_access)], @@ -1046,6 +738,7 @@ async def event_snapshot( ): event_complete = False jpg_bytes = None + frame_time = 0 try: event = Event.get(Event.id == event_id, Event.end_time != None) event_complete = True @@ -1070,7 +763,7 @@ async def event_snapshot( if event_id in camera_state.tracked_objects: tracked_obj = camera_state.tracked_objects.get(event_id) if tracked_obj is not None: - jpg_bytes = tracked_obj.get_img_bytes( + jpg_bytes, frame_time = tracked_obj.get_img_bytes( ext="jpg", timestamp=params.timestamp, bounding_box=params.bbox, @@ -1099,6 +792,7 @@ async def event_snapshot( headers = { "Content-Type": "image/jpeg", "Cache-Control": "private, max-age=31536000" if event_complete else "no-store", + "X-Frame-Time": str(frame_time), } if params.download: @@ -1312,6 +1006,23 @@ def grid_snapshot( ) +@router.delete( + "/{camera_name}/region_grid", dependencies=[Depends(require_role("admin"))] +) +def clear_region_grid(request: Request, 
camera_name: str): + """Clear the region grid for a camera.""" + if camera_name not in request.app.frigate_config.cameras: + return JSONResponse( + content={"success": False, "message": "Camera not found"}, + status_code=404, + ) + + Regions.delete().where(Regions.camera == camera_name).execute() + return JSONResponse( + content={"success": True, "message": "Region grid cleared"}, + ) + + @router.get( "/events/{event_id}/snapshot-clean.webp", dependencies=[Depends(require_camera_access)], diff --git a/frigate/api/motion_search.py b/frigate/api/motion_search.py new file mode 100644 index 000000000..09bf8026d --- /dev/null +++ b/frigate/api/motion_search.py @@ -0,0 +1,292 @@ +"""Motion search API for detecting changes within a region of interest.""" + +import logging +from typing import Any, List, Optional + +from fastapi import APIRouter, Depends, Request +from fastapi.responses import JSONResponse +from pydantic import BaseModel, Field + +from frigate.api.auth import require_camera_access +from frigate.api.defs.tags import Tags +from frigate.jobs.motion_search import ( + cancel_motion_search_job, + get_motion_search_job, + start_motion_search_job, +) +from frigate.types import JobStatusTypesEnum + +logger = logging.getLogger(__name__) + +router = APIRouter(tags=[Tags.motion_search]) + + +class MotionSearchRequest(BaseModel): + """Request body for motion search.""" + + start_time: float = Field(description="Start timestamp for the search range") + end_time: float = Field(description="End timestamp for the search range") + polygon_points: List[List[float]] = Field( + description="List of [x, y] normalized coordinates (0-1) defining the ROI polygon" + ) + threshold: int = Field( + default=30, + ge=1, + le=255, + description="Pixel difference threshold (1-255)", + ) + min_area: float = Field( + default=5.0, + ge=0.1, + le=100.0, + description="Minimum change area as a percentage of the ROI", + ) + frame_skip: int = Field( + default=5, + ge=1, + le=30, + 
description="Process every Nth frame (1=all frames, 5=every 5th frame)", + ) + parallel: bool = Field( + default=False, + description="Enable parallel scanning across segments", + ) + max_results: int = Field( + default=25, + ge=1, + le=200, + description="Maximum number of search results to return", + ) + + +class MotionSearchResult(BaseModel): + """A single search result with timestamp and change info.""" + + timestamp: float = Field(description="Timestamp where change was detected") + change_percentage: float = Field(description="Percentage of ROI area that changed") + + +class MotionSearchMetricsResponse(BaseModel): + """Metrics collected during motion search execution.""" + + segments_scanned: int = 0 + segments_processed: int = 0 + metadata_inactive_segments: int = 0 + heatmap_roi_skip_segments: int = 0 + fallback_full_range_segments: int = 0 + frames_decoded: int = 0 + wall_time_seconds: float = 0.0 + segments_with_errors: int = 0 + + +class MotionSearchStartResponse(BaseModel): + """Response when motion search job starts.""" + + success: bool + message: str + job_id: str + + +class MotionSearchStatusResponse(BaseModel): + """Response containing job status and results.""" + + success: bool + message: str + status: str # "queued", "running", "success", "failed", or "cancelled" + results: Optional[List[MotionSearchResult]] = None + total_frames_processed: Optional[int] = None + error_message: Optional[str] = None + metrics: Optional[MotionSearchMetricsResponse] = None + + +@router.post( + "/{camera_name}/search/motion", + response_model=MotionSearchStartResponse, + dependencies=[Depends(require_camera_access)], + summary="Start motion search job", + description="""Starts an asynchronous search for significant motion changes within + a user-defined Region of Interest (ROI) over a specified time range. 
Returns a job_id + that can be used to poll for results.""", +) +async def start_motion_search( + request: Request, + camera_name: str, + body: MotionSearchRequest, +): + """Start an async motion search job.""" + config = request.app.frigate_config + + if camera_name not in config.cameras: + return JSONResponse( + content={"success": False, "message": f"Camera {camera_name} not found"}, + status_code=404, + ) + + # Validate polygon has at least 3 points + if len(body.polygon_points) < 3: + return JSONResponse( + content={ + "success": False, + "message": "Polygon must have at least 3 points", + }, + status_code=400, + ) + + # Validate time range + if body.start_time >= body.end_time: + return JSONResponse( + content={ + "success": False, + "message": "Start time must be before end time", + }, + status_code=400, + ) + + # Start the job using the jobs module + job_id = start_motion_search_job( + config=config, + camera_name=camera_name, + start_time=body.start_time, + end_time=body.end_time, + polygon_points=body.polygon_points, + threshold=body.threshold, + min_area=body.min_area, + frame_skip=body.frame_skip, + parallel=body.parallel, + max_results=body.max_results, + ) + + return JSONResponse( + content={ + "success": True, + "message": "Search job started", + "job_id": job_id, + } + ) + + +@router.get( + "/{camera_name}/search/motion/{job_id}", + response_model=MotionSearchStatusResponse, + dependencies=[Depends(require_camera_access)], + summary="Get motion search job status", + description="Returns the status and results (if complete) of a motion search job.", +) +async def get_motion_search_status_endpoint( + request: Request, + camera_name: str, + job_id: str, +): + """Get the status of a motion search job.""" + config = request.app.frigate_config + + if camera_name not in config.cameras: + return JSONResponse( + content={"success": False, "message": f"Camera {camera_name} not found"}, + status_code=404, + ) + + job = get_motion_search_job(job_id) + if not 
job: + return JSONResponse( + content={"success": False, "message": "Job not found"}, + status_code=404, + ) + + api_status = job.status + + # Build response content + response_content: dict[str, Any] = { + "success": api_status != JobStatusTypesEnum.failed, + "status": api_status, + } + + if api_status == JobStatusTypesEnum.failed: + response_content["message"] = job.error_message or "Search failed" + response_content["error_message"] = job.error_message + elif api_status == JobStatusTypesEnum.cancelled: + response_content["message"] = "Search cancelled" + response_content["total_frames_processed"] = job.total_frames_processed + elif api_status == JobStatusTypesEnum.success: + response_content["message"] = "Search complete" + if job.results: + response_content["results"] = job.results.get("results", []) + response_content["total_frames_processed"] = job.results.get( + "total_frames_processed", job.total_frames_processed + ) + else: + response_content["results"] = [] + response_content["total_frames_processed"] = job.total_frames_processed + else: + response_content["message"] = "Job processing" + response_content["total_frames_processed"] = job.total_frames_processed + # Include partial results if available (streaming) + if job.results: + response_content["results"] = job.results.get("results", []) + response_content["total_frames_processed"] = job.results.get( + "total_frames_processed", job.total_frames_processed + ) + + # Include metrics if available + if job.metrics: + response_content["metrics"] = job.metrics.to_dict() + + return JSONResponse(content=response_content) + + +@router.post( + "/{camera_name}/search/motion/{job_id}/cancel", + dependencies=[Depends(require_camera_access)], + summary="Cancel motion search job", + description="Cancels an active motion search job if it is still processing.", +) +async def cancel_motion_search_endpoint( + request: Request, + camera_name: str, + job_id: str, +): + """Cancel an active motion search job.""" + config = 
request.app.frigate_config + + if camera_name not in config.cameras: + return JSONResponse( + content={"success": False, "message": f"Camera {camera_name} not found"}, + status_code=404, + ) + + job = get_motion_search_job(job_id) + if not job: + return JSONResponse( + content={"success": False, "message": "Job not found"}, + status_code=404, + ) + + # Check if already finished + api_status = job.status + if api_status not in (JobStatusTypesEnum.queued, JobStatusTypesEnum.running): + return JSONResponse( + content={ + "success": True, + "message": "Job already finished", + "status": api_status, + } + ) + + # Request cancellation + cancelled = cancel_motion_search_job(job_id) + if cancelled: + return JSONResponse( + content={ + "success": True, + "message": "Search cancelled", + "status": "cancelled", + } + ) + + return JSONResponse( + content={ + "success": False, + "message": "Failed to cancel job", + }, + status_code=500, + ) diff --git a/frigate/api/record.py b/frigate/api/record.py new file mode 100644 index 000000000..6eeb9fbe6 --- /dev/null +++ b/frigate/api/record.py @@ -0,0 +1,480 @@ +"""Recording APIs.""" + +import logging +from datetime import datetime, timedelta +from functools import reduce +from pathlib import Path +from typing import List +from urllib.parse import unquote + +from fastapi import APIRouter, Depends, Request +from fastapi import Path as PathParam +from fastapi.responses import JSONResponse +from peewee import fn, operator + +from frigate.api.auth import ( + allow_any_authenticated, + get_allowed_cameras_for_filter, + require_camera_access, + require_role, +) +from frigate.api.defs.query.recordings_query_parameters import ( + MediaRecordingsAvailabilityQueryParams, + MediaRecordingsSummaryQueryParams, + RecordingsDeleteQueryParams, +) +from frigate.api.defs.response.generic_response import GenericResponse +from frigate.api.defs.tags import Tags +from frigate.const import RECORD_DIR +from frigate.models import Event, Recordings +from 
frigate.util.time import get_dst_transitions + +logger = logging.getLogger(__name__) + +router = APIRouter(tags=[Tags.recordings]) + + +@router.get("/recordings/storage", dependencies=[Depends(allow_any_authenticated())]) +def get_recordings_storage_usage(request: Request): + recording_stats = request.app.stats_emitter.get_latest_stats()["service"][ + "storage" + ][RECORD_DIR] + + if not recording_stats: + return JSONResponse({}) + + total_mb = recording_stats["total"] + + camera_usages: dict[str, dict] = ( + request.app.storage_maintainer.calculate_camera_usages() + ) + + for camera_name in camera_usages.keys(): + if camera_usages.get(camera_name, {}).get("usage"): + camera_usages[camera_name]["usage_percent"] = ( + camera_usages.get(camera_name, {}).get("usage", 0) / total_mb + ) * 100 + + return JSONResponse(content=camera_usages) + + +@router.get("/recordings/summary", dependencies=[Depends(allow_any_authenticated())]) +def all_recordings_summary( + request: Request, + params: MediaRecordingsSummaryQueryParams = Depends(), + allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter), +): + """Returns true/false by day indicating if recordings exist""" + + cameras = params.cameras + if cameras != "all": + requested = set(unquote(cameras).split(",")) + filtered = requested.intersection(allowed_cameras) + if not filtered: + return JSONResponse(content={}) + camera_list = list(filtered) + else: + camera_list = allowed_cameras + + time_range_query = ( + Recordings.select( + fn.MIN(Recordings.start_time).alias("min_time"), + fn.MAX(Recordings.start_time).alias("max_time"), + ) + .where(Recordings.camera << camera_list) + .dicts() + .get() + ) + + min_time = time_range_query.get("min_time") + max_time = time_range_query.get("max_time") + + if min_time is None or max_time is None: + return JSONResponse(content={}) + + dst_periods = get_dst_transitions(params.timezone, min_time, max_time) + + days: dict[str, bool] = {} + + for period_start, period_end, 
period_offset in dst_periods: + hours_offset = int(period_offset / 60 / 60) + minutes_offset = int(period_offset / 60 - hours_offset * 60) + period_hour_modifier = f"{hours_offset} hour" + period_minute_modifier = f"{minutes_offset} minute" + + period_query = ( + Recordings.select( + fn.strftime( + "%Y-%m-%d", + fn.datetime( + Recordings.start_time, + "unixepoch", + period_hour_modifier, + period_minute_modifier, + ), + ).alias("day") + ) + .where( + (Recordings.camera << camera_list) + & (Recordings.end_time >= period_start) + & (Recordings.start_time <= period_end) + ) + .group_by( + fn.strftime( + "%Y-%m-%d", + fn.datetime( + Recordings.start_time, + "unixepoch", + period_hour_modifier, + period_minute_modifier, + ), + ) + ) + .order_by(Recordings.start_time.desc()) + .namedtuples() + ) + + for g in period_query: + days[g.day] = True + + return JSONResponse(content=dict(sorted(days.items()))) + + +@router.get( + "/{camera_name}/recordings/summary", dependencies=[Depends(require_camera_access)] +) +async def recordings_summary(camera_name: str, timezone: str = "utc"): + """Returns hourly summary for recordings of given camera""" + + time_range_query = ( + Recordings.select( + fn.MIN(Recordings.start_time).alias("min_time"), + fn.MAX(Recordings.start_time).alias("max_time"), + ) + .where(Recordings.camera == camera_name) + .dicts() + .get() + ) + + min_time = time_range_query.get("min_time") + max_time = time_range_query.get("max_time") + + days: dict[str, dict] = {} + + if min_time is None or max_time is None: + return JSONResponse(content=list(days.values())) + + dst_periods = get_dst_transitions(timezone, min_time, max_time) + + for period_start, period_end, period_offset in dst_periods: + hours_offset = int(period_offset / 60 / 60) + minutes_offset = int(period_offset / 60 - hours_offset * 60) + period_hour_modifier = f"{hours_offset} hour" + period_minute_modifier = f"{minutes_offset} minute" + + recording_groups = ( + Recordings.select( + fn.strftime( + 
"%Y-%m-%d %H", + fn.datetime( + Recordings.start_time, + "unixepoch", + period_hour_modifier, + period_minute_modifier, + ), + ).alias("hour"), + fn.SUM(Recordings.duration).alias("duration"), + fn.SUM(Recordings.motion).alias("motion"), + fn.SUM(Recordings.objects).alias("objects"), + ) + .where( + (Recordings.camera == camera_name) + & (Recordings.end_time >= period_start) + & (Recordings.start_time <= period_end) + ) + .group_by((Recordings.start_time + period_offset).cast("int") / 3600) + .order_by(Recordings.start_time.desc()) + .namedtuples() + ) + + event_groups = ( + Event.select( + fn.strftime( + "%Y-%m-%d %H", + fn.datetime( + Event.start_time, + "unixepoch", + period_hour_modifier, + period_minute_modifier, + ), + ).alias("hour"), + fn.COUNT(Event.id).alias("count"), + ) + .where(Event.camera == camera_name, Event.has_clip) + .where( + (Event.start_time >= period_start) & (Event.start_time <= period_end) + ) + .group_by((Event.start_time + period_offset).cast("int") / 3600) + .namedtuples() + ) + + event_map = {g.hour: g.count for g in event_groups} + + for recording_group in recording_groups: + parts = recording_group.hour.split() + hour = parts[1] + day = parts[0] + events_count = event_map.get(recording_group.hour, 0) + hour_data = { + "hour": hour, + "events": events_count, + "motion": recording_group.motion, + "objects": recording_group.objects, + "duration": round(recording_group.duration), + } + if day in days: + # merge counts if already present (edge-case at DST boundary) + days[day]["events"] += events_count or 0 + days[day]["hours"].append(hour_data) + else: + days[day] = { + "events": events_count or 0, + "hours": [hour_data], + "day": day, + } + + return JSONResponse(content=list(days.values())) + + +@router.get("/{camera_name}/recordings", dependencies=[Depends(require_camera_access)]) +async def recordings( + camera_name: str, + after: float = (datetime.now() - timedelta(hours=1)).timestamp(), + before: float = datetime.now().timestamp(), 
+):
+    """Return specific camera recordings between the given 'after'/'end' times. If not provided the last hour will be used"""
+    recordings = (
+        Recordings.select(
+            Recordings.id,
+            Recordings.start_time,
+            Recordings.end_time,
+            Recordings.segment_size,
+            Recordings.motion,
+            Recordings.objects,
+            Recordings.motion_heatmap,
+            Recordings.duration,
+        )
+        .where(
+            Recordings.camera == camera_name,
+            Recordings.end_time >= after,
+            Recordings.start_time <= before,
+        )
+        .order_by(Recordings.start_time)
+        .dicts()
+        .iterator()
+    )
+
+    return JSONResponse(content=list(recordings))
+
+
+@router.get(
+    "/recordings/unavailable",
+    response_model=list[dict],
+    dependencies=[Depends(allow_any_authenticated())],
+)
+async def no_recordings(
+    request: Request,
+    params: MediaRecordingsAvailabilityQueryParams = Depends(),
+    allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter),
+):
+    """Get time ranges with no recordings."""
+    cameras = params.cameras
+    if cameras != "all":
+        requested = set(unquote(cameras).split(","))
+        filtered = requested.intersection(allowed_cameras)
+        if not filtered:
+            return JSONResponse(content=[])
+        cameras = ",".join(filtered)
+    else:
+        cameras = allowed_cameras
+
+    before = params.before or datetime.now().timestamp()
+    after = (
+        params.after
+        or (datetime.now() - timedelta(hours=1)).timestamp()
+    )
+    scale = params.scale
+
+    clauses = [(Recordings.end_time >= after) & (Recordings.start_time <= before)]
+    if isinstance(cameras, str):
+        camera_list = cameras.split(",")
+    else:
+        camera_list = allowed_cameras
+    clauses.append((Recordings.camera << camera_list))
+
+    # Get recording start times
+    data: list[Recordings] = (
+        Recordings.select(Recordings.start_time, Recordings.end_time)
+        .where(reduce(operator.and_, clauses))
+        .order_by(Recordings.start_time.asc())
+        .dicts()
+        .iterator()
+    )
+
+    # Convert recordings to list of (start, end) tuples
+    recordings = [(r["start_time"], r["end_time"]) for r in 
data] + + # Iterate through time segments and check if each has any recording + no_recording_segments = [] + current = after + current_gap_start = None + + while current < before: + segment_end = min(current + scale, before) + + # Check if this segment overlaps with any recording + has_recording = any( + rec_start < segment_end and rec_end > current + for rec_start, rec_end in recordings + ) + + if not has_recording: + # This segment has no recordings + if current_gap_start is None: + current_gap_start = current # Start a new gap + else: + # This segment has recordings + if current_gap_start is not None: + # End the current gap and append it + no_recording_segments.append( + {"start_time": int(current_gap_start), "end_time": int(current)} + ) + current_gap_start = None + + current = segment_end + + # Append the last gap if it exists + if current_gap_start is not None: + no_recording_segments.append( + {"start_time": int(current_gap_start), "end_time": int(before)} + ) + + return JSONResponse(content=no_recording_segments) + + +@router.delete( + "/recordings/start/{start}/end/{end}", + response_model=GenericResponse, + dependencies=[Depends(require_role(["admin"]))], + summary="Delete recordings", + description="""Deletes recordings within the specified time range. + Recordings can be filtered by cameras and kept based on motion, objects, or audio attributes. 
+ """, +) +async def delete_recordings( + start: float = PathParam(..., description="Start timestamp (unix)"), + end: float = PathParam(..., description="End timestamp (unix)"), + params: RecordingsDeleteQueryParams = Depends(), + allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter), +): + """Delete recordings in the specified time range.""" + if start >= end: + return JSONResponse( + content={ + "success": False, + "message": "Start time must be less than end time.", + }, + status_code=400, + ) + + cameras = params.cameras + + if cameras != "all": + requested = set(cameras.split(",")) + filtered = requested.intersection(allowed_cameras) + + if not filtered: + return JSONResponse( + content={ + "success": False, + "message": "No valid cameras found in the request.", + }, + status_code=400, + ) + + camera_list = list(filtered) + else: + camera_list = allowed_cameras + + # Parse keep parameter + keep_set = set() + + if params.keep: + keep_set = set(params.keep.split(",")) + + # Build query to find overlapping recordings + clauses = [ + ( + Recordings.start_time.between(start, end) + | Recordings.end_time.between(start, end) + | ((start > Recordings.start_time) & (end < Recordings.end_time)) + ), + (Recordings.camera << camera_list), + ] + + keep_clauses = [] + + if "motion" in keep_set: + keep_clauses.append(Recordings.motion.is_null(False) & (Recordings.motion > 0)) + + if "object" in keep_set: + keep_clauses.append( + Recordings.objects.is_null(False) & (Recordings.objects > 0) + ) + + if "audio" in keep_set: + keep_clauses.append(Recordings.dBFS.is_null(False)) + + if keep_clauses: + keep_condition = reduce(operator.or_, keep_clauses) + clauses.append(~keep_condition) + + recordings_to_delete = ( + Recordings.select(Recordings.id, Recordings.path) + .where(reduce(operator.and_, clauses)) + .dicts() + .iterator() + ) + + recording_ids = [] + deleted_count = 0 + error_count = 0 + + for recording in recordings_to_delete: + 
recording_ids.append(recording["id"]) + + try: + Path(recording["path"]).unlink(missing_ok=True) + deleted_count += 1 + except Exception as e: + logger.error(f"Failed to delete recording file {recording['path']}: {e}") + error_count += 1 + + if recording_ids: + max_deletes = 100000 + recording_ids_list = list(recording_ids) + + for i in range(0, len(recording_ids_list), max_deletes): + Recordings.delete().where( + Recordings.id << recording_ids_list[i : i + max_deletes] + ).execute() + + message = f"Successfully deleted {deleted_count} recording(s)." + + if error_count > 0: + message += f" {error_count} file deletion error(s) occurred." + + return JSONResponse( + content={"success": True, "message": message}, + status_code=200, + ) diff --git a/frigate/api/review.py b/frigate/api/review.py index 76619dcb2..d2e8063d5 100644 --- a/frigate/api/review.py +++ b/frigate/api/review.py @@ -33,7 +33,6 @@ from frigate.api.defs.response.review_response import ( ReviewSummaryResponse, ) from frigate.api.defs.tags import Tags -from frigate.config import FrigateConfig from frigate.embeddings import EmbeddingsContext from frigate.models import Recordings, ReviewSegment, UserReviewStatus from frigate.review.types import SeverityEnum @@ -747,9 +746,7 @@ async def set_not_reviewed( description="Use GenAI to summarize review items over a period of time.", ) def generate_review_summary(request: Request, start_ts: float, end_ts: float): - config: FrigateConfig = request.app.frigate_config - - if not config.genai.provider: + if not request.app.genai_manager.vision_client: return JSONResponse( content=( { diff --git a/frigate/app.py b/frigate/app.py index fac7a08d9..0add3e3b8 100644 --- a/frigate/app.py +++ b/frigate/app.py @@ -43,10 +43,15 @@ from frigate.const import ( ) from frigate.data_processing.types import DataProcessorMetrics from frigate.db.sqlitevecq import SqliteVecQueueDatabase +from frigate.debug_replay import ( + DebugReplayManager, + cleanup_replay_cameras, +) from 
frigate.embeddings import EmbeddingProcess, EmbeddingsContext from frigate.events.audio import AudioProcessor from frigate.events.cleanup import EventCleanup from frigate.events.maintainer import EventProcessor +from frigate.jobs.motion_search import stop_all_motion_search_jobs from frigate.log import _stop_logging from frigate.models import ( Event, @@ -139,6 +144,9 @@ class FrigateApp: else: logger.debug(f"Skipping directory: {d}") + def init_debug_replay_manager(self) -> None: + self.replay_manager = DebugReplayManager() + def init_camera_metrics(self) -> None: # create camera_metrics for camera_name in self.config.cameras.keys(): @@ -531,6 +539,7 @@ class FrigateApp: set_file_limit() # Start frigate services. + self.init_debug_replay_manager() self.init_camera_metrics() self.init_queues() self.init_database() @@ -541,6 +550,10 @@ class FrigateApp: self.init_embeddings_manager() self.bind_database() self.check_db_data_migrations() + + # Clean up any stale replay camera artifacts (filesystem + DB) + cleanup_replay_cameras() + self.init_inter_process_communicator() self.start_detectors() self.init_dispatcher() @@ -572,6 +585,7 @@ class FrigateApp: self.stats_emitter, self.event_metadata_updater, self.inter_config_updater, + self.replay_manager, ), host="127.0.0.1", port=5001, @@ -586,6 +600,9 @@ class FrigateApp: # used by the docker healthcheck Path("/dev/shm/.frigate-is-stopping").touch() + # Cancel any running motion search jobs before setting stop_event + stop_all_motion_search_jobs() + self.stop_event.set() # set an end_time on entries without an end_time before exiting @@ -637,6 +654,7 @@ class FrigateApp: self.record_cleanup.join() self.stats_emitter.join() self.frigate_watchdog.join() + self.camera_maintainer.join() self.db.stop() # Save embeddings stats to disk diff --git a/frigate/camera/__init__.py b/frigate/camera/__init__.py index 77b1fd424..0461c98cb 100644 --- a/frigate/camera/__init__.py +++ b/frigate/camera/__init__.py @@ -19,6 +19,8 @@ class 
CameraMetrics: process_pid: Synchronized capture_process_pid: Synchronized ffmpeg_pid: Synchronized + reconnects_last_hour: Synchronized + stalls_last_hour: Synchronized def __init__(self, manager: SyncManager): self.camera_fps = manager.Value("d", 0) @@ -35,6 +37,8 @@ class CameraMetrics: self.process_pid = manager.Value("i", 0) self.capture_process_pid = manager.Value("i", 0) self.ffmpeg_pid = manager.Value("i", 0) + self.reconnects_last_hour = manager.Value("i", 0) + self.stalls_last_hour = manager.Value("i", 0) class PTZMetrics: diff --git a/frigate/camera/activity_manager.py b/frigate/camera/activity_manager.py index c2dfa891d..3f229e490 100644 --- a/frigate/camera/activity_manager.py +++ b/frigate/camera/activity_manager.py @@ -57,6 +57,9 @@ class CameraActivityManager: all_objects: list[dict[str, Any]] = [] for camera in new_activity.keys(): + if camera not in self.config.cameras: + continue + # handle cameras that were added dynamically if camera not in self.camera_all_object_counts: self.__init_camera(self.config.cameras[camera]) @@ -124,7 +127,11 @@ class CameraActivityManager: any_changed = False # run through each object and check what topics need to be updated - for label in self.config.cameras[camera].objects.track: + camera_config = self.config.cameras.get(camera) + if camera_config is None: + return + + for label in camera_config.objects.track: if label in self.config.model.non_logo_attributes: continue @@ -174,6 +181,9 @@ class AudioActivityManager: now = datetime.datetime.now().timestamp() for camera in new_activity.keys(): + if camera not in self.config.cameras: + continue + # handle cameras that were added dynamically if camera not in self.current_audio_detections: self.__init_camera(self.config.cameras[camera]) @@ -193,7 +203,11 @@ class AudioActivityManager: def compare_audio_activity( self, camera: str, new_detections: list[tuple[str, float]], now: float ) -> None: - max_not_heard = self.config.cameras[camera].audio.max_not_heard + 
camera_config = self.config.cameras.get(camera) + if camera_config is None: + return False + + max_not_heard = camera_config.audio.max_not_heard current = self.current_audio_detections[camera] any_changed = False @@ -222,6 +236,7 @@ class AudioActivityManager: None, "audio", {}, + None, ), EventMetadataTypeEnum.manual_event_create.value, ) diff --git a/frigate/camera/maintainer.py b/frigate/camera/maintainer.py index 815e650e9..9cfdcc7f3 100644 --- a/frigate/camera/maintainer.py +++ b/frigate/camera/maintainer.py @@ -55,8 +55,20 @@ class CameraMaintainer(threading.Thread): self.shm_count = self.__calculate_shm_frame_count() self.camera_processes: dict[str, mp.Process] = {} self.capture_processes: dict[str, mp.Process] = {} + self.camera_stop_events: dict[str, MpEvent] = {} self.metrics_manager = metrics_manager + def __ensure_camera_stop_event(self, camera: str) -> MpEvent: + camera_stop_event = self.camera_stop_events.get(camera) + + if camera_stop_event is None: + camera_stop_event = mp.Event() + self.camera_stop_events[camera] = camera_stop_event + else: + camera_stop_event.clear() + + return camera_stop_event + def __init_historical_regions(self) -> None: # delete region grids for removed or renamed cameras cameras = list(self.config.cameras.keys()) @@ -99,6 +111,8 @@ class CameraMaintainer(threading.Thread): logger.info(f"Camera processor not started for disabled camera {name}") return + camera_stop_event = self.__ensure_camera_stop_event(name) + if runtime: self.camera_metrics[name] = CameraMetrics(self.metrics_manager) self.ptz_metrics[name] = PTZMetrics(autotracker_enabled=False) @@ -135,7 +149,7 @@ class CameraMaintainer(threading.Thread): self.camera_metrics[name], self.ptz_metrics[name], self.region_grids[name], - self.stop_event, + camera_stop_event, self.config.logger, ) self.camera_processes[config.name] = camera_process @@ -150,6 +164,8 @@ class CameraMaintainer(threading.Thread): logger.info(f"Capture process not started for disabled camera {name}") 
return + camera_stop_event = self.__ensure_camera_stop_event(name) + # pre-create shms count = 10 if runtime else self.shm_count for i in range(count): @@ -160,7 +176,7 @@ class CameraMaintainer(threading.Thread): config, count, self.camera_metrics[name], - self.stop_event, + camera_stop_event, self.config.logger, ) capture_process.daemon = True @@ -170,18 +186,36 @@ class CameraMaintainer(threading.Thread): logger.info(f"Capture process started for {name}: {capture_process.pid}") def __stop_camera_capture_process(self, camera: str) -> None: - capture_process = self.capture_processes[camera] + capture_process = self.capture_processes.get(camera) if capture_process is not None: logger.info(f"Waiting for capture process for {camera} to stop") - capture_process.terminate() - capture_process.join() + camera_stop_event = self.camera_stop_events.get(camera) + + if camera_stop_event is not None: + camera_stop_event.set() + + capture_process.join(timeout=10) + if capture_process.is_alive(): + logger.warning( + f"Capture process for {camera} didn't exit, forcing termination" + ) + capture_process.terminate() + capture_process.join() def __stop_camera_process(self, camera: str) -> None: - camera_process = self.camera_processes[camera] + camera_process = self.camera_processes.get(camera) if camera_process is not None: logger.info(f"Waiting for process for {camera} to stop") - camera_process.terminate() - camera_process.join() + camera_stop_event = self.camera_stop_events.get(camera) + + if camera_stop_event is not None: + camera_stop_event.set() + + camera_process.join(timeout=10) + if camera_process.is_alive(): + logger.warning(f"Process for {camera} didn't exit, forcing termination") + camera_process.terminate() + camera_process.join() logger.info(f"Closing frame queue for {camera}") empty_and_close_queue(self.camera_metrics[camera].frame_queue) @@ -199,6 +233,12 @@ class CameraMaintainer(threading.Thread): for update_type, updated_cameras in updates.items(): if update_type 
== CameraConfigUpdateEnum.add.name: for camera in updated_cameras: + if ( + camera in self.camera_processes + or camera in self.capture_processes + ): + continue + self.__start_camera_processor( camera, self.update_subscriber.camera_configs[camera], @@ -210,15 +250,22 @@ class CameraMaintainer(threading.Thread): runtime=True, ) elif update_type == CameraConfigUpdateEnum.remove.name: - self.__stop_camera_capture_process(camera) - self.__stop_camera_process(camera) + for camera in updated_cameras: + self.__stop_camera_capture_process(camera) + self.__stop_camera_process(camera) + self.capture_processes.pop(camera, None) + self.camera_processes.pop(camera, None) + self.camera_stop_events.pop(camera, None) + self.region_grids.pop(camera, None) + self.camera_metrics.pop(camera, None) + self.ptz_metrics.pop(camera, None) # ensure the capture processes are done - for camera in self.camera_processes.keys(): + for camera in self.capture_processes.keys(): self.__stop_camera_capture_process(camera) # ensure the camera processors are done - for camera in self.capture_processes.keys(): + for camera in self.camera_processes.keys(): self.__stop_camera_process(camera) self.update_subscriber.stop() diff --git a/frigate/camera/state.py b/frigate/camera/state.py index 97c715388..eccdc41e8 100644 --- a/frigate/camera/state.py +++ b/frigate/camera/state.py @@ -65,7 +65,7 @@ class CameraState: frame_copy = cv2.cvtColor(frame_copy, cv2.COLOR_YUV2BGR_I420) # draw on the frame if draw_options.get("mask"): - mask_overlay = np.where(self.camera_config.motion.mask == [0]) + mask_overlay = np.where(self.camera_config.motion.rasterized_mask == [0]) frame_copy[mask_overlay] = [0, 0, 0] if draw_options.get("bounding_boxes"): @@ -197,6 +197,10 @@ class CameraState: if draw_options.get("zones"): for name, zone in self.camera_config.zones.items(): + # skip disabled zones + if not zone.enabled: + continue + thickness = ( 8 if any( diff --git a/frigate/comms/config_updater.py 
b/frigate/comms/config_updater.py index 447089a94..4552abc11 100644 --- a/frigate/comms/config_updater.py +++ b/frigate/comms/config_updater.py @@ -26,8 +26,8 @@ class ConfigPublisher: def stop(self) -> None: self.stop_event.set() - self.socket.close() - self.context.destroy() + self.socket.close(linger=0) + self.context.destroy(linger=0) class ConfigSubscriber: @@ -55,5 +55,5 @@ class ConfigSubscriber: return (None, None) def stop(self) -> None: - self.socket.close() - self.context.destroy() + self.socket.close(linger=0) + self.context.destroy(linger=0) diff --git a/frigate/comms/dispatcher.py b/frigate/comms/dispatcher.py index 6e45ac175..490a829dc 100644 --- a/frigate/comms/dispatcher.py +++ b/frigate/comms/dispatcher.py @@ -15,6 +15,7 @@ from frigate.config.camera.updater import ( CameraConfigUpdatePublisher, CameraConfigUpdateTopic, ) +from frigate.config.config import RuntimeFilterConfig, RuntimeMotionConfig from frigate.const import ( CLEAR_ONGOING_REVIEW_SEGMENTS, EXPIRE_AUDIO_ACTIVITY, @@ -28,6 +29,7 @@ from frigate.const import ( UPDATE_CAMERA_ACTIVITY, UPDATE_EMBEDDINGS_REINDEX_PROGRESS, UPDATE_EVENT_DESCRIPTION, + UPDATE_JOB_STATE, UPDATE_MODEL_STATE, UPDATE_REVIEW_DESCRIPTION, UPSERT_REVIEW_SEGMENT, @@ -60,6 +62,7 @@ class Dispatcher: self.camera_activity = CameraActivityManager(config, self.publish) self.audio_activity = AudioActivityManager(config, self.publish) self.model_state: dict[str, ModelStatusTypesEnum] = {} + self.job_state: dict[str, dict[str, Any]] = {} # {job_type: job_data} self.embeddings_reindex: dict[str, Any] = {} self.birdseye_layout: dict[str, Any] = {} self.audio_transcription_state: str = "idle" @@ -82,6 +85,9 @@ class Dispatcher: "review_detections": self._on_detections_command, "object_descriptions": self._on_object_description_command, "review_descriptions": self._on_review_description_command, + "motion_mask": self._on_motion_mask_command, + "object_mask": self._on_object_mask_command, + "zone": self._on_zone_command, } 
self._global_settings_handlers: dict[str, Callable] = { "notifications": self._on_global_notification_command, @@ -98,11 +104,23 @@ class Dispatcher: """Handle receiving of payload from communicators.""" def handle_camera_command( - command_type: str, camera_name: str, command: str, payload: str + command_type: str, + camera_name: str, + command: str, + payload: str, + sub_command: str | None = None, ) -> None: + if camera_name not in self.config.cameras: + return + try: if command_type == "set": - self._camera_settings_handlers[command](camera_name, payload) + if sub_command: + self._camera_settings_handlers[command]( + camera_name, sub_command, payload + ) + else: + self._camera_settings_handlers[command](camera_name, payload) elif command_type == "ptz": self._on_ptz_command(camera_name, payload) except KeyError: @@ -116,6 +134,9 @@ class Dispatcher: def handle_request_region_grid() -> Any: camera = payload + if camera not in self.config.cameras: + return None + grid = get_camera_regions_grid( camera, self.config.cameras[camera].detect, @@ -180,6 +201,19 @@ class Dispatcher: def handle_model_state() -> None: self.publish("model_state", json.dumps(self.model_state.copy())) + def handle_update_job_state() -> None: + if payload and isinstance(payload, dict): + job_type = payload.get("job_type") + if job_type: + self.job_state[job_type] = payload + self.publish( + "job_state", + json.dumps(self.job_state), + ) + + def handle_job_state() -> None: + self.publish("job_state", json.dumps(self.job_state.copy())) + def handle_update_audio_transcription_state() -> None: if payload: self.audio_transcription_state = payload @@ -215,7 +249,11 @@ class Dispatcher: self.publish("birdseye_layout", json.dumps(self.birdseye_layout.copy())) def handle_on_connect() -> None: - camera_status = self.camera_activity.last_camera_activity.copy() + camera_status = { + camera: status + for camera, status in self.camera_activity.last_camera_activity.copy().items() + if camera in 
self.config.cameras + } audio_detections = self.audio_activity.current_audio_detections.copy() cameras_with_status = camera_status.keys() @@ -277,6 +315,7 @@ class Dispatcher: UPDATE_EVENT_DESCRIPTION: handle_update_event_description, UPDATE_REVIEW_DESCRIPTION: handle_update_review_description, UPDATE_MODEL_STATE: handle_update_model_state, + UPDATE_JOB_STATE: handle_update_job_state, UPDATE_EMBEDDINGS_REINDEX_PROGRESS: handle_update_embeddings_reindex_progress, UPDATE_BIRDSEYE_LAYOUT: handle_update_birdseye_layout, UPDATE_AUDIO_TRANSCRIPTION_STATE: handle_update_audio_transcription_state, @@ -284,6 +323,7 @@ class Dispatcher: "restart": handle_restart, "embeddingsReindexProgress": handle_embeddings_reindex_progress, "modelState": handle_model_state, + "jobState": handle_job_state, "audioTranscriptionState": handle_audio_transcription_state, "birdseyeLayout": handle_birdseye_layout, "onConnect": handle_on_connect, @@ -297,6 +337,14 @@ class Dispatcher: camera_name = parts[-3] command = parts[-2] handle_camera_command("set", camera_name, command, payload) + elif len(parts) == 4 and topic.endswith("set"): + # example /cam_name/motion_mask/mask_name/set payload=ON|OFF + camera_name = parts[-4] + command = parts[-3] + sub_command = parts[-2] + handle_camera_command( + "set", camera_name, command, payload, sub_command + ) elif len(parts) == 2 and topic.endswith("set"): command = parts[-2] self._global_settings_handlers[command](payload) @@ -308,7 +356,8 @@ class Dispatcher: # example /cam_name/notifications/suspend payload=duration camera_name = parts[-3] command = parts[-2] - self._on_camera_notification_suspend(camera_name, payload) + if camera_name in self.config.cameras: + self._on_camera_notification_suspend(camera_name, payload) except IndexError: logger.error( f"Received invalid {topic.split('/')[-1]} command: {topic}" @@ -841,3 +890,149 @@ class Dispatcher: genai_settings, ) self.publish(f"{camera_name}/review_descriptions/state", payload, retain=True) + + def 
_on_motion_mask_command( + self, camera_name: str, mask_name: str, payload: str + ) -> None: + """Callback for motion mask topic.""" + if payload not in ["ON", "OFF"]: + logger.error(f"Invalid payload for motion mask {mask_name}: {payload}") + return + + motion_settings = self.config.cameras[camera_name].motion + + if mask_name not in motion_settings.mask: + logger.error(f"Unknown motion mask: {mask_name}") + return + + mask = motion_settings.mask[mask_name] + + if not mask: + logger.error(f"Motion mask {mask_name} is None") + return + + if payload == "ON": + if not mask.enabled_in_config: + logger.error( + f"Motion mask {mask_name} must be enabled in the config to be turned on via MQTT." + ) + return + + mask.enabled = payload == "ON" + + # Recreate RuntimeMotionConfig to update rasterized_mask + motion_settings = RuntimeMotionConfig( + frame_shape=self.config.cameras[camera_name].frame_shape, + **motion_settings.model_dump(exclude_unset=True), + ) + + # Update the dispatcher's own config + self.config.cameras[camera_name].motion = motion_settings + + self.config_updater.publish_update( + CameraConfigUpdateTopic(CameraConfigUpdateEnum.motion, camera_name), + motion_settings, + ) + self.publish( + f"{camera_name}/motion_mask/{mask_name}/state", payload, retain=True + ) + + def _on_object_mask_command( + self, camera_name: str, mask_name: str, payload: str + ) -> None: + """Callback for object mask topic.""" + if payload not in ["ON", "OFF"]: + logger.error(f"Invalid payload for object mask {mask_name}: {payload}") + return + + object_settings = self.config.cameras[camera_name].objects + + # Check if this is a global mask + mask_found = False + if mask_name in object_settings.mask: + mask = object_settings.mask[mask_name] + if mask: + if payload == "ON": + if not mask.enabled_in_config: + logger.error( + f"Object mask {mask_name} must be enabled in the config to be turned on via MQTT." 
+ ) + return + mask.enabled = payload == "ON" + mask_found = True + + # Check if this is a per-object filter mask + for object_name, filter_config in object_settings.filters.items(): + if mask_name in filter_config.mask: + mask = filter_config.mask[mask_name] + if mask: + if payload == "ON": + if not mask.enabled_in_config: + logger.error( + f"Object mask {mask_name} must be enabled in the config to be turned on via MQTT." + ) + return + mask.enabled = payload == "ON" + mask_found = True + + if not mask_found: + logger.error(f"Unknown object mask: {mask_name}") + return + + # Recreate RuntimeFilterConfig for each object filter to update rasterized_mask + for object_name, filter_config in object_settings.filters.items(): + # Merge global object masks with per-object filter masks + merged_mask = dict(filter_config.mask) # Copy filter-specific masks + + # Add global object masks if they exist + if object_settings.mask: + for global_mask_id, global_mask_config in object_settings.mask.items(): + # Use a global prefix to avoid key collisions + global_mask_id_prefixed = f"global_{global_mask_id}" + merged_mask[global_mask_id_prefixed] = global_mask_config + + object_settings.filters[object_name] = RuntimeFilterConfig( + frame_shape=self.config.cameras[camera_name].frame_shape, + mask=merged_mask, + **filter_config.model_dump( + exclude_unset=True, exclude={"mask", "raw_mask"} + ), + ) + + # Update the dispatcher's own config + self.config.cameras[camera_name].objects = object_settings + + self.config_updater.publish_update( + CameraConfigUpdateTopic(CameraConfigUpdateEnum.objects, camera_name), + object_settings, + ) + self.publish( + f"{camera_name}/object_mask/{mask_name}/state", payload, retain=True + ) + + def _on_zone_command(self, camera_name: str, zone_name: str, payload: str) -> None: + """Callback for zone topic.""" + if payload not in ["ON", "OFF"]: + logger.error(f"Invalid payload for zone {zone_name}: {payload}") + return + + camera_config = 
self.config.cameras[camera_name] + + if zone_name not in camera_config.zones: + logger.error(f"Unknown zone: {zone_name}") + return + + if payload == "ON": + if not camera_config.zones[zone_name].enabled_in_config: + logger.error( + f"Zone {zone_name} must be enabled in the config to be turned on via MQTT." + ) + return + + camera_config.zones[zone_name].enabled = payload == "ON" + + self.config_updater.publish_update( + CameraConfigUpdateTopic(CameraConfigUpdateEnum.zones, camera_name), + camera_config.zones, + ) + self.publish(f"{camera_name}/zone/{zone_name}/state", payload, retain=True) diff --git a/frigate/comms/inter_process.py b/frigate/comms/inter_process.py index e4aad9107..5e76da5eb 100644 --- a/frigate/comms/inter_process.py +++ b/frigate/comms/inter_process.py @@ -61,8 +61,8 @@ class InterProcessCommunicator(Communicator): def stop(self) -> None: self.stop_event.set() self.reader_thread.join() - self.socket.close() - self.context.destroy() + self.socket.close(linger=0) + self.context.destroy(linger=0) class InterProcessRequestor: @@ -82,5 +82,5 @@ class InterProcessRequestor: return "" def stop(self) -> None: - self.socket.close() - self.context.destroy() + self.socket.close(linger=0) + self.context.destroy(linger=0) diff --git a/frigate/comms/mqtt.py b/frigate/comms/mqtt.py index 68ae698d9..9279b4388 100644 --- a/frigate/comms/mqtt.py +++ b/frigate/comms/mqtt.py @@ -133,6 +133,29 @@ class MqttClient(Communicator): retain=True, ) + for mask_name, motion_mask in camera.motion.mask.items(): + if motion_mask: + self.publish( + f"{camera_name}/motion_mask/{mask_name}/state", + "ON" if motion_mask.enabled else "OFF", + retain=True, + ) + + for mask_name, object_mask in camera.objects.mask.items(): + if object_mask: + self.publish( + f"{camera_name}/object_mask/{mask_name}/state", + "ON" if object_mask.enabled else "OFF", + retain=True, + ) + + for zone_name, zone in camera.zones.items(): + self.publish( + f"{camera_name}/zone/{zone_name}/state", + "ON" if 
zone.enabled else "OFF", + retain=True, + ) + if self.config.notifications.enabled_in_config: self.publish( "notifications/state", @@ -242,6 +265,24 @@ class MqttClient(Communicator): self.on_mqtt_command, ) + for mask_name in self.config.cameras[name].motion.mask.keys(): + self.client.message_callback_add( + f"{self.mqtt_config.topic_prefix}/{name}/motion_mask/{mask_name}/set", + self.on_mqtt_command, + ) + + for mask_name in self.config.cameras[name].objects.mask.keys(): + self.client.message_callback_add( + f"{self.mqtt_config.topic_prefix}/{name}/object_mask/{mask_name}/set", + self.on_mqtt_command, + ) + + for zone_name in self.config.cameras[name].zones.keys(): + self.client.message_callback_add( + f"{self.mqtt_config.topic_prefix}/{name}/zone/{zone_name}/set", + self.on_mqtt_command, + ) + if self.config.notifications.enabled_in_config: self.client.message_callback_add( f"{self.mqtt_config.topic_prefix}/notifications/set", diff --git a/frigate/comms/zmq_proxy.py b/frigate/comms/zmq_proxy.py index 29329ec59..4a4a0492a 100644 --- a/frigate/comms/zmq_proxy.py +++ b/frigate/comms/zmq_proxy.py @@ -43,7 +43,7 @@ class ZmqProxy: def stop(self) -> None: # destroying the context will tell the proxy to stop - self.context.destroy() + self.context.destroy(linger=0) self.runner.join() @@ -66,8 +66,8 @@ class Publisher(Generic[T]): self.socket.send_string(f"{self.topic}{sub_topic} {json.dumps(payload)}") def stop(self) -> None: - self.socket.close() - self.context.destroy() + self.socket.close(linger=0) + self.context.destroy(linger=0) class Subscriber(Generic[T]): @@ -96,8 +96,8 @@ class Subscriber(Generic[T]): return self._return_object("", None) def stop(self) -> None: - self.socket.close() - self.context.destroy() + self.socket.close(linger=0) + self.context.destroy(linger=0) def _return_object(self, topic: str, payload: T | None) -> T | None: return payload diff --git a/frigate/config/__init__.py b/frigate/config/__init__.py index c6ff535b0..88f7b79f9 100644 --- 
a/frigate/config/__init__.py +++ b/frigate/config/__init__.py @@ -8,6 +8,7 @@ from .config import * # noqa: F403 from .database import * # noqa: F403 from .logger import * # noqa: F403 from .mqtt import * # noqa: F403 +from .network import * # noqa: F403 from .proxy import * # noqa: F403 from .telemetry import * # noqa: F403 from .tls import * # noqa: F403 diff --git a/frigate/config/auth.py b/frigate/config/auth.py index 6935350a0..fccbfbaf2 100644 --- a/frigate/config/auth.py +++ b/frigate/config/auth.py @@ -8,39 +8,63 @@ __all__ = ["AuthConfig"] class AuthConfig(FrigateBaseModel): - enabled: bool = Field(default=True, title="Enable authentication") + enabled: bool = Field( + default=True, + title="Enable authentication", + description="Enable native authentication for the Frigate UI.", + ) reset_admin_password: bool = Field( - default=False, title="Reset the admin password on startup" + default=False, + title="Reset admin password", + description="If true, reset the admin user's password on startup and print the new password in logs.", ) cookie_name: str = Field( - default="frigate_token", title="Name for jwt token cookie", pattern=r"^[a-z_]+$" + default="frigate_token", + title="JWT cookie name", + description="Name of the cookie used to store the JWT token for native authentication.", + pattern=r"^[a-z_]+$", + ) + cookie_secure: bool = Field( + default=False, + title="Secure cookie flag", + description="Set the secure flag on the auth cookie; should be true when using TLS.", ) - cookie_secure: bool = Field(default=False, title="Set secure flag on cookie") session_length: int = Field( - default=86400, title="Session length for jwt session tokens", ge=60 + default=86400, + title="Session length", + description="Session duration in seconds for JWT-based sessions.", + ge=60, ) refresh_time: int = Field( default=1800, - title="Refresh the session if it is going to expire in this many seconds", + title="Session refresh window", + description="When a session is 
within this many seconds of expiring, refresh it back to full length.", ge=30, ) failed_login_rate_limit: Optional[str] = Field( default=None, - title="Rate limits for failed login attempts.", + title="Failed login limits", + description="Rate limiting rules for failed login attempts to reduce brute-force attacks.", ) trusted_proxies: list[str] = Field( default=[], - title="Trusted proxies for determining IP address to rate limit", + title="Trusted proxies", + description="List of trusted proxy IPs used when determining client IP for rate limiting.", ) # As of Feb 2023, OWASP recommends 600000 iterations for PBKDF2-SHA256 - hash_iterations: int = Field(default=600000, title="Password hash iterations") + hash_iterations: int = Field( + default=600000, + title="Hash iterations", + description="Number of PBKDF2-SHA256 iterations to use when hashing user passwords.", + ) roles: Dict[str, List[str]] = Field( default_factory=dict, - title="Role to camera mappings. Empty list grants access to all cameras.", + title="Role mappings", + description="Map roles to camera lists. An empty list grants access to all cameras for the role.", ) admin_first_time_login: Optional[bool] = Field( default=False, - title="Internal field to expose first-time admin login flag to the UI", + title="First-time admin flag", description=( "When true the UI may show a help link on the login page informing users how to sign in after an admin password reset. 
" ), diff --git a/frigate/config/camera/audio.py b/frigate/config/camera/audio.py index 3734455a2..6028802df 100644 --- a/frigate/config/camera/audio.py +++ b/frigate/config/camera/audio.py @@ -17,25 +17,45 @@ class AudioFilterConfig(FrigateBaseModel): default=0.8, ge=AUDIO_MIN_CONFIDENCE, lt=1.0, - title="Minimum detection confidence threshold for audio to be counted.", + title="Minimum audio confidence", + description="Minimum confidence threshold for the audio event to be counted.", ) class AudioConfig(FrigateBaseModel): - enabled: bool = Field(default=False, title="Enable audio events.") + enabled: bool = Field( + default=False, + title="Enable audio detection", + description="Enable or disable audio event detection for all cameras; can be overridden per-camera.", + ) max_not_heard: int = Field( - default=30, title="Seconds of not hearing the type of audio to end the event." + default=30, + title="End timeout", + description="Amount of seconds without the configured audio type before the audio event is ended.", ) min_volume: int = Field( - default=500, title="Min volume required to run audio detection." + default=500, + title="Minimum volume", + description="Minimum RMS volume threshold required to run audio detection; lower values increase sensitivity (e.g., 200 high, 500 medium, 1000 low).", ) listen: list[str] = Field( - default=DEFAULT_LISTEN_AUDIO, title="Audio to listen for." + default=DEFAULT_LISTEN_AUDIO, + title="Listen types", + description="List of audio event types to detect (for example: bark, fire_alarm, scream, speech, yell).", ) filters: Optional[dict[str, AudioFilterConfig]] = Field( - None, title="Audio filters." + None, + title="Audio filters", + description="Per-audio-type filter settings such as confidence thresholds used to reduce false positives.", ) enabled_in_config: Optional[bool] = Field( - None, title="Keep track of original state of audio detection." 
+ None, + title="Original audio state", + description="Indicates whether audio detection was originally enabled in the static config file.", + ) + num_threads: int = Field( + default=2, + title="Detection threads", + description="Number of threads to use for audio detection processing.", + ge=1, ) - num_threads: int = Field(default=2, title="Number of detection threads", ge=1) diff --git a/frigate/config/camera/birdseye.py b/frigate/config/camera/birdseye.py index 1e6f0f335..32aa66a98 100644 --- a/frigate/config/camera/birdseye.py +++ b/frigate/config/camera/birdseye.py @@ -29,45 +29,88 @@ class BirdseyeModeEnum(str, Enum): class BirdseyeLayoutConfig(FrigateBaseModel): scaling_factor: float = Field( - default=2.0, title="Birdseye Scaling Factor", ge=1.0, le=5.0 + default=2.0, + title="Scaling factor", + description="Scaling factor used by the layout calculator (range 1.0 to 5.0).", + ge=1.0, + le=5.0, + ) + max_cameras: Optional[int] = Field( + default=None, + title="Max cameras", + description="Maximum number of cameras to display at once in Birdseye; shows the most recent cameras.", ) - max_cameras: Optional[int] = Field(default=None, title="Max cameras") class BirdseyeConfig(FrigateBaseModel): - enabled: bool = Field(default=True, title="Enable birdseye view.") + enabled: bool = Field( + default=True, + title="Enable Birdseye", + description="Enable or disable the Birdseye view feature.", + ) mode: BirdseyeModeEnum = Field( - default=BirdseyeModeEnum.objects, title="Tracking mode." 
+ default=BirdseyeModeEnum.objects, + title="Tracking mode", + description="Mode for including cameras in Birdseye: 'objects', 'motion', or 'continuous'.", ) - restream: bool = Field(default=False, title="Restream birdseye via RTSP.") - width: int = Field(default=1280, title="Birdseye width.") - height: int = Field(default=720, title="Birdseye height.") + restream: bool = Field( + default=False, + title="Restream RTSP", + description="Re-stream the Birdseye output as an RTSP feed; enabling this will keep Birdseye running continuously.", + ) + width: int = Field( + default=1280, + title="Width", + description="Output width (pixels) of the composed Birdseye frame.", + ) + height: int = Field( + default=720, + title="Height", + description="Output height (pixels) of the composed Birdseye frame.", + ) quality: int = Field( default=8, - title="Encoding quality.", + title="Encoding quality", + description="Encoding quality for the Birdseye mpeg1 feed (1 highest quality, 31 lowest).", ge=1, le=31, ) inactivity_threshold: int = Field( - default=30, title="Birdseye Inactivity Threshold", gt=0 + default=30, + title="Inactivity threshold", + description="Seconds of inactivity after which a camera will stop being shown in Birdseye.", + gt=0, ) layout: BirdseyeLayoutConfig = Field( - default_factory=BirdseyeLayoutConfig, title="Birdseye Layout Config" + default_factory=BirdseyeLayoutConfig, + title="Layout", + description="Layout options for the Birdseye composition.", ) idle_heartbeat_fps: float = Field( default=0.0, ge=0.0, le=10.0, - title="Idle heartbeat FPS (0 disables, max 10)", + title="Idle heartbeat FPS", + description="Frames-per-second to resend the last composed Birdseye frame when idle; set to 0 to disable.", ) # uses BaseModel because some global attributes are not available at the camera level class BirdseyeCameraConfig(BaseModel): - enabled: bool = Field(default=True, title="Enable birdseye view for camera.") + enabled: bool = Field( + default=True, + 
title="Enable Birdseye", + description="Enable or disable the Birdseye view feature.", + ) mode: BirdseyeModeEnum = Field( - default=BirdseyeModeEnum.objects, title="Tracking mode for camera." + default=BirdseyeModeEnum.objects, + title="Tracking mode", + description="Mode for including cameras in Birdseye: 'objects', 'motion', or 'continuous'.", ) - order: int = Field(default=0, title="Position of the camera in the birdseye view.") + order: int = Field( + default=0, + title="Position", + description="Numeric position controlling the camera's ordering in the Birdseye layout.", + ) diff --git a/frigate/config/camera/camera.py b/frigate/config/camera/camera.py index 0f2b1c8be..21397065b 100644 --- a/frigate/config/camera/camera.py +++ b/frigate/config/camera/camera.py @@ -50,10 +50,17 @@ class CameraTypeEnum(str, Enum): class CameraConfig(FrigateBaseModel): - name: Optional[str] = Field(None, title="Camera name.", pattern=REGEX_CAMERA_NAME) + name: Optional[str] = Field( + None, + title="Camera name", + description="Camera name is required", + pattern=REGEX_CAMERA_NAME, + ) friendly_name: Optional[str] = Field( - None, title="Camera friendly name used in the Frigate UI." + None, + title="Friendly name", + description="Camera friendly name used in the Frigate UI", ) @model_validator(mode="before") @@ -63,80 +70,129 @@ class CameraConfig(FrigateBaseModel): pass return values - enabled: bool = Field(default=True, title="Enable camera.") + enabled: bool = Field(default=True, title="Enabled", description="Enabled") # Options with global fallback audio: AudioConfig = Field( - default_factory=AudioConfig, title="Audio events configuration." 
+ default_factory=AudioConfig, + title="Audio events", + description="Settings for audio-based event detection for this camera.", ) audio_transcription: CameraAudioTranscriptionConfig = Field( default_factory=CameraAudioTranscriptionConfig, - title="Audio transcription config.", + title="Audio transcription", + description="Settings for live and speech audio transcription used for events and live captions.", ) birdseye: BirdseyeCameraConfig = Field( - default_factory=BirdseyeCameraConfig, title="Birdseye camera configuration." + default_factory=BirdseyeCameraConfig, + title="Birdseye", + description="Settings for the Birdseye composite view that composes multiple camera feeds into a single layout.", ) detect: DetectConfig = Field( - default_factory=DetectConfig, title="Object detection configuration." + default_factory=DetectConfig, + title="Object Detection", + description="Settings for the detection/detect role used to run object detection and initialize trackers.", ) face_recognition: CameraFaceRecognitionConfig = Field( - default_factory=CameraFaceRecognitionConfig, title="Face recognition config." + default_factory=CameraFaceRecognitionConfig, + title="Face recognition", + description="Settings for face detection and recognition for this camera.", + ) + ffmpeg: CameraFfmpegConfig = Field( + title="FFmpeg", + description="FFmpeg settings including binary path, args, hwaccel options, and per-role output args.", ) - ffmpeg: CameraFfmpegConfig = Field(title="FFmpeg configuration for the camera.") live: CameraLiveConfig = Field( - default_factory=CameraLiveConfig, title="Live playback settings." + default_factory=CameraLiveConfig, + title="Live playback", + description="Settings used by the Web UI to control live stream selection, resolution and quality.", ) lpr: CameraLicensePlateRecognitionConfig = Field( - default_factory=CameraLicensePlateRecognitionConfig, title="LPR config." 
+ default_factory=CameraLicensePlateRecognitionConfig, + title="License Plate Recognition", + description="License plate recognition settings including detection thresholds, formatting, and known plates.", + ) + motion: MotionConfig = Field( + None, + title="Motion detection", + description="Default motion detection settings for this camera.", ) - motion: MotionConfig = Field(None, title="Motion detection configuration.") objects: ObjectConfig = Field( - default_factory=ObjectConfig, title="Object configuration." + default_factory=ObjectConfig, + title="Objects", + description="Object tracking defaults including which labels to track and per-object filters.", ) record: RecordConfig = Field( - default_factory=RecordConfig, title="Record configuration." + default_factory=RecordConfig, + title="Recording", + description="Recording and retention settings for this camera.", ) review: ReviewConfig = Field( - default_factory=ReviewConfig, title="Review configuration." + default_factory=ReviewConfig, + title="Review", + description="Settings that control alerts, detections, and GenAI review summaries used by the UI and storage for this camera.", ) semantic_search: CameraSemanticSearchConfig = Field( default_factory=CameraSemanticSearchConfig, - title="Semantic search configuration.", + title="Semantic Search", + description="Settings for semantic search which builds and queries object embeddings to find similar items.", ) snapshots: SnapshotsConfig = Field( - default_factory=SnapshotsConfig, title="Snapshot configuration." + default_factory=SnapshotsConfig, + title="Snapshots", + description="Settings for saved JPEG snapshots of tracked objects for this camera.", ) timestamp_style: TimestampStyleConfig = Field( - default_factory=TimestampStyleConfig, title="Timestamp style configuration." 
+ default_factory=TimestampStyleConfig, + title="Timestamp style", + description="Styling options for in-feed timestamps applied to recordings and snapshots.", ) # Options without global fallback best_image_timeout: int = Field( default=60, - title="How long to wait for the image with the highest confidence score.", + title="Best image timeout", + description="How long to wait for the image with the highest confidence score.", ) mqtt: CameraMqttConfig = Field( - default_factory=CameraMqttConfig, title="MQTT configuration." + default_factory=CameraMqttConfig, + title="MQTT", + description="MQTT image publishing settings.", ) notifications: NotificationConfig = Field( - default_factory=NotificationConfig, title="Notifications configuration." + default_factory=NotificationConfig, + title="Notifications", + description="Settings to enable and control notifications for this camera.", ) onvif: OnvifConfig = Field( - default_factory=OnvifConfig, title="Camera Onvif Configuration." + default_factory=OnvifConfig, + title="ONVIF", + description="ONVIF connection and PTZ autotracking settings for this camera.", + ) + type: CameraTypeEnum = Field( + default=CameraTypeEnum.generic, + title="Camera type", + description="Camera Type", ) - type: CameraTypeEnum = Field(default=CameraTypeEnum.generic, title="Camera Type") ui: CameraUiConfig = Field( - default_factory=CameraUiConfig, title="Camera UI Modifications." + default_factory=CameraUiConfig, + title="Camera UI", + description="Display ordering and visibility for this camera in the UI. Ordering affects the default dashboard. For more granular control, use camera groups.", ) webui_url: Optional[str] = Field( None, - title="URL to visit the camera directly from system page", + title="Camera URL", + description="URL to visit the camera directly from system page", ) zones: dict[str, ZoneConfig] = Field( - default_factory=dict, title="Zone configuration." 
+ default_factory=dict, + title="Zones", + description="Zones allow you to define a specific area of the frame so you can determine whether or not an object is within a particular area.", ) enabled_in_config: Optional[bool] = Field( - default=None, title="Keep track of original state of camera." + default=None, + title="Original camera state", + description="Keep track of original state of camera.", ) _ffmpeg_cmds: list[dict[str, list[str]]] = PrivateAttr() diff --git a/frigate/config/camera/detect.py b/frigate/config/camera/detect.py index 1926f3254..19ba670a6 100644 --- a/frigate/config/camera/detect.py +++ b/frigate/config/camera/detect.py @@ -8,56 +8,82 @@ __all__ = ["DetectConfig", "StationaryConfig", "StationaryMaxFramesConfig"] class StationaryMaxFramesConfig(FrigateBaseModel): - default: Optional[int] = Field(default=None, title="Default max frames.", ge=1) + default: Optional[int] = Field( + default=None, + title="Default max frames", + description="Default maximum frames to track a stationary object before stopping.", + ge=1, + ) objects: dict[str, int] = Field( - default_factory=dict, title="Object specific max frames." 
+ default_factory=dict, + title="Object max frames", + description="Per-object overrides for maximum frames to track stationary objects.", ) class StationaryConfig(FrigateBaseModel): interval: Optional[int] = Field( default=None, - title="Frame interval for checking stationary objects.", + title="Stationary interval", + description="How often (in frames) to run a detection check to confirm a stationary object.", gt=0, ) threshold: Optional[int] = Field( default=None, - title="Number of frames without a position change for an object to be considered stationary", + title="Stationary threshold", + description="Number of frames with no position change required to mark an object as stationary.", ge=1, ) max_frames: StationaryMaxFramesConfig = Field( default_factory=StationaryMaxFramesConfig, - title="Max frames for stationary objects.", + title="Max frames", + description="Limits how long stationary objects are tracked before being discarded.", ) classifier: bool = Field( default=True, - title="Enable visual classifier for determing if objects with jittery bounding boxes are stationary.", + title="Enable visual classifier", + description="Use a visual classifier to detect truly stationary objects even when bounding boxes jitter.", ) class DetectConfig(FrigateBaseModel): - enabled: bool = Field(default=False, title="Detection Enabled.") + enabled: bool = Field( + default=False, + title="Detection enabled", + description="Enable or disable object detection for all cameras; can be overridden per-camera. Detection must be enabled for object tracking to run.", + ) height: Optional[int] = Field( - default=None, title="Height of the stream for the detect role." + default=None, + title="Detect height", + description="Height (pixels) of frames used for the detect stream; leave empty to use the native stream resolution.", ) width: Optional[int] = Field( - default=None, title="Width of the stream for the detect role." 
+ default=None, + title="Detect width", + description="Width (pixels) of frames used for the detect stream; leave empty to use the native stream resolution.", ) fps: int = Field( - default=5, title="Number of frames per second to process through detection." + default=5, + title="Detect FPS", + description="Desired frames per second to run detection on; lower values reduce CPU usage (recommended value is 5, only set higher - at most 10 - if tracking extremely fast moving objects).", ) min_initialized: Optional[int] = Field( default=None, - title="Minimum number of consecutive hits for an object to be initialized by the tracker.", + title="Minimum initialization frames", + description="Number of consecutive detection hits required before creating a tracked object. Increase to reduce false initializations. Default value is fps divided by 2.", ) max_disappeared: Optional[int] = Field( default=None, - title="Maximum number of frames the object can disappear before detection ends.", + title="Maximum disappeared frames", + description="Number of frames without a detection before a tracked object is considered gone.", ) stationary: StationaryConfig = Field( default_factory=StationaryConfig, - title="Stationary objects config.", + title="Stationary objects config", + description="Settings to detect and manage objects that remain stationary for a period of time.", ) annotation_offset: int = Field( - default=0, title="Milliseconds to offset detect annotations by." 
+ default=0, + title="Annotation offset", + description="Milliseconds to shift detect annotations to better align timeline bounding boxes with recordings; can be positive or negative.", ) diff --git a/frigate/config/camera/ffmpeg.py b/frigate/config/camera/ffmpeg.py index 2c1e4cdca..05769dc66 100644 --- a/frigate/config/camera/ffmpeg.py +++ b/frigate/config/camera/ffmpeg.py @@ -35,39 +35,58 @@ DETECT_FFMPEG_OUTPUT_ARGS_DEFAULT = [ class FfmpegOutputArgsConfig(FrigateBaseModel): detect: Union[str, list[str]] = Field( default=DETECT_FFMPEG_OUTPUT_ARGS_DEFAULT, - title="Detect role FFmpeg output arguments.", + title="Detect output arguments", + description="Default output arguments for detect role streams.", ) record: Union[str, list[str]] = Field( default=RECORD_FFMPEG_OUTPUT_ARGS_DEFAULT, - title="Record role FFmpeg output arguments.", + title="Record output arguments", + description="Default output arguments for record role streams.", ) class FfmpegConfig(FrigateBaseModel): - path: str = Field(default="default", title="FFmpeg path") + path: str = Field( + default="default", + title="FFmpeg path", + description='Path to the FFmpeg binary to use or a version alias ("5.0" or "7.0").', + ) global_args: Union[str, list[str]] = Field( - default=FFMPEG_GLOBAL_ARGS_DEFAULT, title="Global FFmpeg arguments." + default=FFMPEG_GLOBAL_ARGS_DEFAULT, + title="FFmpeg global arguments", + description="Global arguments passed to FFmpeg processes.", ) hwaccel_args: Union[str, list[str]] = Field( - default="auto", title="FFmpeg hardware acceleration arguments." + default="auto", + title="Hardware acceleration arguments", + description="Hardware acceleration arguments for FFmpeg. Provider-specific presets are recommended.", ) input_args: Union[str, list[str]] = Field( - default=FFMPEG_INPUT_ARGS_DEFAULT, title="FFmpeg input arguments." 
+ default=FFMPEG_INPUT_ARGS_DEFAULT, + title="Input arguments", + description="Input arguments applied to FFmpeg input streams.", ) output_args: FfmpegOutputArgsConfig = Field( default_factory=FfmpegOutputArgsConfig, - title="FFmpeg output arguments per role.", + title="Output arguments", + description="Default output arguments used for different FFmpeg roles such as detect and record.", ) retry_interval: float = Field( default=10.0, - title="Time in seconds to wait before FFmpeg retries connecting to the camera.", + title="FFmpeg retry time", + description="Seconds to wait before attempting to reconnect a camera stream after failure. Default is 10.", gt=0.0, ) apple_compatibility: bool = Field( default=False, - title="Set tag on HEVC (H.265) recording stream to improve compatibility with Apple players.", + title="Apple compatibility", + description="Enable HEVC tagging for better Apple player compatibility when recording H.265.", + ) + gpu: int = Field( + default=0, + title="GPU index", + description="Default GPU index used for hardware acceleration if available.", ) - gpu: int = Field(default=0, title="GPU index to use for hardware acceleration.") @property def ffmpeg_path(self) -> str: @@ -95,21 +114,36 @@ class CameraRoleEnum(str, Enum): class CameraInput(FrigateBaseModel): - path: EnvString = Field(title="Camera input path.") - roles: list[CameraRoleEnum] = Field(title="Roles assigned to this input.") + path: EnvString = Field( + title="Input path", + description="Camera input stream URL or path.", + ) + roles: list[CameraRoleEnum] = Field( + title="Input roles", + description="Roles for this input stream.", + ) global_args: Union[str, list[str]] = Field( - default_factory=list, title="FFmpeg global arguments." + default_factory=list, + title="FFmpeg global arguments", + description="FFmpeg global arguments for this input stream.", ) hwaccel_args: Union[str, list[str]] = Field( - default_factory=list, title="FFmpeg hardware acceleration arguments." 
+ default_factory=list, + title="Hardware acceleration arguments", + description="Hardware acceleration arguments for this input stream.", ) input_args: Union[str, list[str]] = Field( - default_factory=list, title="FFmpeg input arguments." + default_factory=list, + title="Input arguments", + description="Input arguments specific to this stream.", ) class CameraFfmpegConfig(FfmpegConfig): - inputs: list[CameraInput] = Field(title="Camera inputs.") + inputs: list[CameraInput] = Field( + title="Camera inputs", + description="List of input stream definitions (paths and roles) for this camera.", + ) @field_validator("inputs") @classmethod diff --git a/frigate/config/camera/genai.py b/frigate/config/camera/genai.py index a4d9199af..fae0ae577 100644 --- a/frigate/config/camera/genai.py +++ b/frigate/config/camera/genai.py @@ -6,7 +6,7 @@ from pydantic import Field from ..base import FrigateBaseModel from ..env import EnvString -__all__ = ["GenAIConfig", "GenAIProviderEnum"] +__all__ = ["GenAIConfig", "GenAIProviderEnum", "GenAIRoleEnum"] class GenAIProviderEnum(str, Enum): @@ -14,18 +14,56 @@ class GenAIProviderEnum(str, Enum): azure_openai = "azure_openai" gemini = "gemini" ollama = "ollama" + llamacpp = "llamacpp" + + +class GenAIRoleEnum(str, Enum): + tools = "tools" + vision = "vision" + embeddings = "embeddings" class GenAIConfig(FrigateBaseModel): """Primary GenAI Config to define GenAI Provider.""" - api_key: Optional[EnvString] = Field(default=None, title="Provider API key.") - base_url: Optional[str] = Field(default=None, title="Provider base url.") - model: str = Field(default="gpt-4o", title="GenAI model.") - provider: GenAIProviderEnum | None = Field(default=None, title="GenAI provider.") + api_key: Optional[EnvString] = Field( + default=None, + title="API key", + description="API key required by some providers (can also be set via environment variables).", + ) + base_url: Optional[str] = Field( + default=None, + title="Base URL", + description="Base URL for 
self-hosted or compatible providers (for example an Ollama instance).", + ) + model: str = Field( + default="gpt-4o", + title="Model", + description="The model to use from the provider for generating descriptions or summaries.", + ) + provider: GenAIProviderEnum | None = Field( + default=None, + title="Provider", + description="The GenAI provider to use (for example: ollama, gemini, openai).", + ) + roles: list[GenAIRoleEnum] = Field( + default_factory=lambda: [ + GenAIRoleEnum.embeddings, + GenAIRoleEnum.vision, + GenAIRoleEnum.tools, + ], + title="Roles", + description="GenAI roles (tools, vision, embeddings); one provider per role.", + ) provider_options: dict[str, Any] = Field( - default={}, title="GenAI Provider extra options." + default={}, + title="Provider options", + description="Additional provider-specific options to pass to the GenAI client.", + json_schema_extra={"additionalProperties": {"type": "string"}}, ) runtime_options: dict[str, Any] = Field( - default={}, title="Options to pass during inference calls." 
+ default={}, + title="Runtime options", + description="Runtime options passed to the provider for each inference call.", + json_schema_extra={"additionalProperties": {"type": "string"}}, ) diff --git a/frigate/config/camera/live.py b/frigate/config/camera/live.py index 13ae2d04f..54b5a2bfd 100644 --- a/frigate/config/camera/live.py +++ b/frigate/config/camera/live.py @@ -10,7 +10,18 @@ __all__ = ["CameraLiveConfig"] class CameraLiveConfig(FrigateBaseModel): streams: Dict[str, str] = Field( default_factory=list, - title="Friendly names and restream names to use for live view.", + title="Live stream names", + description="Mapping of configured stream names to restream/go2rtc names used for live playback.", + ) + height: int = Field( + default=720, + title="Live height", + description="Height (pixels) to render the jsmpeg live stream in the Web UI; must be <= detect stream height.", + ) + quality: int = Field( + default=8, + ge=1, + le=31, + title="Live quality", + description="Encoding quality for the jsmpeg stream (1 highest, 31 lowest).", ) - height: int = Field(default=720, title="Live camera view height") - quality: int = Field(default=8, ge=1, le=31, title="Live camera view quality") diff --git a/frigate/config/camera/mask.py b/frigate/config/camera/mask.py new file mode 100644 index 000000000..dbe0f063c --- /dev/null +++ b/frigate/config/camera/mask.py @@ -0,0 +1,85 @@ +"""Mask configuration for motion and object masks.""" + +from typing import Any, Optional, Union + +from pydantic import Field, field_serializer + +from ..base import FrigateBaseModel + +__all__ = ["MotionMaskConfig", "ObjectMaskConfig"] + + +class MotionMaskConfig(FrigateBaseModel): + """Configuration for a single motion mask.""" + + friendly_name: Optional[str] = Field( + default=None, + title="Friendly name", + description="A friendly name for this motion mask used in the Frigate UI", + ) + enabled: bool = Field( + default=True, + title="Enabled", + description="Enable or disable this motion 
mask", + ) + coordinates: Union[str, list[str]] = Field( + default="", + title="Coordinates", + description="Ordered x,y coordinates defining the motion mask polygon used to include/exclude areas.", + ) + raw_coordinates: Union[str, list[str]] = "" + enabled_in_config: Optional[bool] = Field( + default=None, title="Keep track of original state of motion mask." + ) + + def get_formatted_name(self, mask_id: str) -> str: + """Return the friendly name if set, otherwise return a formatted version of the mask ID.""" + if self.friendly_name: + return self.friendly_name + return mask_id.replace("_", " ").title() + + @field_serializer("coordinates", when_used="json") + def serialize_coordinates(self, value: Any, info): + return self.raw_coordinates if self.raw_coordinates else value + + @field_serializer("raw_coordinates", when_used="json") + def serialize_raw_coordinates(self, value: Any, info): + return None + + +class ObjectMaskConfig(FrigateBaseModel): + """Configuration for a single object mask.""" + + friendly_name: Optional[str] = Field( + default=None, + title="Friendly name", + description="A friendly name for this object mask used in the Frigate UI", + ) + enabled: bool = Field( + default=True, + title="Enabled", + description="Enable or disable this object mask", + ) + coordinates: Union[str, list[str]] = Field( + default="", + title="Coordinates", + description="Ordered x,y coordinates defining the object mask polygon used to include/exclude areas.", + ) + raw_coordinates: Union[str, list[str]] = "" + enabled_in_config: Optional[bool] = Field( + default=None, title="Keep track of original state of object mask." 
+ ) + + @field_serializer("coordinates", when_used="json") + def serialize_coordinates(self, value: Any, info): + return self.raw_coordinates if self.raw_coordinates else value + + @field_serializer("raw_coordinates", when_used="json") + def serialize_raw_coordinates(self, value: Any, info): + return None + + def get_formatted_name(self, mask_id: str) -> str: + """Return the friendly name if set, otherwise return a formatted version of the mask ID.""" + if self.friendly_name: + return self.friendly_name + return mask_id.replace("_", " ").title() diff --git a/frigate/config/camera/motion.py b/frigate/config/camera/motion.py index 65c03f731..ebba8613c 100644 --- a/frigate/config/camera/motion.py +++ b/frigate/config/camera/motion.py @@ -1,43 +1,89 @@ -from typing import Any, Optional, Union +from typing import Any, Optional from pydantic import Field, field_serializer from ..base import FrigateBaseModel +from .mask import MotionMaskConfig __all__ = ["MotionConfig"] class MotionConfig(FrigateBaseModel): - enabled: bool = Field(default=True, title="Enable motion on all cameras.") + enabled: bool = Field( + default=True, + title="Enable motion detection", + description="Enable or disable motion detection for all cameras; can be overridden per-camera.", + ) threshold: int = Field( default=30, - title="Motion detection threshold (1-255).", + title="Motion threshold", + description="Pixel difference threshold used by the motion detector; higher values reduce sensitivity (range 1-255).", ge=1, le=255, ) lightning_threshold: float = Field( - default=0.8, title="Lightning detection threshold (0.3-1.0).", ge=0.3, le=1.0 + default=0.8, + title="Lightning threshold", + description="Threshold to detect and ignore brief lighting spikes (lower is more sensitive, values between 0.3 and 1.0). This does not prevent motion detection entirely; it merely causes the detector to stop analyzing additional frames once the threshold is exceeded. 
Motion-based recordings are still created during these events.", + ge=0.3, + le=1.0, ) - improve_contrast: bool = Field(default=True, title="Improve Contrast") - contour_area: Optional[int] = Field(default=10, title="Contour Area") - delta_alpha: float = Field(default=0.2, title="Delta Alpha") - frame_alpha: float = Field(default=0.01, title="Frame Alpha") - frame_height: Optional[int] = Field(default=100, title="Frame Height") - mask: Union[str, list[str]] = Field( - default="", title="Coordinates polygon for the motion mask." + skip_motion_threshold: Optional[float] = Field( + default=None, + title="Skip motion threshold", + description="If set to a value between 0.0 and 1.0, and more than this fraction of the image changes in a single frame, the detector will return no motion boxes and immediately recalibrate. This can save CPU and reduce false positives during lightning, storms, etc., but may miss real events such as a PTZ camera auto‑tracking an object. The trade‑off is between dropping a few megabytes of recordings versus reviewing a couple short clips. 
Leave unset (None) to disable this feature.", + ge=0.0, + le=1.0, + ) + improve_contrast: bool = Field( + default=True, + title="Improve contrast", + description="Apply contrast improvement to frames before motion analysis to help detection.", + ) + contour_area: Optional[int] = Field( + default=10, + title="Contour area", + description="Minimum contour area in pixels required for a motion contour to be counted.", + ) + delta_alpha: float = Field( + default=0.2, + title="Delta alpha", + description="Alpha blending factor used in frame differencing for motion calculation.", + ) + frame_alpha: float = Field( + default=0.01, + title="Frame alpha", + description="Alpha value used when blending frames for motion preprocessing.", + ) + frame_height: Optional[int] = Field( + default=100, + title="Frame height", + description="Height in pixels to scale frames to when computing motion.", + ) + mask: dict[str, Optional[MotionMaskConfig]] = Field( + default_factory=dict, + title="Mask coordinates", + description="Ordered x,y coordinates defining the motion mask polygon used to include/exclude areas.", ) mqtt_off_delay: int = Field( default=30, - title="Delay for updating MQTT with no motion detected.", + title="MQTT off delay", + description="Seconds to wait after last motion before publishing an MQTT 'off' state.", ) enabled_in_config: Optional[bool] = Field( - default=None, title="Keep track of original state of motion detection." 
+ default=None, + title="Original motion state", + description="Indicates whether motion detection was enabled in the original static configuration.", + ) + raw_mask: dict[str, Optional[MotionMaskConfig]] = Field( + default_factory=dict, exclude=True ) - raw_mask: Union[str, list[str]] = "" @field_serializer("mask", when_used="json") def serialize_mask(self, value: Any, info): - return self.raw_mask + if self.raw_mask: + return self.raw_mask + return value @field_serializer("raw_mask", when_used="json") def serialize_raw_mask(self, value: Any, info): diff --git a/frigate/config/camera/mqtt.py b/frigate/config/camera/mqtt.py index 132fee059..5f8da1a73 100644 --- a/frigate/config/camera/mqtt.py +++ b/frigate/config/camera/mqtt.py @@ -6,18 +6,40 @@ __all__ = ["CameraMqttConfig"] class CameraMqttConfig(FrigateBaseModel): - enabled: bool = Field(default=True, title="Send image over MQTT.") - timestamp: bool = Field(default=True, title="Add timestamp to MQTT image.") - bounding_box: bool = Field(default=True, title="Add bounding box to MQTT image.") - crop: bool = Field(default=True, title="Crop MQTT image to detected object.") - height: int = Field(default=270, title="MQTT image height.") + enabled: bool = Field( + default=True, + title="Send image", + description="Enable publishing image snapshots for objects to MQTT topics for this camera.", + ) + timestamp: bool = Field( + default=True, + title="Add timestamp", + description="Overlay a timestamp on images published to MQTT.", + ) + bounding_box: bool = Field( + default=True, + title="Add bounding box", + description="Draw bounding boxes on images published over MQTT.", + ) + crop: bool = Field( + default=True, + title="Crop image", + description="Crop images published to MQTT to the detected object's bounding box.", + ) + height: int = Field( + default=270, + title="Image height", + description="Height (pixels) to resize images published over MQTT.", + ) required_zones: list[str] = Field( default_factory=list, - 
title="List of required zones to be entered in order to send the image.", + title="Required zones", + description="Zones that an object must enter for an MQTT image to be published.", ) quality: int = Field( default=70, - title="Quality of the encoded jpeg (0-100).", + title="JPEG quality", + description="JPEG quality for images published to MQTT (0-100).", ge=0, le=100, ) diff --git a/frigate/config/camera/notification.py b/frigate/config/camera/notification.py index ce1ac8223..dabf94675 100644 --- a/frigate/config/camera/notification.py +++ b/frigate/config/camera/notification.py @@ -8,11 +8,24 @@ __all__ = ["NotificationConfig"] class NotificationConfig(FrigateBaseModel): - enabled: bool = Field(default=False, title="Enable notifications") - email: Optional[str] = Field(default=None, title="Email required for push.") + enabled: bool = Field( + default=False, + title="Enable notifications", + description="Enable or disable notifications for all cameras; can be overridden per-camera.", + ) + email: Optional[str] = Field( + default=None, + title="Notification email", + description="Email address used for push notifications or required by certain notification providers.", + ) cooldown: int = Field( - default=0, ge=0, title="Cooldown period for notifications (time in seconds)." + default=0, + ge=0, + title="Cooldown period", + description="Cooldown (seconds) between notifications to avoid spamming recipients.", ) enabled_in_config: Optional[bool] = Field( - default=None, title="Keep track of original state of notifications." 
+ default=None, + title="Original notifications state", + description="Indicates whether notifications were enabled in the original static configuration.", ) diff --git a/frigate/config/camera/objects.py b/frigate/config/camera/objects.py index 7b6317dd0..e93778f23 100644 --- a/frigate/config/camera/objects.py +++ b/frigate/config/camera/objects.py @@ -3,6 +3,7 @@ from typing import Any, Optional, Union from pydantic import Field, PrivateAttr, field_serializer, field_validator from ..base import FrigateBaseModel +from .mask import ObjectMaskConfig __all__ = ["ObjectConfig", "GenAIObjectConfig", "FilterConfig"] @@ -13,36 +14,48 @@ DEFAULT_TRACKED_OBJECTS = ["person"] class FilterConfig(FrigateBaseModel): min_area: Union[int, float] = Field( default=0, - title="Minimum area of bounding box for object to be counted. Can be pixels (int) or percentage (float between 0.000001 and 0.99).", + title="Minimum object area", + description="Minimum bounding box area (pixels or percentage) required for this object type. Can be pixels (int) or percentage (float between 0.000001 and 0.99).", ) max_area: Union[int, float] = Field( default=24000000, - title="Maximum area of bounding box for object to be counted. Can be pixels (int) or percentage (float between 0.000001 and 0.99).", + title="Maximum object area", + description="Maximum bounding box area (pixels or percentage) allowed for this object type. 
Can be pixels (int) or percentage (float between 0.000001 and 0.99).", ) min_ratio: float = Field( default=0, - title="Minimum ratio of bounding box's width/height for object to be counted.", + title="Minimum aspect ratio", + description="Minimum width/height ratio required for the bounding box to qualify.", ) max_ratio: float = Field( default=24000000, - title="Maximum ratio of bounding box's width/height for object to be counted.", + title="Maximum aspect ratio", + description="Maximum width/height ratio allowed for the bounding box to qualify.", ) threshold: float = Field( default=0.7, - title="Average detection confidence threshold for object to be counted.", + title="Confidence threshold", + description="Average detection confidence threshold required for the object to be considered a true positive.", ) min_score: float = Field( - default=0.5, title="Minimum detection confidence for object to be counted." + default=0.5, + title="Minimum confidence", + description="Minimum single-frame detection confidence required for the object to be counted.", ) - mask: Optional[Union[str, list[str]]] = Field( - default=None, - title="Detection area polygon mask for this filter configuration.", + mask: dict[str, Optional[ObjectMaskConfig]] = Field( + default_factory=dict, + title="Filter mask", + description="Polygon coordinates defining where this filter applies within the frame.", + ) + raw_mask: dict[str, Optional[ObjectMaskConfig]] = Field( + default_factory=dict, exclude=True ) - raw_mask: Union[str, list[str]] = "" @field_serializer("mask", when_used="json") def serialize_mask(self, value: Any, info): - return self.raw_mask + if self.raw_mask: + return self.raw_mask + return value @field_serializer("raw_mask", when_used="json") def serialize_raw_mask(self, value: Any, info): @@ -51,46 +64,64 @@ class FilterConfig(FrigateBaseModel): class GenAIObjectTriggerConfig(FrigateBaseModel): tracked_object_end: bool = Field( - default=True, title="Send once the object is no 
longer tracked." + default=True, + title="Send on end", + description="Send a request to GenAI when the tracked object ends.", ) after_significant_updates: Optional[int] = Field( default=None, - title="Send an early request to generative AI when X frames accumulated.", + title="Early GenAI trigger", + description="Send a request to GenAI after a specified number of significant updates for the tracked object.", ge=1, ) class GenAIObjectConfig(FrigateBaseModel): - enabled: bool = Field(default=False, title="Enable GenAI for camera.") + enabled: bool = Field( + default=False, + title="Enable GenAI", + description="Enable GenAI generation of descriptions for tracked objects by default.", + ) use_snapshot: bool = Field( - default=False, title="Use snapshots for generating descriptions." + default=False, + title="Use snapshots", + description="Use object snapshots instead of thumbnails for GenAI description generation.", ) prompt: str = Field( default="Analyze the sequence of images containing the {label}. Focus on the likely intent or behavior of the {label} based on its actions and movement, rather than describing its appearance or the surroundings. Consider what the {label} is doing, why, and what it might do next.", - title="Default caption prompt.", + title="Caption prompt", + description="Default prompt template used when generating descriptions with GenAI.", ) object_prompts: dict[str, str] = Field( - default_factory=dict, title="Object specific prompts." 
+ default_factory=dict, + title="Object prompts", + description="Per-object prompts to customize GenAI outputs for specific labels.", ) objects: Union[str, list[str]] = Field( default_factory=list, - title="List of objects to run generative AI for.", + title="GenAI objects", + description="List of object labels to send to GenAI by default.", ) required_zones: Union[str, list[str]] = Field( default_factory=list, - title="List of required zones to be entered in order to run generative AI.", + title="Required zones", + description="Zones that must be entered for objects to qualify for GenAI description generation.", ) debug_save_thumbnails: bool = Field( default=False, - title="Save thumbnails sent to generative AI for debugging purposes.", + title="Save thumbnails", + description="Save thumbnails sent to GenAI for debugging and review.", ) send_triggers: GenAIObjectTriggerConfig = Field( default_factory=GenAIObjectTriggerConfig, - title="What triggers to use to send frames to generative AI for a tracked object.", + title="GenAI triggers", + description="Defines when frames should be sent to GenAI (on end, after updates, etc.).", ) enabled_in_config: Optional[bool] = Field( - default=None, title="Keep track of original state of generative AI." + default=None, + title="Original GenAI state", + description="Indicates whether GenAI was enabled in the original static config.", ) @field_validator("required_zones", mode="before") @@ -103,14 +134,28 @@ class GenAIObjectConfig(FrigateBaseModel): class ObjectConfig(FrigateBaseModel): - track: list[str] = Field(default=DEFAULT_TRACKED_OBJECTS, title="Objects to track.") - filters: dict[str, FilterConfig] = Field( - default_factory=dict, title="Object filters." 
+ track: list[str] = Field( + default=DEFAULT_TRACKED_OBJECTS, + title="Objects to track", + description="List of object labels to track for all cameras; can be overridden per-camera.", + ) + filters: dict[str, FilterConfig] = Field( + default_factory=dict, + title="Object filters", + description="Filters applied to detected objects to reduce false positives (area, ratio, confidence).", + ) + mask: dict[str, Optional[ObjectMaskConfig]] = Field( + default_factory=dict, + title="Object mask", + description="Mask polygon used to prevent object detection in specified areas.", + ) + raw_mask: dict[str, Optional[ObjectMaskConfig]] = Field( + default_factory=dict, exclude=True ) - mask: Union[str, list[str]] = Field(default="", title="Object mask.") genai: GenAIObjectConfig = Field( default_factory=GenAIObjectConfig, - title="Config for using genai to analyze objects.", + title="GenAI object config", + description="GenAI options for describing tracked objects and sending frames for generation.", ) _all_objects: list[str] = PrivateAttr() @@ -129,3 +174,13 @@ class ObjectConfig(FrigateBaseModel): enabled_labels.update(camera.objects.track) self._all_objects = list(enabled_labels) + + @field_serializer("mask", when_used="json") + def serialize_mask(self, value: Any, info): + if self.raw_mask: + return self.raw_mask + return value + + @field_serializer("raw_mask", when_used="json") + def serialize_raw_mask(self, value: Any, info): + return None diff --git a/frigate/config/camera/onvif.py b/frigate/config/camera/onvif.py index d4955799b..c5f1e19f3 100644 --- a/frigate/config/camera/onvif.py +++ b/frigate/config/camera/onvif.py @@ -17,37 +17,57 @@ class ZoomingModeEnum(str, Enum): class PtzAutotrackConfig(FrigateBaseModel): - enabled: bool = Field(default=False, title="Enable PTZ object autotracking.") + enabled: bool = Field( + default=False, + title="Enable Autotracking", + description="Enable or disable automatic PTZ camera tracking of detected objects.", + ) 
calibrate_on_startup: bool = Field( - default=False, title="Perform a camera calibration when Frigate starts." + default=False, + title="Calibrate on start", + description="Measure PTZ motor speeds on startup to improve tracking accuracy. Frigate will update config with movement_weights after calibration.", ) zooming: ZoomingModeEnum = Field( - default=ZoomingModeEnum.disabled, title="Autotracker zooming mode." + default=ZoomingModeEnum.disabled, + title="Zoom mode", + description="Control zoom behavior: disabled (pan/tilt only), absolute (most compatible), or relative (concurrent pan/tilt/zoom).", ) zoom_factor: float = Field( default=0.3, - title="Zooming factor (0.1-0.75).", + title="Zoom factor", + description="Control zoom level on tracked objects. Lower values keep more scene in view; higher values zoom in closer but may lose tracking. Values between 0.1 and 0.75.", ge=0.1, le=0.75, ) - track: list[str] = Field(default=DEFAULT_TRACKED_OBJECTS, title="Objects to track.") + track: list[str] = Field( + default=DEFAULT_TRACKED_OBJECTS, + title="Tracked objects", + description="List of object types that should trigger autotracking.", + ) required_zones: list[str] = Field( default_factory=list, - title="List of required zones to be entered in order to begin autotracking.", + title="Required zones", + description="Objects must enter one of these zones before autotracking begins.", ) return_preset: str = Field( default="home", - title="Name of camera preset to return to when object tracking is over.", + title="Return preset", + description="ONVIF preset name configured in camera firmware to return to after tracking ends.", ) timeout: int = Field( - default=10, title="Seconds to delay before returning to preset." 
+ default=10, + title="Return timeout", + description="Wait this many seconds after losing tracking before returning camera to preset position.", ) movement_weights: Optional[Union[str, list[str]]] = Field( default_factory=list, - title="Internal value used for PTZ movements based on the speed of your camera's motor.", + title="Movement weights", + description="Calibration values automatically generated by camera calibration. Do not modify manually.", ) enabled_in_config: Optional[bool] = Field( - default=None, title="Keep track of original state of autotracking." + default=None, + title="Original autotrack state", + description="Internal field to track whether autotracking was enabled in configuration.", ) @field_validator("movement_weights", mode="before") @@ -72,16 +92,38 @@ class PtzAutotrackConfig(FrigateBaseModel): class OnvifConfig(FrigateBaseModel): - host: str = Field(default="", title="Onvif Host") - port: int = Field(default=8000, title="Onvif Port") - user: Optional[EnvString] = Field(default=None, title="Onvif Username") - password: Optional[EnvString] = Field(default=None, title="Onvif Password") - tls_insecure: bool = Field(default=False, title="Onvif Disable TLS verification") + host: str = Field( + default="", + title="ONVIF host", + description="Host (and optional scheme) for the ONVIF service for this camera.", + ) + port: int = Field( + default=8000, + title="ONVIF port", + description="Port number for the ONVIF service.", + ) + user: Optional[EnvString] = Field( + default=None, + title="ONVIF username", + description="Username for ONVIF authentication; some devices require admin user for ONVIF.", + ) + password: Optional[EnvString] = Field( + default=None, + title="ONVIF password", + description="Password for ONVIF authentication.", + ) + tls_insecure: bool = Field( + default=False, + title="Disable TLS verify", + description="Skip TLS verification and disable digest auth for ONVIF (unsafe; use in safe networks only).", + ) autotracking: 
PtzAutotrackConfig = Field( default_factory=PtzAutotrackConfig, - title="PTZ auto tracking config.", + title="Autotracking", + description="Automatically track moving objects and keep them centered in the frame using PTZ camera movements.", ) ignore_time_mismatch: bool = Field( default=False, - title="Onvif Ignore Time Synchronization Mismatch Between Camera and Server", + title="Ignore time mismatch", + description="Ignore time synchronization differences between camera and Frigate server for ONVIF communication.", ) diff --git a/frigate/config/camera/record.py b/frigate/config/camera/record.py index 09a7a84d5..7eae7500d 100644 --- a/frigate/config/camera/record.py +++ b/frigate/config/camera/record.py @@ -1,5 +1,5 @@ from enum import Enum -from typing import Optional +from typing import Optional, Union from pydantic import Field @@ -19,11 +19,14 @@ __all__ = [ "RetainModeEnum", ] -DEFAULT_TIME_LAPSE_FFMPEG_ARGS = "-vf setpts=0.04*PTS -r 30" - class RecordRetainConfig(FrigateBaseModel): - days: float = Field(default=0, ge=0, title="Default retention period.") + days: float = Field( + default=0, + ge=0, + title="Retention days", + description="Days to retain recordings.", + ) class RetainModeEnum(str, Enum): @@ -33,22 +36,37 @@ class RetainModeEnum(str, Enum): class ReviewRetainConfig(FrigateBaseModel): - days: float = Field(default=10, ge=0, title="Default retention period.") - mode: RetainModeEnum = Field(default=RetainModeEnum.motion, title="Retain mode.") + days: float = Field( + default=10, + ge=0, + title="Retention days", + description="Number of days to retain recordings of detection events.", + ) + mode: RetainModeEnum = Field( + default=RetainModeEnum.motion, + title="Retention mode", + description="Mode for retention: all (save all segments), motion (save segments with motion), or active_objects (save segments with active objects).", + ) class EventsConfig(FrigateBaseModel): pre_capture: int = Field( default=5, - title="Seconds to retain before event 
starts.", + title="Pre-capture seconds", + description="Number of seconds before the detection event to include in the recording.", le=MAX_PRE_CAPTURE, ge=0, ) post_capture: int = Field( - default=5, ge=0, title="Seconds to retain after event ends." + default=5, + ge=0, + title="Post-capture seconds", + description="Number of seconds after the detection event to include in the recording.", ) retain: ReviewRetainConfig = Field( - default_factory=ReviewRetainConfig, title="Event retention settings." + default_factory=ReviewRetainConfig, + title="Event retention", + description="Retention settings for recordings of detection events.", ) @@ -62,46 +80,65 @@ class RecordQualityEnum(str, Enum): class RecordPreviewConfig(FrigateBaseModel): quality: RecordQualityEnum = Field( - default=RecordQualityEnum.medium, title="Quality of recording preview." + default=RecordQualityEnum.medium, + title="Preview quality", + description="Preview quality level (very_low, low, medium, high, very_high).", ) class RecordExportConfig(FrigateBaseModel): - timelapse_args: str = Field( - default=DEFAULT_TIME_LAPSE_FFMPEG_ARGS, title="Timelapse Args" + hwaccel_args: Union[str, list[str]] = Field( + default="auto", + title="Export hwaccel args", + description="Hardware acceleration args to use for export/transcode operations.", ) class RecordConfig(FrigateBaseModel): - enabled: bool = Field(default=False, title="Enable record on all cameras.") - sync_recordings: bool = Field( - default=False, title="Sync recordings with disk on startup and once a day." 
+ enabled: bool = Field( + default=False, + title="Enable recording", + description="Enable or disable recording for all cameras; can be overridden per-camera.", ) expire_interval: int = Field( default=60, - title="Number of minutes to wait between cleanup runs.", + title="Record cleanup interval", + description="Minutes between cleanup passes that remove expired recording segments.", ) continuous: RecordRetainConfig = Field( default_factory=RecordRetainConfig, - title="Continuous recording retention settings.", + title="Continuous retention", + description="Number of days to retain recordings regardless of tracked objects or motion. Set to 0 if you only want to retain recordings of alerts and detections.", ) motion: RecordRetainConfig = Field( - default_factory=RecordRetainConfig, title="Motion recording retention settings." + default_factory=RecordRetainConfig, + title="Motion retention", + description="Number of days to retain recordings triggered by motion regardless of tracked objects. Set to 0 if you only want to retain recordings of alerts and detections.", ) detections: EventsConfig = Field( - default_factory=EventsConfig, title="Detection specific retention settings." + default_factory=EventsConfig, + title="Detection retention", + description="Recording retention settings for detection events including pre/post capture durations.", ) alerts: EventsConfig = Field( - default_factory=EventsConfig, title="Alert specific retention settings." 
+ default_factory=EventsConfig, + title="Alert retention", + description="Recording retention settings for alert events including pre/post capture durations.", ) export: RecordExportConfig = Field( - default_factory=RecordExportConfig, title="Recording Export Config" + default_factory=RecordExportConfig, + title="Export config", + description="Settings used when exporting recordings such as timelapse and hardware acceleration.", ) preview: RecordPreviewConfig = Field( - default_factory=RecordPreviewConfig, title="Recording Preview Config" + default_factory=RecordPreviewConfig, + title="Preview config", + description="Settings controlling the quality of recording previews shown in the UI.", ) enabled_in_config: Optional[bool] = Field( - default=None, title="Keep track of original state of recording." + default=None, + title="Original recording state", + description="Indicates whether recording was enabled in the original static configuration.", ) @property diff --git a/frigate/config/camera/review.py b/frigate/config/camera/review.py index 6e55b6242..ff07fb368 100644 --- a/frigate/config/camera/review.py +++ b/frigate/config/camera/review.py @@ -21,22 +21,32 @@ DEFAULT_ALERT_OBJECTS = ["person", "car"] class AlertsConfig(FrigateBaseModel): """Configure alerts""" - enabled: bool = Field(default=True, title="Enable alerts.") + enabled: bool = Field( + default=True, + title="Enable alerts", + description="Enable or disable alert generation for all cameras; can be overridden per-camera.", + ) labels: list[str] = Field( - default=DEFAULT_ALERT_OBJECTS, title="Labels to create alerts for." 
+ default=DEFAULT_ALERT_OBJECTS, + title="Alert labels", + description="List of object labels that qualify as alerts (for example: car, person).", ) required_zones: Union[str, list[str]] = Field( default_factory=list, - title="List of required zones to be entered in order to save the event as an alert.", + title="Required zones", + description="Zones that an object must enter to be considered an alert; leave empty to allow any zone.", ) enabled_in_config: Optional[bool] = Field( - default=None, title="Keep track of original state of alerts." + default=None, + title="Original alerts state", + description="Tracks whether alerts were originally enabled in the static configuration.", ) cutoff_time: int = Field( default=40, - title="Time to cutoff alerts after no alert-causing activity has occurred.", + title="Alerts cutoff time", + description="Seconds to wait after no alert-causing activity before cutting off an alert.", ) @field_validator("required_zones", mode="before") @@ -51,22 +61,32 @@ class AlertsConfig(FrigateBaseModel): class DetectionsConfig(FrigateBaseModel): """Configure detections""" - enabled: bool = Field(default=True, title="Enable detections.") + enabled: bool = Field( + default=True, + title="Enable detections", + description="Enable or disable detection events for all cameras; can be overridden per-camera.", + ) labels: Optional[list[str]] = Field( - default=None, title="Labels to create detections for." 
+ default=None, + title="Detection labels", + description="List of object labels that qualify as detection events.", ) required_zones: Union[str, list[str]] = Field( default_factory=list, - title="List of required zones to be entered in order to save the event as a detection.", + title="Required zones", + description="Zones that an object must enter to be considered a detection; leave empty to allow any zone.", ) cutoff_time: int = Field( default=30, - title="Time to cutoff detection after no detection-causing activity has occurred.", + title="Detections cutoff time", + description="Seconds to wait after no detection-causing activity before cutting off a detection.", ) enabled_in_config: Optional[bool] = Field( - default=None, title="Keep track of original state of detections." + default=None, + title="Original detections state", + description="Tracks whether detections were originally enabled in the static configuration.", ) @field_validator("required_zones", mode="before") @@ -81,27 +101,42 @@ class DetectionsConfig(FrigateBaseModel): class GenAIReviewConfig(FrigateBaseModel): enabled: bool = Field( default=False, - title="Enable GenAI descriptions for review items.", + title="Enable GenAI descriptions", + description="Enable or disable GenAI-generated descriptions and summaries for review items.", + ) + alerts: bool = Field( + default=True, + title="Enable GenAI for alerts", + description="Use GenAI to generate descriptions for alert items.", + ) + detections: bool = Field( + default=False, + title="Enable GenAI for detections", + description="Use GenAI to generate descriptions for detection items.", ) - alerts: bool = Field(default=True, title="Enable GenAI for alerts.") - detections: bool = Field(default=False, title="Enable GenAI for detections.") image_source: ImageSourceEnum = Field( default=ImageSourceEnum.preview, - title="Image source for review descriptions.", + title="Review image source", + description="Source of images sent to GenAI ('preview' or 
'recordings'); 'recordings' uses higher quality frames but more tokens.", ) additional_concerns: list[str] = Field( default=[], - title="Additional concerns that GenAI should make note of on this camera.", + title="Additional concerns", + description="A list of additional concerns or notes the GenAI should consider when evaluating activity on this camera.", ) debug_save_thumbnails: bool = Field( default=False, - title="Save thumbnails sent to generative AI for debugging purposes.", + title="Save thumbnails", + description="Save thumbnails that are sent to the GenAI provider for debugging and review.", ) enabled_in_config: Optional[bool] = Field( - default=None, title="Keep track of original state of generative AI." + default=None, + title="Original GenAI state", + description="Tracks whether GenAI review was originally enabled in the static configuration.", ) preferred_language: str | None = Field( - title="Preferred language for GenAI Response", + title="Preferred language", + description="Preferred language to request from the GenAI provider for generated responses.", default=None, ) activity_context_prompt: str = Field( @@ -139,19 +174,24 @@ Evaluate in this order: 3. **Escalate to Level 2 if:** Weapons, break-in tools, forced entry in progress, violence, or active property damage visible (escalates from Level 0 or 1) The mere presence of an unidentified person in private areas during late night hours is inherently suspicious and warrants human review, regardless of what activity they appear to be doing or how brief the sequence is.""", - title="Custom activity context prompt defining normal and suspicious activity patterns for this property.", + title="Activity context prompt", + description="Custom prompt describing what is and is not suspicious activity to provide context for GenAI summaries.", ) class ReviewConfig(FrigateBaseModel): - """Configure reviews""" - alerts: AlertsConfig = Field( - default_factory=AlertsConfig, title="Review alerts config." 
+ default_factory=AlertsConfig, + title="Alerts config", + description="Settings for which tracked objects generate alerts and how alerts are retained.", ) detections: DetectionsConfig = Field( - default_factory=DetectionsConfig, title="Review detections config." + default_factory=DetectionsConfig, + title="Detections config", + description="Settings for creating detection events (non-alert) and how long to keep them.", ) genai: GenAIReviewConfig = Field( - default_factory=GenAIReviewConfig, title="Review description genai config." + default_factory=GenAIReviewConfig, + title="GenAI config", + description="Controls use of generative AI for producing descriptions and summaries of review items.", ) diff --git a/frigate/config/camera/snapshots.py b/frigate/config/camera/snapshots.py index 156b56a7e..c367aad8e 100644 --- a/frigate/config/camera/snapshots.py +++ b/frigate/config/camera/snapshots.py @@ -9,36 +9,68 @@ __all__ = ["SnapshotsConfig", "RetainConfig"] class RetainConfig(FrigateBaseModel): - default: float = Field(default=10, title="Default retention period.") - mode: RetainModeEnum = Field(default=RetainModeEnum.motion, title="Retain mode.") + default: float = Field( + default=10, + title="Default retention", + description="Default number of days to retain snapshots.", + ) + mode: RetainModeEnum = Field( + default=RetainModeEnum.motion, + title="Retention mode", + description="Mode for retention: all (save all segments), motion (save segments with motion), or active_objects (save segments with active objects).", + ) objects: dict[str, float] = Field( - default_factory=dict, title="Object retention period." 
+ default_factory=dict, + title="Object retention", + description="Per-object overrides for snapshot retention days.", ) class SnapshotsConfig(FrigateBaseModel): - enabled: bool = Field(default=False, title="Snapshots enabled.") + enabled: bool = Field( + default=False, + title="Snapshots enabled", + description="Enable or disable saving snapshots for all cameras; can be overridden per-camera.", + ) clean_copy: bool = Field( - default=True, title="Create a clean copy of the snapshot image." + default=True, + title="Save clean copy", + description="Save an unannotated clean copy of snapshots in addition to annotated ones.", ) timestamp: bool = Field( - default=False, title="Add a timestamp overlay on the snapshot." + default=False, + title="Timestamp overlay", + description="Overlay a timestamp on saved snapshots.", ) bounding_box: bool = Field( - default=True, title="Add a bounding box overlay on the snapshot." + default=True, + title="Bounding box overlay", + description="Draw bounding boxes for tracked objects on saved snapshots.", + ) + crop: bool = Field( + default=False, + title="Crop snapshot", + description="Crop saved snapshots to the detected object's bounding box.", ) - crop: bool = Field(default=False, title="Crop the snapshot to the detected object.") required_zones: list[str] = Field( default_factory=list, - title="List of required zones to be entered in order to save a snapshot.", + title="Required zones", + description="Zones an object must enter for a snapshot to be saved.", + ) + height: Optional[int] = Field( + default=None, + title="Snapshot height", + description="Height (pixels) to resize saved snapshots to; leave empty to preserve original size.", ) - height: Optional[int] = Field(default=None, title="Snapshot image height.") retain: RetainConfig = Field( - default_factory=RetainConfig, title="Snapshot retention." 
+ default_factory=RetainConfig, + title="Snapshot retention", + description="Retention settings for saved snapshots including default days and per-object overrides.", ) quality: int = Field( default=70, - title="Quality of the encoded jpeg (0-100).", + title="JPEG quality", + description="JPEG encode quality for saved snapshots (0-100).", ge=0, le=100, ) diff --git a/frigate/config/camera/timestamp.py b/frigate/config/camera/timestamp.py index fcf352a9b..48ec8240b 100644 --- a/frigate/config/camera/timestamp.py +++ b/frigate/config/camera/timestamp.py @@ -27,9 +27,27 @@ class TimestampPositionEnum(str, Enum): class ColorConfig(FrigateBaseModel): - red: int = Field(default=255, ge=0, le=255, title="Red") - green: int = Field(default=255, ge=0, le=255, title="Green") - blue: int = Field(default=255, ge=0, le=255, title="Blue") + red: int = Field( + default=255, + ge=0, + le=255, + title="Red", + description="Red component (0-255) for timestamp color.", + ) + green: int = Field( + default=255, + ge=0, + le=255, + title="Green", + description="Green component (0-255) for timestamp color.", + ) + blue: int = Field( + default=255, + ge=0, + le=255, + title="Blue", + description="Blue component (0-255) for timestamp color.", + ) class TimestampEffectEnum(str, Enum): @@ -39,11 +57,27 @@ class TimestampEffectEnum(str, Enum): class TimestampStyleConfig(FrigateBaseModel): position: TimestampPositionEnum = Field( - default=TimestampPositionEnum.tl, title="Timestamp position." 
+ default=TimestampPositionEnum.tl, + title="Timestamp position", + description="Position of the timestamp on the image (tl/tr/bl/br).", + ) + format: str = Field( + default=DEFAULT_TIME_FORMAT, + title="Timestamp format", + description="Datetime format string used for timestamps (Python datetime format codes).", + ) + color: ColorConfig = Field( + default_factory=ColorConfig, + title="Timestamp color", + description="RGB color values for the timestamp text (all values 0-255).", + ) + thickness: int = Field( + default=2, + title="Timestamp thickness", + description="Line thickness of the timestamp text.", ) - format: str = Field(default=DEFAULT_TIME_FORMAT, title="Timestamp format.") - color: ColorConfig = Field(default_factory=ColorConfig, title="Timestamp color.") - thickness: int = Field(default=2, title="Timestamp thickness.") effect: Optional[TimestampEffectEnum] = Field( - default=None, title="Timestamp effect." + default=None, + title="Timestamp effect", + description="Visual effect for the timestamp text (none, solid, shadow).", ) diff --git a/frigate/config/camera/ui.py b/frigate/config/camera/ui.py index b6b9c58ad..5e903b254 100644 --- a/frigate/config/camera/ui.py +++ b/frigate/config/camera/ui.py @@ -6,7 +6,13 @@ __all__ = ["CameraUiConfig"] class CameraUiConfig(FrigateBaseModel): - order: int = Field(default=0, title="Order of camera in UI.") - dashboard: bool = Field( - default=True, title="Show this camera in Frigate dashboard UI." + order: int = Field( + default=0, + title="UI order", + description="Numeric order used to sort the camera in the UI (default dashboard and lists); larger numbers appear later.", + ) + dashboard: bool = Field( + default=True, + title="Show in UI", + description="Toggle whether this camera is visible everywhere in the Frigate UI. 
Disabling this will require manually editing the config to view this camera in the UI again.", ) diff --git a/frigate/config/camera/updater.py b/frigate/config/camera/updater.py index 125094f10..44aea527d 100644 --- a/frigate/config/camera/updater.py +++ b/frigate/config/camera/updater.py @@ -80,8 +80,8 @@ class CameraConfigUpdateSubscriber: self.camera_configs[camera] = updated_config return elif update_type == CameraConfigUpdateEnum.remove: - self.config.cameras.pop(camera) - self.camera_configs.pop(camera) + self.config.cameras.pop(camera, None) + self.camera_configs.pop(camera, None) return config = self.camera_configs.get(camera) diff --git a/frigate/config/camera/zone.py b/frigate/config/camera/zone.py index 7df1a1f25..e4737f8dc 100644 --- a/frigate/config/camera/zone.py +++ b/frigate/config/camera/zone.py @@ -14,36 +14,54 @@ logger = logging.getLogger(__name__) class ZoneConfig(BaseModel): friendly_name: Optional[str] = Field( - None, title="Zone friendly name used in the Frigate UI." + None, + title="Zone name", + description="A user-friendly name for the zone, displayed in the Frigate UI. If not set, a formatted version of the zone name will be used.", + ) + enabled: bool = Field( + default=True, + title="Enabled", + description="Enable or disable this zone. Disabled zones are ignored at runtime.", + ) + enabled_in_config: Optional[bool] = Field( + default=None, title="Keep track of original state of zone." ) filters: dict[str, FilterConfig] = Field( - default_factory=dict, title="Zone filters." + default_factory=dict, + title="Zone filters", + description="Filters to apply to objects within this zone. Used to reduce false positives or restrict which objects are considered present in the zone.", ) coordinates: Union[str, list[str]] = Field( - title="Coordinates polygon for the defined zone." + title="Coordinates", + description="Polygon coordinates that define the zone area. Can be a comma-separated string or a list of coordinate strings. 
Coordinates should be relative (0-1) or absolute (legacy).", ) distances: Optional[Union[str, list[str]]] = Field( default_factory=list, - title="Real-world distances for the sides of quadrilateral for the defined zone.", + title="Real-world distances", + description="Optional real-world distances for each side of the zone quadrilateral, used for speed or distance calculations. Must have exactly 4 values if set.", ) inertia: int = Field( default=3, - title="Number of consecutive frames required for object to be considered present in the zone.", + title="Inertia frames", gt=0, + description="Number of consecutive frames an object must be detected in the zone before it is considered present. Helps filter out transient detections.", ) loitering_time: int = Field( default=0, ge=0, - title="Number of seconds that an object must loiter to be considered in the zone.", + title="Loitering seconds", + description="Number of seconds an object must remain in the zone to be considered as loitering. Set to 0 to disable loitering detection.", ) speed_threshold: Optional[float] = Field( default=None, ge=0.1, - title="Minimum speed value for an object to be considered in the zone.", + title="Minimum speed", + description="Minimum speed (in real-world units if distances are set) required for an object to be considered present in the zone. Used for speed-based zone triggers.", ) objects: Union[str, list[str]] = Field( default_factory=list, - title="List of objects that can trigger the zone.", + title="Trigger objects", + description="List of object types (from labelmap) that can trigger this zone. Can be a string or a list of strings. 
If empty, all objects are considered.", ) _color: Optional[tuple[int, int, int]] = PrivateAttr() _contour: np.ndarray = PrivateAttr() diff --git a/frigate/config/camera_group.py b/frigate/config/camera_group.py index 7449e86a1..65319001a 100644 --- a/frigate/config/camera_group.py +++ b/frigate/config/camera_group.py @@ -8,13 +8,21 @@ __all__ = ["CameraGroupConfig"] class CameraGroupConfig(FrigateBaseModel): - """Represents a group of cameras.""" - cameras: Union[str, list[str]] = Field( - default_factory=list, title="List of cameras in this group." + default_factory=list, + title="Camera list", + description="Array of camera names included in this group.", + ) + icon: str = Field( + default="generic", + title="Group icon", + description="Icon used to represent the camera group in the UI.", + ) + order: int = Field( + default=0, + title="Sort order", + description="Numeric order used to sort camera groups in the UI; larger numbers appear later.", ) - icon: str = Field(default="generic", title="Icon that represents camera group.") - order: int = Field(default=0, title="Sort order for group.") @field_validator("cameras", mode="before") @classmethod diff --git a/frigate/config/classification.py b/frigate/config/classification.py index fb8e3de29..a1e7b89a5 100644 --- a/frigate/config/classification.py +++ b/frigate/config/classification.py @@ -43,28 +43,43 @@ class ObjectClassificationType(str, Enum): class AudioTranscriptionConfig(FrigateBaseModel): - enabled: bool = Field(default=False, title="Enable audio transcription.") + enabled: bool = Field( + default=False, + title="Enable audio transcription", + description="Enable or disable automatic audio transcription for all cameras; can be overridden per-camera.", + ) language: str = Field( default="en", - title="Language abbreviation to use for audio event transcription/translation.", + title="Transcription language", + description="Language code used for transcription/translation (for example 'en' for English). 
See https://whisper-api.com/docs/languages/ for supported language codes.", ) device: Optional[EnrichmentsDeviceEnum] = Field( default=EnrichmentsDeviceEnum.CPU, - title="The device used for audio transcription.", + title="Transcription device", + description="Device key (CPU/GPU) to run the transcription model on. Only NVIDIA CUDA GPUs are currently supported for transcription.", ) model_size: str = Field( - default="small", title="The size of the embeddings model used." + default="small", + title="Model size", + description="Model size to use for offline audio event transcription.", ) live_enabled: Optional[bool] = Field( - default=False, title="Enable live transcriptions." + default=False, + title="Live transcription", + description="Enable streaming live transcription for audio as it is received.", ) class BirdClassificationConfig(FrigateBaseModel): - enabled: bool = Field(default=False, title="Enable bird classification.") + enabled: bool = Field( + default=False, + title="Bird classification", + description="Enable or disable bird classification.", + ) threshold: float = Field( default=0.9, - title="Minimum classification score required to be considered a match.", + title="Minimum score", + description="Minimum classification score required to accept a bird classification.", gt=0.0, le=1.0, ) @@ -72,42 +87,62 @@ class BirdClassificationConfig(FrigateBaseModel): class CustomClassificationStateCameraConfig(FrigateBaseModel): crop: list[float, float, float, float] = Field( - title="Crop of image frame on this camera to run classification on." + title="Classification crop", + description="Crop coordinates to use for running classification on this camera.", ) class CustomClassificationStateConfig(FrigateBaseModel): cameras: Dict[str, CustomClassificationStateCameraConfig] = Field( - title="Cameras to run classification on." 
+ title="Classification cameras", + description="Per-camera crop and settings for running state classification.", ) motion: bool = Field( default=False, - title="If classification should be run when motion is detected in the crop.", + title="Run on motion", + description="If true, run classification when motion is detected within the specified crop.", ) interval: int | None = Field( default=None, - title="Interval to run classification on in seconds.", + title="Classification interval", + description="Interval (seconds) between periodic classification runs for state classification.", gt=0, ) class CustomClassificationObjectConfig(FrigateBaseModel): - objects: list[str] = Field(title="Object types to classify.") + objects: list[str] = Field( + default_factory=list, + title="Classify objects", + description="List of object types to run object classification on.", + ) classification_type: ObjectClassificationType = Field( default=ObjectClassificationType.sub_label, - title="Type of classification that is applied.", + title="Classification type", + description="Classification type applied: 'sub_label' (adds sub_label) or other supported types.", ) class CustomClassificationConfig(FrigateBaseModel): - enabled: bool = Field(default=True, title="Enable running the model.") - name: str | None = Field(default=None, title="Name of classification model.") + enabled: bool = Field( + default=True, + title="Enable model", + description="Enable or disable the custom classification model.", + ) + name: str | None = Field( + default=None, + title="Model name", + description="Identifier for the custom classification model to use.", + ) threshold: float = Field( - default=0.8, title="Classification score threshold to change the state." + default=0.8, + title="Score threshold", + description="Score threshold used to change the classification state.", ) save_attempts: int | None = Field( default=None, - title="Number of classification attempts to save in the recent classifications tab. 
If not specified, defaults to 200 for object classification and 100 for state classification.", + title="Save attempts", + description="How many classification attempts to save for recent classifications UI.", ge=0, ) object_config: CustomClassificationObjectConfig | None = Field(default=None) @@ -116,47 +151,76 @@ class CustomClassificationConfig(FrigateBaseModel): class ClassificationConfig(FrigateBaseModel): bird: BirdClassificationConfig = Field( - default_factory=BirdClassificationConfig, title="Bird classification config." + default_factory=BirdClassificationConfig, + title="Bird classification config", + description="Settings specific to bird classification models.", ) custom: Dict[str, CustomClassificationConfig] = Field( - default={}, title="Custom Classification Model Configs." + default={}, + title="Custom Classification Models", + description="Configuration for custom classification models used for objects or state detection.", ) class SemanticSearchConfig(FrigateBaseModel): - enabled: bool = Field(default=False, title="Enable semantic search.") + enabled: bool = Field( + default=False, + title="Enable semantic search", + description="Enable or disable the semantic search feature.", + ) reindex: Optional[bool] = Field( - default=False, title="Reindex all tracked objects on startup." + default=False, + title="Reindex on startup", + description="Trigger a full reindex of historical tracked objects into the embeddings database.", ) model: Optional[SemanticSearchModelEnum] = Field( default=SemanticSearchModelEnum.jinav1, - title="The CLIP model to use for semantic search.", + title="Semantic search model", + description="The embeddings model to use for semantic search (for example 'jinav1').", ) model_size: str = Field( - default="small", title="The size of the embeddings model used." 
+ default="small", + title="Model size", + description="Select model size; 'small' runs on CPU and 'large' typically requires GPU.", ) device: Optional[str] = Field( default=None, - title="The device key to use for semantic search.", + title="Device", description="This is an override, to target a specific device. See https://onnxruntime.ai/docs/execution-providers/ for more information", ) class TriggerConfig(FrigateBaseModel): friendly_name: Optional[str] = Field( - None, title="Trigger friendly name used in the Frigate UI." + None, + title="Friendly name", + description="Optional friendly name displayed in the UI for this trigger.", + ) + enabled: bool = Field( + default=True, + title="Enable this trigger", + description="Enable or disable this semantic search trigger.", + ) + type: TriggerType = Field( + default=TriggerType.DESCRIPTION, + title="Trigger type", + description="Type of trigger: 'thumbnail' (match against image) or 'description' (match against text).", + ) + data: str = Field( + title="Trigger content", + description="Text phrase or thumbnail ID to match against tracked objects.", ) - enabled: bool = Field(default=True, title="Enable this trigger") - type: TriggerType = Field(default=TriggerType.DESCRIPTION, title="Type of trigger") - data: str = Field(title="Trigger content (text phrase or image ID)") threshold: float = Field( - title="Confidence score required to run the trigger", + title="Trigger threshold", + description="Minimum similarity score (0-1) required to activate this trigger.", default=0.8, gt=0.0, le=1.0, ) actions: List[TriggerAction] = Field( - default=[], title="Actions to perform when trigger is matched" + default=[], + title="Trigger actions", + description="List of actions to execute when trigger matches (notification, sub_label, attribute).", ) model_config = ConfigDict(extra="forbid", protected_namespaces=()) @@ -165,147 +229,191 @@ class TriggerConfig(FrigateBaseModel): class CameraSemanticSearchConfig(FrigateBaseModel): 
triggers: Dict[str, TriggerConfig] = Field( default={}, - title="Trigger actions on tracked objects that match existing thumbnails or descriptions", + title="Triggers", + description="Actions and matching criteria for camera-specific semantic search triggers.", ) model_config = ConfigDict(extra="forbid", protected_namespaces=()) class FaceRecognitionConfig(FrigateBaseModel): - enabled: bool = Field(default=False, title="Enable face recognition.") + enabled: bool = Field( + default=False, + title="Enable face recognition", + description="Enable or disable face recognition for all cameras; can be overridden per-camera.", + ) model_size: str = Field( - default="small", title="The size of the embeddings model used." + default="small", + title="Model size", + description="Model size to use for face embeddings (small/large); larger may require GPU.", ) unknown_score: float = Field( - title="Minimum face distance score required to be marked as a potential match.", + title="Unknown score threshold", + description="Minimum similarity score required for a face to be marked as a potential match (higher = stricter); faces scoring below this are labeled unknown.", default=0.8, gt=0.0, le=1.0, ) detection_threshold: float = Field( default=0.7, - title="Minimum face detection score required to be considered a face.", + title="Detection threshold", + description="Minimum detection confidence required to consider a face detection valid.", gt=0.0, le=1.0, ) recognition_threshold: float = Field( default=0.9, - title="Minimum face distance score required to be considered a match.", + title="Recognition threshold", + description="Face embedding distance threshold to consider two faces a match.", gt=0.0, le=1.0, ) min_area: int = Field( - default=750, title="Min area of face box to consider running face recognition."
+ default=750, + title="Minimum face area", + description="Minimum area (pixels) of a detected face box required to attempt recognition.", ) min_faces: int = Field( default=1, gt=0, le=6, - title="Min face recognitions for the sub label to be applied to the person object.", + title="Minimum faces", + description="Minimum number of face recognitions required before applying a recognized sub-label to a person.", ) save_attempts: int = Field( default=200, ge=0, - title="Number of face attempts to save in the recent recognitions tab.", + title="Save attempts", + description="Number of face recognition attempts to retain for recent recognition UI.", ) blur_confidence_filter: bool = Field( - default=True, title="Apply blur quality filter to face confidence." + default=True, + title="Blur confidence filter", + description="Adjust confidence scores based on image blur to reduce false positives for poor quality faces.", ) device: Optional[str] = Field( default=None, - title="The device key to use for face recognition.", + title="Device", description="This is an override, to target a specific device. See https://onnxruntime.ai/docs/execution-providers/ for more information", ) class CameraFaceRecognitionConfig(FrigateBaseModel): - enabled: bool = Field(default=False, title="Enable face recognition.") + enabled: bool = Field( + default=False, + title="Enable face recognition", + description="Enable or disable face recognition.", + ) min_area: int = Field( - default=750, title="Min area of face box to consider running face recognition." + default=750, + title="Minimum face area", + description="Minimum area (pixels) of a detected face box required to attempt recognition.", ) model_config = ConfigDict(extra="forbid", protected_namespaces=()) class ReplaceRule(FrigateBaseModel): - pattern: str = Field(..., title="Regex pattern to match.") - replacement: str = Field( - ..., title="Replacement string (supports backrefs like '\\1')." 
- ) + pattern: str = Field(..., title="Regex pattern") + replacement: str = Field(..., title="Replacement string") class LicensePlateRecognitionConfig(FrigateBaseModel): - enabled: bool = Field(default=False, title="Enable license plate recognition.") + enabled: bool = Field( + default=False, + title="Enable LPR", + description="Enable or disable license plate recognition for all cameras; can be overridden per-camera.", + ) model_size: str = Field( - default="small", title="The size of the embeddings model used." + default="small", + title="Model size", + description="Model size used for text detection/recognition. Most users should use 'small'.", ) detection_threshold: float = Field( default=0.7, - title="License plate object confidence score required to begin running recognition.", + title="Detection threshold", + description="Detection confidence threshold to begin running OCR on a suspected plate.", gt=0.0, le=1.0, ) min_area: int = Field( default=1000, - title="Minimum area of license plate to begin running recognition.", + title="Minimum plate area", + description="Minimum plate area (pixels) required to attempt recognition.", ) recognition_threshold: float = Field( default=0.9, - title="Recognition confidence score required to add the plate to the object as a sub label.", + title="Recognition threshold", + description="Confidence threshold required for recognized plate text to be attached as a sub-label.", gt=0.0, le=1.0, ) min_plate_length: int = Field( default=4, - title="Minimum number of characters a license plate must have to be added to the object as a sub label.", + title="Min plate length", + description="Minimum number of characters a recognized plate must contain to be considered valid.", ) format: Optional[str] = Field( default=None, - title="Regular expression for the expected format of license plate.", + title="Plate format regex", + description="Optional regex to validate recognized plate strings against an expected format.", ) match_distance: 
int = Field( default=1, - title="Allow this number of missing/incorrect characters to still cause a detected plate to match a known plate.", + title="Match distance", + description="Number of character mismatches allowed when comparing detected plates to known plates.", ge=0, ) known_plates: Optional[Dict[str, List[str]]] = Field( - default={}, title="Known plates to track (strings or regular expressions)." + default={}, + title="Known plates", + description="List of plates or regexes to specially track or alert on.", ) enhancement: int = Field( default=0, - title="Amount of contrast adjustment and denoising to apply to license plate images before recognition.", + title="Enhancement level", + description="Enhancement level (0-10) to apply to plate crops prior to OCR; higher values may not always improve results, levels above 5 may only work with night time plates and should be used with caution.", ge=0, le=10, ) debug_save_plates: bool = Field( default=False, - title="Save plates captured for LPR for debugging purposes.", + title="Save debug plates", + description="Save plate crop images for debugging LPR performance.", ) device: Optional[str] = Field( default=None, - title="The device key to use for LPR.", + title="Device", description="This is an override, to target a specific device. See https://onnxruntime.ai/docs/execution-providers/ for more information", ) replace_rules: List[ReplaceRule] = Field( default_factory=list, - title="List of regex replacement rules for normalizing detected plates. 
Each rule has 'pattern' and 'replacement'.", + title="Replacement rules", + description="Regex replacement rules used to normalize detected plate strings before matching.", ) class CameraLicensePlateRecognitionConfig(FrigateBaseModel): - enabled: bool = Field(default=False, title="Enable license plate recognition.") + enabled: bool = Field( + default=False, + title="Enable LPR", + description="Enable or disable LPR on this camera.", + ) expire_time: int = Field( default=3, - title="Expire plates not seen after number of seconds (for dedicated LPR cameras only).", + title="Expire seconds", + description="Time in seconds after which an unseen plate is expired from the tracker (for dedicated LPR cameras only).", gt=0, ) min_area: int = Field( default=1000, - title="Minimum area of license plate to begin running recognition.", + title="Minimum plate area", + description="Minimum plate area (pixels) required to attempt recognition.", ) enhancement: int = Field( default=0, - title="Amount of contrast adjustment and denoising to apply to license plate images before recognition.", + title="Enhancement level", + description="Enhancement level (0-10) to apply to plate crops prior to OCR; higher values may not always improve results, levels above 5 may only work with night time plates and should be used with caution.", ge=0, le=10, ) @@ -314,12 +422,18 @@ class CameraLicensePlateRecognitionConfig(FrigateBaseModel): class CameraAudioTranscriptionConfig(FrigateBaseModel): - enabled: bool = Field(default=False, title="Enable audio transcription.") + enabled: bool = Field( + default=False, + title="Enable transcription", + description="Enable or disable manually triggered audio event transcription.", + ) enabled_in_config: Optional[bool] = Field( - default=None, title="Keep track of original state of audio transcription." + default=None, title="Original transcription state" ) live_enabled: Optional[bool] = Field( - default=False, title="Enable live transcriptions." 
+ default=False, + title="Live transcription", + description="Enable streaming live transcription for audio as it is received.", ) model_config = ConfigDict(extra="forbid", protected_namespaces=()) diff --git a/frigate/config/config.py b/frigate/config/config.py index a26d4c50e..7e2d0eddc 100644 --- a/frigate/config/config.py +++ b/frigate/config/config.py @@ -3,7 +3,7 @@ from __future__ import annotations import json import logging import os -from typing import Any, Dict, List, Optional, Union +from typing import Any, Dict, Optional import numpy as np from pydantic import ( @@ -45,7 +45,8 @@ from .camera.audio import AudioConfig from .camera.birdseye import BirdseyeConfig from .camera.detect import DetectConfig from .camera.ffmpeg import FfmpegConfig -from .camera.genai import GenAIConfig +from .camera.genai import GenAIConfig, GenAIRoleEnum +from .camera.mask import ObjectMaskConfig from .camera.motion import MotionConfig from .camera.notification import NotificationConfig from .camera.objects import FilterConfig, ObjectConfig @@ -93,54 +94,111 @@ stream_info_retriever = StreamInfoRetriever() class RuntimeMotionConfig(MotionConfig): - raw_mask: Union[str, List[str]] = "" - mask: np.ndarray = None + """Runtime version of MotionConfig with rasterized masks.""" + + # The rasterized numpy mask (combination of all enabled masks) + rasterized_mask: np.ndarray = None def __init__(self, **config): frame_shape = config.get("frame_shape", (1, 1)) - mask = get_relative_coordinates(config.get("mask", ""), frame_shape) - config["raw_mask"] = mask - - if mask: - config["mask"] = create_mask(frame_shape, mask) - else: - empty_mask = np.zeros(frame_shape, np.uint8) - empty_mask[:] = 255 - config["mask"] = empty_mask + # Store original mask dict for serialization + original_mask = config.get("mask", {}) + if isinstance(original_mask, dict): + # Process the new dict format - update raw_coordinates for each mask + processed_mask = {} + for mask_id, mask_config in 
original_mask.items(): + if isinstance(mask_config, dict): + coords = mask_config.get("coordinates", "") + relative_coords = get_relative_coordinates(coords, frame_shape) + mask_config_copy = mask_config.copy() + mask_config_copy["raw_coordinates"] = ( + relative_coords if relative_coords else coords + ) + mask_config_copy["coordinates"] = ( + relative_coords if relative_coords else coords + ) + processed_mask[mask_id] = mask_config_copy + else: + processed_mask[mask_id] = mask_config + config["mask"] = processed_mask + config["raw_mask"] = processed_mask super().__init__(**config) + # Rasterize only enabled masks + enabled_coords = [] + for mask_config in self.mask.values(): + if mask_config.enabled and mask_config.coordinates: + coords = mask_config.coordinates + if isinstance(coords, list): + enabled_coords.extend(coords) + else: + enabled_coords.append(coords) + + if enabled_coords: + self.rasterized_mask = create_mask(frame_shape, enabled_coords) + else: + empty_mask = np.zeros(frame_shape, np.uint8) + empty_mask[:] = 255 + self.rasterized_mask = empty_mask + def dict(self, **kwargs): ret = super().model_dump(**kwargs) - if "mask" in ret: - ret["mask"] = ret["raw_mask"] - ret.pop("raw_mask") + if "rasterized_mask" in ret: + ret.pop("rasterized_mask") return ret - @field_serializer("mask", when_used="json") - def serialize_mask(self, value: Any, info): - return self.raw_mask - - @field_serializer("raw_mask", when_used="json") - def serialize_raw_mask(self, value: Any, info): + @field_serializer("rasterized_mask", when_used="json") + def serialize_rasterized_mask(self, value: Any, info): return None model_config = ConfigDict(arbitrary_types_allowed=True, extra="ignore") class RuntimeFilterConfig(FilterConfig): - mask: Optional[np.ndarray] = None - raw_mask: Optional[Union[str, List[str]]] = None + """Runtime version of FilterConfig with rasterized masks.""" + + # The rasterized numpy mask (combination of all enabled masks) + rasterized_mask: Optional[np.ndarray] 
= None def __init__(self, **config): frame_shape = config.get("frame_shape", (1, 1)) - mask = get_relative_coordinates(config.get("mask"), frame_shape) - config["raw_mask"] = mask - - if mask is not None: - config["mask"] = create_mask(frame_shape, mask) + # Store original mask dict for serialization + original_mask = config.get("mask", {}) + if isinstance(original_mask, dict): + # Process the new dict format - update raw_coordinates for each mask + processed_mask = {} + for mask_id, mask_config in original_mask.items(): + # Handle both dict and ObjectMaskConfig formats + if hasattr(mask_config, "model_dump"): + # It's an ObjectMaskConfig object + mask_dict = mask_config.model_dump() + coords = mask_dict.get("coordinates", "") + relative_coords = get_relative_coordinates(coords, frame_shape) + mask_dict["raw_coordinates"] = ( + relative_coords if relative_coords else coords + ) + mask_dict["coordinates"] = ( + relative_coords if relative_coords else coords + ) + processed_mask[mask_id] = mask_dict + elif isinstance(mask_config, dict): + coords = mask_config.get("coordinates", "") + relative_coords = get_relative_coordinates(coords, frame_shape) + mask_config_copy = mask_config.copy() + mask_config_copy["raw_coordinates"] = ( + relative_coords if relative_coords else coords + ) + mask_config_copy["coordinates"] = ( + relative_coords if relative_coords else coords + ) + processed_mask[mask_id] = mask_config_copy + else: + processed_mask[mask_id] = mask_config + config["mask"] = processed_mask + config["raw_mask"] = processed_mask # Convert min_area and max_area to pixels if they're percentages if "min_area" in config: @@ -151,13 +209,31 @@ class RuntimeFilterConfig(FilterConfig): super().__init__(**config) + # Rasterize only enabled masks + enabled_coords = [] + for mask_config in self.mask.values(): + if mask_config.enabled and mask_config.coordinates: + coords = mask_config.coordinates + if isinstance(coords, list): + enabled_coords.extend(coords) + else: + 
enabled_coords.append(coords) + + if enabled_coords: + self.rasterized_mask = create_mask(frame_shape, enabled_coords) + else: + self.rasterized_mask = None + def dict(self, **kwargs): ret = super().model_dump(**kwargs) - if "mask" in ret: - ret["mask"] = ret["raw_mask"] - ret.pop("raw_mask") + if "rasterized_mask" in ret: + ret.pop("rasterized_mask") return ret + @field_serializer("rasterized_mask", when_used="json") + def serialize_rasterized_mask(self, value: Any, info): + return None + model_config = ConfigDict(arbitrary_types_allowed=True, extra="ignore") @@ -299,116 +375,189 @@ def verify_lpr_and_face( class FrigateConfig(FrigateBaseModel): - version: Optional[str] = Field(default=None, title="Current config version.") + version: Optional[str] = Field( + default=None, + title="Current config version", + description="Numeric or string version of the active configuration to help detect migrations or format changes.", + ) safe_mode: bool = Field( - default=False, title="If Frigate should be started in safe mode." + default=False, + title="Safe mode", + description="When enabled, start Frigate in safe mode with reduced features for troubleshooting.", ) # Fields that install global state should be defined first, so that their validators run first. environment_vars: EnvVars = Field( - default_factory=dict, title="Frigate environment variables." + default_factory=dict, + title="Environment variables", + description="Key/value pairs of environment variables to set for the Frigate process in Home Assistant OS. 
Non-HAOS users must use Docker environment variable configuration instead.", ) logger: LoggerConfig = Field( default_factory=LoggerConfig, - title="Logging configuration.", + title="Logging", + description="Controls default log verbosity and per-component log level overrides.", validate_default=True, ) # Global config - auth: AuthConfig = Field(default_factory=AuthConfig, title="Auth configuration.") + auth: AuthConfig = Field( + default_factory=AuthConfig, + title="Authentication", + description="Authentication and session-related settings including cookie and rate limit options.", + ) database: DatabaseConfig = Field( - default_factory=DatabaseConfig, title="Database configuration." + default_factory=DatabaseConfig, + title="Database", + description="Settings for the SQLite database used by Frigate to store tracked object and recording metadata.", ) go2rtc: RestreamConfig = Field( - default_factory=RestreamConfig, title="Global restream configuration." + default_factory=RestreamConfig, + title="go2rtc", + description="Settings for the integrated go2rtc restreaming service used for live stream relaying and translation.", + ) + mqtt: MqttConfig = Field( + title="MQTT", + description="Settings for connecting and publishing telemetry, snapshots, and event details to an MQTT broker.", ) - mqtt: MqttConfig = Field(title="MQTT configuration.") notifications: NotificationConfig = Field( - default_factory=NotificationConfig, title="Global notification configuration." 
+ default_factory=NotificationConfig, + title="Notifications", + description="Settings to enable and control notifications for all cameras; can be overridden per-camera.", ) networking: NetworkingConfig = Field( - default_factory=NetworkingConfig, title="Networking configuration" + default_factory=NetworkingConfig, + title="Networking", + description="Network-related settings such as IPv6 enablement for Frigate endpoints.", ) proxy: ProxyConfig = Field( - default_factory=ProxyConfig, title="Proxy configuration." + default_factory=ProxyConfig, + title="Proxy", + description="Settings for integrating Frigate behind a reverse proxy that passes authenticated user headers.", ) telemetry: TelemetryConfig = Field( - default_factory=TelemetryConfig, title="Telemetry configuration." + default_factory=TelemetryConfig, + title="Telemetry", + description="System telemetry and stats options including GPU and network bandwidth monitoring.", + ) + tls: TlsConfig = Field( + default_factory=TlsConfig, + title="TLS", + description="TLS settings for Frigate's web endpoints (port 8971).", + ) + ui: UIConfig = Field( + default_factory=UIConfig, + title="UI", + description="User interface preferences such as timezone, time/date formatting, and units.", ) - tls: TlsConfig = Field(default_factory=TlsConfig, title="TLS configuration.") - ui: UIConfig = Field(default_factory=UIConfig, title="UI configuration.") # Detector config detectors: Dict[str, BaseDetectorConfig] = Field( default=DEFAULT_DETECTORS, - title="Detector hardware configuration.", + title="Detector hardware", + description="Configuration for object detectors (CPU, GPU, ONNX backends) and any detector-specific model settings.", ) model: ModelConfig = Field( - default_factory=ModelConfig, title="Detection model configuration." 
+ default_factory=ModelConfig, + title="Detection model", + description="Settings to configure a custom object detection model and its input shape.", ) - # GenAI config - genai: GenAIConfig = Field( - default_factory=GenAIConfig, title="Generative AI configuration." + # GenAI config (named provider configs: name -> GenAIConfig) + genai: Dict[str, GenAIConfig] = Field( + default_factory=dict, + title="Generative AI configuration (named providers).", + description="Settings for integrated generative AI providers used to generate object descriptions and review summaries.", ) # Camera config - cameras: Dict[str, CameraConfig] = Field(title="Camera configuration.") + cameras: Dict[str, CameraConfig] = Field(title="Cameras", description="Cameras") audio: AudioConfig = Field( - default_factory=AudioConfig, title="Global Audio events configuration." + default_factory=AudioConfig, + title="Audio events", + description="Settings for audio-based event detection for all cameras; can be overridden per-camera.", ) birdseye: BirdseyeConfig = Field( - default_factory=BirdseyeConfig, title="Birdseye configuration." + default_factory=BirdseyeConfig, + title="Birdseye", + description="Settings for the Birdseye composite view that composes multiple camera feeds into a single layout.", ) detect: DetectConfig = Field( - default_factory=DetectConfig, title="Global object tracking configuration." + default_factory=DetectConfig, + title="Object Detection", + description="Settings for the detection/detect role used to run object detection and initialize trackers.", ) ffmpeg: FfmpegConfig = Field( - default_factory=FfmpegConfig, title="Global FFmpeg configuration." + default_factory=FfmpegConfig, + title="FFmpeg", + description="FFmpeg settings including binary path, args, hwaccel options, and per-role output args.", ) live: CameraLiveConfig = Field( - default_factory=CameraLiveConfig, title="Live playback settings." 
+ default_factory=CameraLiveConfig, + title="Live playback", + description="Settings used by the Web UI to control live stream resolution and quality.", ) motion: Optional[MotionConfig] = Field( - default=None, title="Global motion detection configuration." + default=None, + title="Motion detection", + description="Default motion detection settings applied to cameras unless overridden per-camera.", ) objects: ObjectConfig = Field( - default_factory=ObjectConfig, title="Global object configuration." + default_factory=ObjectConfig, + title="Objects", + description="Object tracking defaults including which labels to track and per-object filters.", ) record: RecordConfig = Field( - default_factory=RecordConfig, title="Global record configuration." + default_factory=RecordConfig, + title="Recording", + description="Recording and retention settings applied to cameras unless overridden per-camera.", ) review: ReviewConfig = Field( - default_factory=ReviewConfig, title="Review configuration." + default_factory=ReviewConfig, + title="Review", + description="Settings that control alerts, detections, and GenAI review summaries used by the UI and storage.", ) snapshots: SnapshotsConfig = Field( - default_factory=SnapshotsConfig, title="Global snapshots configuration." + default_factory=SnapshotsConfig, + title="Snapshots", + description="Settings for saved JPEG snapshots of tracked objects for all cameras; can be overridden per-camera.", ) timestamp_style: TimestampStyleConfig = Field( default_factory=TimestampStyleConfig, - title="Global timestamp style configuration.", + title="Timestamp style", + description="Styling options for in-feed timestamps applied to debug view and snapshots.", ) # Classification Config audio_transcription: AudioTranscriptionConfig = Field( - default_factory=AudioTranscriptionConfig, title="Audio transcription config." 
+ default_factory=AudioTranscriptionConfig, + title="Audio transcription", + description="Settings for live and speech audio transcription used for events and live captions.", ) classification: ClassificationConfig = Field( - default_factory=ClassificationConfig, title="Object classification config." + default_factory=ClassificationConfig, + title="Object classification", + description="Settings for classification models used to refine object labels or state classification.", ) semantic_search: SemanticSearchConfig = Field( - default_factory=SemanticSearchConfig, title="Semantic search configuration." + default_factory=SemanticSearchConfig, + title="Semantic Search", + description="Settings for Semantic Search which builds and queries object embeddings to find similar items.", ) face_recognition: FaceRecognitionConfig = Field( - default_factory=FaceRecognitionConfig, title="Face recognition config." + default_factory=FaceRecognitionConfig, + title="Face recognition", + description="Settings for face detection and recognition for all cameras; can be overridden per-camera.", ) lpr: LicensePlateRecognitionConfig = Field( default_factory=LicensePlateRecognitionConfig, - title="License Plate recognition config.", + title="License Plate Recognition", + description="License plate recognition settings including detection thresholds, formatting, and known plates.", ) camera_groups: Dict[str, CameraGroupConfig] = Field( - default_factory=dict, title="Camera group configuration" + default_factory=dict, + title="Camera groups", + description="Configuration for named camera groups used to organize cameras in the UI.", ) _plus_api: PlusApi @@ -431,6 +580,18 @@ class FrigateConfig(FrigateBaseModel): # set notifications state self.notifications.enabled_in_config = self.notifications.enabled + # validate genai: each role (tools, vision, embeddings) at most once + role_to_name: dict[GenAIRoleEnum, str] = {} + for name, genai_cfg in self.genai.items(): + for role in genai_cfg.roles: 
+ if role in role_to_name: + raise ValueError( + f"GenAI role '{role.value}' is assigned to both " + f"'{role_to_name[role]}' and '{name}'; each role must have " + "exactly one provider." + ) + role_to_name[role] = name + # set default min_score for object attributes for attribute in self.model.all_attributes: if not self.objects.filters.get(attribute): @@ -475,6 +636,9 @@ class FrigateConfig(FrigateBaseModel): # users should not set model themselves if detector_config.model: + logger.warning( + "The model key should be specified at the root level of the config, not under detectors. The nested model key will be ignored." + ) detector_config.model = None model_config = self.model.model_dump(exclude_unset=True, warnings="none") @@ -525,6 +689,14 @@ class FrigateConfig(FrigateBaseModel): if camera_config.ffmpeg.hwaccel_args == "auto": camera_config.ffmpeg.hwaccel_args = self.ffmpeg.hwaccel_args + # Resolve export hwaccel_args: camera export -> camera ffmpeg -> global ffmpeg + # This allows per-camera override for exports (e.g., when camera resolution + # exceeds hardware encoder limits) + if camera_config.record.export.hwaccel_args == "auto": + camera_config.record.export.hwaccel_args = ( + camera_config.ffmpeg.hwaccel_args + ) + for input in camera_config.ffmpeg.inputs: need_detect_dimensions = "detect" in input.roles and ( camera_config.detect.height is None @@ -617,35 +789,63 @@ class FrigateConfig(FrigateBaseModel): for key in object_keys: camera_config.objects.filters[key] = FilterConfig() + # Process global object masks to set raw_coordinates + if camera_config.objects.mask: + processed_global_masks = {} + for mask_id, mask_config in camera_config.objects.mask.items(): + if mask_config: + coords = mask_config.coordinates + relative_coords = get_relative_coordinates( + coords, camera_config.frame_shape + ) + # Create a new ObjectMaskConfig with raw_coordinates set + processed_global_masks[mask_id] = ObjectMaskConfig( + friendly_name=mask_config.friendly_name, + 
enabled=mask_config.enabled, + coordinates=relative_coords if relative_coords else coords, + raw_coordinates=relative_coords + if relative_coords + else coords, + enabled_in_config=mask_config.enabled, + ) + else: + processed_global_masks[mask_id] = mask_config + camera_config.objects.mask = processed_global_masks + camera_config.objects.raw_mask = processed_global_masks + # Apply global object masks and convert masks to numpy array for object, filter in camera_config.objects.filters.items(): + # Set enabled_in_config for per-object masks before processing + for mask_config in filter.mask.values(): + if mask_config: + mask_config.enabled_in_config = mask_config.enabled + + # Merge global object masks with per-object filter masks + merged_mask = dict(filter.mask) # Copy filter-specific masks + + # Add global object masks if they exist if camera_config.objects.mask: - filter_mask = [] - if filter.mask is not None: - filter_mask = ( - filter.mask - if isinstance(filter.mask, list) - else [filter.mask] - ) - object_mask = ( - get_relative_coordinates( - ( - camera_config.objects.mask - if isinstance(camera_config.objects.mask, list) - else [camera_config.objects.mask] - ), - camera_config.frame_shape, - ) - or [] - ) - filter.mask = filter_mask + object_mask + for mask_id, mask_config in camera_config.objects.mask.items(): + # Use a global prefix to avoid key collisions + global_mask_id = f"global_{mask_id}" + merged_mask[global_mask_id] = mask_config # Set runtime filter to create masks camera_config.objects.filters[object] = RuntimeFilterConfig( frame_shape=camera_config.frame_shape, - **filter.model_dump(exclude_unset=True), + mask=merged_mask, + **filter.model_dump( + exclude_unset=True, exclude={"mask", "raw_mask"} + ), ) + # Set enabled_in_config for motion masks to match config file state BEFORE creating RuntimeMotionConfig + if camera_config.motion: + camera_config.motion.enabled_in_config = camera_config.motion.enabled + for mask_config in 
camera_config.motion.mask.values(): + if mask_config: + mask_config.enabled_in_config = mask_config.enabled + # Convert motion configuration if camera_config.motion is None: camera_config.motion = RuntimeMotionConfig( @@ -654,10 +854,8 @@ class FrigateConfig(FrigateBaseModel): else: camera_config.motion = RuntimeMotionConfig( frame_shape=camera_config.frame_shape, - raw_mask=camera_config.motion.mask, **camera_config.motion.model_dump(exclude_unset=True), ) - camera_config.motion.enabled_in_config = camera_config.motion.enabled # generate zone contours if len(camera_config.zones) > 0: @@ -671,6 +869,10 @@ class FrigateConfig(FrigateBaseModel): zone.generate_contour(camera_config.frame_shape) + # Set enabled_in_config for zones to match config file state + for zone in camera_config.zones.values(): + zone.enabled_in_config = zone.enabled + # Set live view stream if none is set if not camera_config.live.streams: camera_config.live.streams = {name: name} diff --git a/frigate/config/database.py b/frigate/config/database.py index 8daca0d49..8064561f1 100644 --- a/frigate/config/database.py +++ b/frigate/config/database.py @@ -8,4 +8,8 @@ __all__ = ["DatabaseConfig"] class DatabaseConfig(FrigateBaseModel): - path: str = Field(default=DEFAULT_DB_PATH, title="Database path.") # noqa: F821 + path: str = Field( + default=DEFAULT_DB_PATH, + title="Database path", + description="Filesystem path where the Frigate SQLite database file will be stored.", + ) # noqa: F821 diff --git a/frigate/config/logger.py b/frigate/config/logger.py index 0ba3e6972..c8920a198 100644 --- a/frigate/config/logger.py +++ b/frigate/config/logger.py @@ -9,9 +9,15 @@ __all__ = ["LoggerConfig"] class LoggerConfig(FrigateBaseModel): - default: LogLevel = Field(default=LogLevel.info, title="Default logging level.") + default: LogLevel = Field( + default=LogLevel.info, + title="Logging level", + description="Default global log verbosity (debug, info, warning, error).", + ) logs: dict[str, LogLevel] = Field( 
- default_factory=dict, title="Log level for specified processes." + default_factory=dict, + title="Per-process log level", + description="Per-component log level overrides to increase or decrease verbosity for specific modules.", ) @model_validator(mode="after") diff --git a/frigate/config/mqtt.py b/frigate/config/mqtt.py index a760d0a1f..abd5c74b2 100644 --- a/frigate/config/mqtt.py +++ b/frigate/config/mqtt.py @@ -12,25 +12,73 @@ __all__ = ["MqttConfig"] class MqttConfig(FrigateBaseModel): - enabled: bool = Field(default=True, title="Enable MQTT Communication.") - host: str = Field(default="", title="MQTT Host") - port: int = Field(default=1883, title="MQTT Port") - topic_prefix: str = Field(default="frigate", title="MQTT Topic Prefix") - client_id: str = Field(default="frigate", title="MQTT Client ID") + enabled: bool = Field( + default=True, + title="Enable MQTT", + description="Enable or disable MQTT integration for state, events, and snapshots.", + ) + host: str = Field( + default="", + title="MQTT host", + description="Hostname or IP address of the MQTT broker.", + ) + port: int = Field( + default=1883, + title="MQTT port", + description="Port of the MQTT broker (usually 1883 for plain MQTT).", + ) + topic_prefix: str = Field( + default="frigate", + title="Topic prefix", + description="MQTT topic prefix for all Frigate topics; must be unique if running multiple instances.", + ) + client_id: str = Field( + default="frigate", + title="Client ID", + description="Client identifier used when connecting to the MQTT broker; should be unique per instance.", + ) stats_interval: int = Field( - default=60, ge=FREQUENCY_STATS_POINTS, title="MQTT Camera Stats Interval" + default=60, + ge=FREQUENCY_STATS_POINTS, + title="Stats interval", + description="Interval in seconds for publishing system and camera stats to MQTT.", + ) + user: Optional[EnvString] = Field( + default=None, + title="MQTT username", + description="Optional MQTT username; can be provided via environment 
variables or secrets.", ) - user: Optional[EnvString] = Field(default=None, title="MQTT Username") password: Optional[EnvString] = Field( - default=None, title="MQTT Password", validate_default=True + default=None, + title="MQTT password", + description="Optional MQTT password; can be provided via environment variables or secrets.", + validate_default=True, + ) + tls_ca_certs: Optional[str] = Field( + default=None, + title="TLS CA certs", + description="Path to CA certificate for TLS connections to the broker (for self-signed certs).", ) - tls_ca_certs: Optional[str] = Field(default=None, title="MQTT TLS CA Certificates") tls_client_cert: Optional[str] = Field( - default=None, title="MQTT TLS Client Certificate" + default=None, + title="Client cert", + description="Client certificate path for TLS mutual authentication; do not set user/password when using client certs.", + ) + tls_client_key: Optional[str] = Field( + default=None, + title="Client key", + description="Private key path for the client certificate.", + ) + tls_insecure: Optional[bool] = Field( + default=None, + title="TLS insecure", + description="Allow insecure TLS connections by skipping hostname verification (not recommended).", + ) + qos: int = Field( + default=0, + title="MQTT QoS", + description="Quality of Service level for MQTT publishes/subscriptions (0, 1, or 2).", ) - tls_client_key: Optional[str] = Field(default=None, title="MQTT TLS Client Key") - tls_insecure: Optional[bool] = Field(default=None, title="MQTT TLS Insecure") - qos: int = Field(default=0, title="MQTT QoS") @model_validator(mode="after") def user_requires_pass(self, info: ValidationInfo) -> Self: diff --git a/frigate/config/network.py b/frigate/config/network.py index c8b3cfd1c..f537c73b9 100644 --- a/frigate/config/network.py +++ b/frigate/config/network.py @@ -1,13 +1,41 @@ +from typing import Union + from pydantic import Field from .base import FrigateBaseModel -__all__ = ["IPv6Config", "NetworkingConfig"] +__all__ = 
["IPv6Config", "ListenConfig", "NetworkingConfig"] class IPv6Config(FrigateBaseModel): - enabled: bool = Field(default=False, title="Enable IPv6 for port 5000 and/or 8971") + enabled: bool = Field( + default=False, + title="Enable IPv6", + description="Enable IPv6 support for Frigate services (API and UI) where applicable.", + ) + + +class ListenConfig(FrigateBaseModel): + internal: Union[int, str] = Field( + default=5000, + title="Internal port", + description="Internal listening port for Frigate (default 5000).", + ) + external: Union[int, str] = Field( + default=8971, + title="External port", + description="External listening port for Frigate (default 8971).", + ) class NetworkingConfig(FrigateBaseModel): - ipv6: IPv6Config = Field(default_factory=IPv6Config, title="Network configuration") + ipv6: IPv6Config = Field( + default_factory=IPv6Config, + title="IPv6 configuration", + description="IPv6-specific settings for Frigate network services.", + ) + listen: ListenConfig = Field( + default_factory=ListenConfig, + title="Listening ports configuration", + description="Configuration for internal and external listening ports. This is for advanced users. For the majority of use cases it's recommended to change the ports section of your Docker compose file.", + ) diff --git a/frigate/config/proxy.py b/frigate/config/proxy.py index a46b7b897..2426fcf10 100644 --- a/frigate/config/proxy.py +++ b/frigate/config/proxy.py @@ -10,36 +10,47 @@ __all__ = ["ProxyConfig", "HeaderMappingConfig"] class HeaderMappingConfig(FrigateBaseModel): user: str = Field( - default=None, title="Header name from upstream proxy to identify user." 
+ default=None, + title="User header", + description="Header containing the authenticated username provided by the upstream proxy.", ) role: str = Field( default=None, - title="Header name from upstream proxy to identify user role.", + title="Role header", + description="Header containing the authenticated user's role or groups from the upstream proxy.", ) role_map: Optional[dict[str, list[str]]] = Field( default_factory=dict, - title=("Mapping of Frigate roles to upstream group values. "), + title=("Role mapping"), + description="Map upstream group values to Frigate roles (for example map admin groups to the admin role).", ) class ProxyConfig(FrigateBaseModel): header_map: HeaderMappingConfig = Field( default_factory=HeaderMappingConfig, - title="Header mapping definitions for proxy user passing.", + title="Header mapping", + description="Map incoming proxy headers to Frigate user and role fields for proxy-based auth.", ) logout_url: Optional[str] = Field( - default=None, title="Redirect url for logging out with proxy." + default=None, + title="Logout URL", + description="URL to redirect users to when logging out via the proxy.", ) auth_secret: Optional[EnvString] = Field( default=None, - title="Secret value for proxy authentication.", + title="Proxy secret", + description="Optional secret checked against the X-Proxy-Secret header to verify trusted proxies.", ) default_role: Optional[str] = Field( - default="viewer", title="Default role for proxy users." 
+ default="viewer", + title="Default role", + description="Default role assigned to proxy-authenticated users when no role mapping applies (admin or viewer).", ) separator: Optional[str] = Field( default=",", - title="The character used to separate values in a mapped header.", + title="Separator character", + description="Character used to split multiple values provided in proxy headers.", ) @field_validator("separator", mode="before") diff --git a/frigate/config/telemetry.py b/frigate/config/telemetry.py index ab18831e1..41c3f7bbc 100644 --- a/frigate/config/telemetry.py +++ b/frigate/config/telemetry.py @@ -8,22 +8,41 @@ __all__ = ["TelemetryConfig", "StatsConfig"] class StatsConfig(FrigateBaseModel): - amd_gpu_stats: bool = Field(default=True, title="Enable AMD GPU stats.") - intel_gpu_stats: bool = Field(default=True, title="Enable Intel GPU stats.") + amd_gpu_stats: bool = Field( + default=True, + title="AMD GPU stats", + description="Enable collection of AMD GPU statistics if an AMD GPU is present.", + ) + intel_gpu_stats: bool = Field( + default=True, + title="Intel GPU stats", + description="Enable collection of Intel GPU statistics if an Intel GPU is present.", + ) network_bandwidth: bool = Field( - default=False, title="Enable network bandwidth for ffmpeg processes." + default=False, + title="Network bandwidth", + description="Enable per-process network bandwidth monitoring for camera ffmpeg processes and detectors (requires capabilities).", ) intel_gpu_device: Optional[str] = Field( - default=None, title="Define the device to use when gathering SR-IOV stats." 
+ default=None, + title="SR-IOV device", + description="Device identifier used when treating Intel GPUs as SR-IOV to fix GPU stats.", ) class TelemetryConfig(FrigateBaseModel): network_interfaces: list[str] = Field( default=[], - title="Enabled network interfaces for bandwidth calculation.", + title="Network interfaces", + description="List of network interface name prefixes to monitor for bandwidth statistics.", ) stats: StatsConfig = Field( - default_factory=StatsConfig, title="System Stats Configuration" + default_factory=StatsConfig, + title="System stats", + description="Options to enable/disable collection of various system and GPU statistics.", + ) + version_check: bool = Field( + default=True, + title="Version check", + description="Enable an outbound check to detect if a newer Frigate version is available.", ) - version_check: bool = Field(default=True, title="Enable latest version check.") diff --git a/frigate/config/tls.py b/frigate/config/tls.py index 673e105e9..cada11087 100644 --- a/frigate/config/tls.py +++ b/frigate/config/tls.py @@ -6,4 +6,8 @@ __all__ = ["TlsConfig"] class TlsConfig(FrigateBaseModel): - enabled: bool = Field(default=True, title="Enable TLS for port 8971") + enabled: bool = Field( + default=True, + title="Enable TLS", + description="Enable TLS for Frigate's web UI and API on the configured TLS port.", + ) diff --git a/frigate/config/ui.py b/frigate/config/ui.py index 8e0d4d77d..2c3104bbc 100644 --- a/frigate/config/ui.py +++ b/frigate/config/ui.py @@ -27,16 +27,28 @@ class UnitSystemEnum(str, Enum): class UIConfig(FrigateBaseModel): - timezone: Optional[str] = Field(default=None, title="Override UI timezone.") + timezone: Optional[str] = Field( + default=None, + title="Timezone", + description="Optional timezone to display across the UI (defaults to browser local time if unset).", + ) time_format: TimeFormatEnum = Field( - default=TimeFormatEnum.browser, title="Override UI time format." 
+ default=TimeFormatEnum.browser, + title="Time format", + description="Time format to use in the UI (browser, 12hour, or 24hour).", ) date_style: DateTimeStyleEnum = Field( - default=DateTimeStyleEnum.short, title="Override UI dateStyle." + default=DateTimeStyleEnum.short, + title="Date style", + description="Date style to use in the UI (full, long, medium, short).", ) time_style: DateTimeStyleEnum = Field( - default=DateTimeStyleEnum.medium, title="Override UI timeStyle." + default=DateTimeStyleEnum.medium, + title="Time style", + description="Time style to use in the UI (full, long, medium, short).", ) unit_system: UnitSystemEnum = Field( - default=UnitSystemEnum.metric, title="The unit system to use for measurements." + default=UnitSystemEnum.metric, + title="Unit system", + description="Unit system for display (metric or imperial) used in the UI and MQTT.", ) diff --git a/frigate/const.py b/frigate/const.py index 41c24f087..6b1e227d5 100644 --- a/frigate/const.py +++ b/frigate/const.py @@ -14,7 +14,8 @@ RECORD_DIR = f"{BASE_DIR}/recordings" TRIGGER_DIR = f"{CLIPS_DIR}/triggers" BIRDSEYE_PIPE = "/tmp/cache/birdseye" CACHE_DIR = "/tmp/cache" -FRIGATE_LOCALHOST = "http://127.0.0.1:5000" +REPLAY_CAMERA_PREFIX = "_replay_" +REPLAY_DIR = os.path.join(CACHE_DIR, "replay") PLUS_ENV_VAR = "PLUS_API_KEY" PLUS_API_HOST = "https://api.frigate.video" @@ -122,6 +123,7 @@ UPDATE_REVIEW_DESCRIPTION = "update_review_description" UPDATE_MODEL_STATE = "update_model_state" UPDATE_EMBEDDINGS_REINDEX_PROGRESS = "handle_embeddings_reindex_progress" UPDATE_BIRDSEYE_LAYOUT = "update_birdseye_layout" +UPDATE_JOB_STATE = "update_job_state" NOTIFICATION_TEST = "notification_test" # IO Nice Values diff --git a/frigate/data_processing/common/license_plate/mixin.py b/frigate/data_processing/common/license_plate/mixin.py index b56c66a19..ae06c0d0a 100644 --- a/frigate/data_processing/common/license_plate/mixin.py +++ b/frigate/data_processing/common/license_plate/mixin.py @@ -1220,7 +1220,7 
@@ class LicensePlateProcessingMixin: rgb = cv2.cvtColor(frame, cv2.COLOR_YUV2BGR_I420) # apply motion mask - rgb[self.config.cameras[obj_data].motion.mask == 0] = [0, 0, 0] + rgb[self.config.cameras[obj_data].motion.rasterized_mask == 0] = [0, 0, 0] if WRITE_DEBUG_IMAGES: cv2.imwrite( @@ -1324,7 +1324,7 @@ class LicensePlateProcessingMixin: rgb = cv2.cvtColor(frame, cv2.COLOR_YUV2BGR_I420) # apply motion mask - rgb[self.config.cameras[camera].motion.mask == 0] = [0, 0, 0] + rgb[self.config.cameras[camera].motion.rasterized_mask == 0] = [0, 0, 0] left, top, right, bottom = car_box car = rgb[top:bottom, left:right] diff --git a/frigate/data_processing/real_time/bird.py b/frigate/data_processing/real_time/bird.py index 7851c0997..520440005 100644 --- a/frigate/data_processing/real_time/bird.py +++ b/frigate/data_processing/real_time/bird.py @@ -22,7 +22,7 @@ from .api import RealTimeProcessorApi try: from tflite_runtime.interpreter import Interpreter except ModuleNotFoundError: - from tensorflow.lite.python.interpreter import Interpreter + from ai_edge_litert.interpreter import Interpreter logger = logging.getLogger(__name__) diff --git a/frigate/data_processing/real_time/custom_classification.py b/frigate/data_processing/real_time/custom_classification.py index 229383d9f..1a2512e43 100644 --- a/frigate/data_processing/real_time/custom_classification.py +++ b/frigate/data_processing/real_time/custom_classification.py @@ -32,7 +32,7 @@ from .api import RealTimeProcessorApi try: from tflite_runtime.interpreter import Interpreter except ModuleNotFoundError: - from tensorflow.lite.python.interpreter import Interpreter + from ai_edge_litert.interpreter import Interpreter logger = logging.getLogger(__name__) @@ -73,11 +73,6 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi): self.__build_detector() def __build_detector(self) -> None: - try: - from tflite_runtime.interpreter import Interpreter - except ModuleNotFoundError: - from 
tensorflow.lite.python.interpreter import Interpreter - model_path = os.path.join(self.model_dir, "model.tflite") labelmap_path = os.path.join(self.model_dir, "labelmap.txt") diff --git a/frigate/debug_replay.py b/frigate/debug_replay.py new file mode 100644 index 000000000..504184667 --- /dev/null +++ b/frigate/debug_replay.py @@ -0,0 +1,443 @@ +"""Debug replay camera management for replaying recordings with detection overlays.""" + +import logging +import os +import shutil +import subprocess as sp +import threading + +from ruamel.yaml import YAML + +from frigate.config import FrigateConfig +from frigate.config.camera.updater import ( + CameraConfigUpdateEnum, + CameraConfigUpdatePublisher, + CameraConfigUpdateTopic, +) +from frigate.const import ( + CLIPS_DIR, + RECORD_DIR, + REPLAY_CAMERA_PREFIX, + REPLAY_DIR, + THUMB_DIR, +) +from frigate.models import Event, Recordings, ReviewSegment, Timeline +from frigate.util.config import find_config_file + +logger = logging.getLogger(__name__) + + +class DebugReplayManager: + """Manages a single debug replay session.""" + + def __init__(self) -> None: + self._lock = threading.Lock() + self.replay_camera_name: str | None = None + self.source_camera: str | None = None + self.clip_path: str | None = None + self.start_ts: float | None = None + self.end_ts: float | None = None + + @property + def active(self) -> bool: + """Whether a replay session is currently active.""" + return self.replay_camera_name is not None + + def start( + self, + source_camera: str, + start_ts: float, + end_ts: float, + frigate_config: FrigateConfig, + config_publisher: CameraConfigUpdatePublisher, + ) -> str: + """Start a debug replay session. 
+ + Args: + source_camera: Name of the source camera to replay + start_ts: Start timestamp + end_ts: End timestamp + frigate_config: Current Frigate configuration + config_publisher: Publisher for camera config updates + + Returns: + The replay camera name + + Raises: + ValueError: If a session is already active or parameters are invalid + RuntimeError: If clip generation fails + """ + with self._lock: + return self._start_locked( + source_camera, start_ts, end_ts, frigate_config, config_publisher + ) + + def _start_locked( + self, + source_camera: str, + start_ts: float, + end_ts: float, + frigate_config: FrigateConfig, + config_publisher: CameraConfigUpdatePublisher, + ) -> str: + if self.active: + raise ValueError("A replay session is already active") + + if source_camera not in frigate_config.cameras: + raise ValueError(f"Camera '{source_camera}' not found") + + if end_ts <= start_ts: + raise ValueError("End time must be after start time") + + # Query recordings for the source camera in the time range + recordings = ( + Recordings.select( + Recordings.path, + Recordings.start_time, + Recordings.end_time, + ) + .where( + Recordings.start_time.between(start_ts, end_ts) + | Recordings.end_time.between(start_ts, end_ts) + | ((start_ts > Recordings.start_time) & (end_ts < Recordings.end_time)) + ) + .where(Recordings.camera == source_camera) + .order_by(Recordings.start_time.asc()) + ) + + if not recordings.count(): + raise ValueError( + f"No recordings found for camera '{source_camera}' in the specified time range" + ) + + # Create replay directory + os.makedirs(REPLAY_DIR, exist_ok=True) + + # Generate replay camera name + replay_name = f"{REPLAY_CAMERA_PREFIX}{source_camera}" + + # Build concat file for ffmpeg + concat_file = os.path.join(REPLAY_DIR, f"{replay_name}_concat.txt") + clip_path = os.path.join(REPLAY_DIR, f"{replay_name}.mp4") + + with open(concat_file, "w") as f: + for recording in recordings: + f.write(f"file '{recording.path}'\n") + + # Concatenate 
recordings into a single clip with -c copy (fast) + ffmpeg_cmd = [ + frigate_config.ffmpeg.ffmpeg_path, + "-hide_banner", + "-y", + "-f", + "concat", + "-safe", + "0", + "-i", + concat_file, + "-c", + "copy", + "-movflags", + "+faststart", + clip_path, + ] + + logger.info( + "Generating replay clip for %s (%.1f - %.1f)", + source_camera, + start_ts, + end_ts, + ) + + try: + result = sp.run( + ffmpeg_cmd, + capture_output=True, + text=True, + timeout=120, + ) + if result.returncode != 0: + logger.error("FFmpeg error: %s", result.stderr) + raise RuntimeError( + f"Failed to generate replay clip: {result.stderr[-500:]}" + ) + except sp.TimeoutExpired: + raise RuntimeError("Clip generation timed out") + finally: + # Clean up concat file + if os.path.exists(concat_file): + os.remove(concat_file) + + if not os.path.exists(clip_path): + raise RuntimeError("Clip file was not created") + + # Build camera config dict for the replay camera + source_config = frigate_config.cameras[source_camera] + camera_dict = self._build_camera_config_dict( + source_config, replay_name, clip_path + ) + + # Build an in-memory config with the replay camera added + config_file = find_config_file() + yaml_parser = YAML() + with open(config_file, "r") as f: + config_data = yaml_parser.load(f) + + if "cameras" not in config_data or config_data["cameras"] is None: + config_data["cameras"] = {} + config_data["cameras"][replay_name] = camera_dict + + try: + new_config = FrigateConfig.parse_object(config_data) + except Exception as e: + raise RuntimeError(f"Failed to validate replay camera config: {e}") + + # Update the running config + frigate_config.cameras[replay_name] = new_config.cameras[replay_name] + + # Publish the add event + config_publisher.publish_update( + CameraConfigUpdateTopic(CameraConfigUpdateEnum.add, replay_name), + new_config.cameras[replay_name], + ) + + # Store session state + self.replay_camera_name = replay_name + self.source_camera = source_camera + self.clip_path = clip_path 
+ self.start_ts = start_ts + self.end_ts = end_ts + + logger.info("Debug replay started: %s -> %s", source_camera, replay_name) + return replay_name + + def stop( + self, + frigate_config: FrigateConfig, + config_publisher: CameraConfigUpdatePublisher, + ) -> None: + """Stop the active replay session and clean up all artifacts. + + Args: + frigate_config: Current Frigate configuration + config_publisher: Publisher for camera config updates + """ + with self._lock: + self._stop_locked(frigate_config, config_publisher) + + def _stop_locked( + self, + frigate_config: FrigateConfig, + config_publisher: CameraConfigUpdatePublisher, + ) -> None: + if not self.active: + logger.warning("No active replay session to stop") + return + + replay_name = self.replay_camera_name + + # Publish remove event so subscribers stop and remove from their config + if replay_name in frigate_config.cameras: + config_publisher.publish_update( + CameraConfigUpdateTopic(CameraConfigUpdateEnum.remove, replay_name), + frigate_config.cameras[replay_name], + ) + # Do NOT pop here — let subscribers handle removal from the shared + # config dict when they process the ZMQ message to avoid race conditions + + # Defensive DB cleanup + self._cleanup_db(replay_name) + + # Remove filesystem artifacts + self._cleanup_files(replay_name) + + # Reset state + self.replay_camera_name = None + self.source_camera = None + self.clip_path = None + self.start_ts = None + self.end_ts = None + + logger.info("Debug replay stopped and cleaned up: %s", replay_name) + + def _build_camera_config_dict( + self, + source_config, + replay_name: str, + clip_path: str, + ) -> dict: + """Build a camera config dictionary for the replay camera. 
+ + Args: + source_config: Source camera's CameraConfig + replay_name: Name for the replay camera + clip_path: Path to the replay clip file + + Returns: + Camera config as a dictionary + """ + # Extract detect config (exclude computed fields) + detect_dict = source_config.detect.model_dump( + exclude={"min_initialized", "max_disappeared", "enabled_in_config"} + ) + + # Extract objects config, using .dict() on filters to convert + # RuntimeFilterConfig ndarray masks back to string coordinates + objects_dict = { + "track": source_config.objects.track, + "mask": { + mask_id: ( + mask_cfg.model_dump( + exclude={"raw_coordinates", "enabled_in_config"} + ) + if mask_cfg is not None + else None + ) + for mask_id, mask_cfg in source_config.objects.mask.items() + } + if source_config.objects.mask + else {}, + "filters": { + name: filt.dict() if hasattr(filt, "dict") else filt.model_dump() + for name, filt in source_config.objects.filters.items() + }, + } + + # Extract zones (exclude_defaults avoids serializing empty defaults + # like distances=[] that fail validation on re-parse) + zones_dict = {} + for zone_name, zone_config in source_config.zones.items(): + zone_dump = zone_config.model_dump( + exclude={"contour", "color"}, exclude_defaults=True + ) + # Always include required fields + zone_dump.setdefault("coordinates", zone_config.coordinates) + zones_dict[zone_name] = zone_dump + + # Extract motion config (exclude runtime fields) + motion_dict = {} + if source_config.motion is not None: + motion_dict = source_config.motion.model_dump( + exclude={ + "frame_shape", + "raw_mask", + "mask", + "improved_contrast_enabled", + "rasterized_mask", + } + ) + + return { + "enabled": True, + "ffmpeg": { + "inputs": [ + { + "path": clip_path, + "roles": ["detect"], + "input_args": "-re -stream_loop -1 -fflags +genpts", + } + ], + "hwaccel_args": [], + }, + "detect": detect_dict, + "objects": objects_dict, + "zones": zones_dict, + "motion": motion_dict, + "record": {"enabled": 
False}, + "snapshots": {"enabled": False}, + "review": { + "alerts": {"enabled": False}, + "detections": {"enabled": False}, + }, + "birdseye": {"enabled": False}, + "audio": {"enabled": False}, + "lpr": {"enabled": False}, + "face_recognition": {"enabled": False}, + } + + def _cleanup_db(self, camera_name: str) -> None: + """Defensively remove any database rows for the replay camera.""" + try: + Event.delete().where(Event.camera == camera_name).execute() + except Exception as e: + logger.error("Failed to delete replay events: %s", e) + + try: + Timeline.delete().where(Timeline.camera == camera_name).execute() + except Exception as e: + logger.error("Failed to delete replay timeline: %s", e) + + try: + Recordings.delete().where(Recordings.camera == camera_name).execute() + except Exception as e: + logger.error("Failed to delete replay recordings: %s", e) + + try: + ReviewSegment.delete().where(ReviewSegment.camera == camera_name).execute() + except Exception as e: + logger.error("Failed to delete replay review segments: %s", e) + + def _cleanup_files(self, camera_name: str) -> None: + """Remove filesystem artifacts for the replay camera.""" + dirs_to_clean = [ + os.path.join(RECORD_DIR, camera_name), + os.path.join(CLIPS_DIR, camera_name), + os.path.join(THUMB_DIR, camera_name), + ] + + for dir_path in dirs_to_clean: + if os.path.exists(dir_path): + try: + shutil.rmtree(dir_path) + logger.debug("Removed replay directory: %s", dir_path) + except Exception as e: + logger.error("Failed to remove %s: %s", dir_path, e) + + # Remove replay clip and any related files + if os.path.exists(REPLAY_DIR): + try: + shutil.rmtree(REPLAY_DIR) + logger.debug("Removed replay cache directory") + except Exception as e: + logger.error("Failed to remove replay cache: %s", e) + + +def cleanup_replay_cameras() -> None: + """Remove any stale replay camera artifacts on startup. 
+ + Since replay cameras are memory-only and never written to YAML, they + won't appear in the config after a restart. This function cleans up + filesystem and database artifacts from any replay that was running when + the process stopped. + + Must be called AFTER the database is bound. + """ + stale_cameras: set[str] = set() + + # Scan filesystem for leftover replay artifacts to derive camera names + for dir_path in [RECORD_DIR, CLIPS_DIR, THUMB_DIR]: + if os.path.isdir(dir_path): + for entry in os.listdir(dir_path): + if entry.startswith(REPLAY_CAMERA_PREFIX): + stale_cameras.add(entry) + + if os.path.isdir(REPLAY_DIR): + for entry in os.listdir(REPLAY_DIR): + if entry.startswith(REPLAY_CAMERA_PREFIX) and entry.endswith(".mp4"): + stale_cameras.add(entry.removesuffix(".mp4")) + + if not stale_cameras: + return + + logger.info("Cleaning up stale replay camera artifacts: %s", list(stale_cameras)) + + manager = DebugReplayManager() + for camera_name in stale_cameras: + manager._cleanup_db(camera_name) + manager._cleanup_files(camera_name) + + if os.path.exists(REPLAY_DIR): + try: + shutil.rmtree(REPLAY_DIR) + except Exception as e: + logger.error("Failed to remove replay cache directory: %s", e) diff --git a/frigate/detectors/detection_runners.py b/frigate/detectors/detection_runners.py index 7565c9a3d..22b758f57 100644 --- a/frigate/detectors/detection_runners.py +++ b/frigate/detectors/detection_runners.py @@ -135,10 +135,8 @@ class ONNXModelRunner(BaseModelRunner): return model_type in [ EnrichmentModelTypeEnum.paddleocr.value, - EnrichmentModelTypeEnum.yolov9_license_plate.value, - EnrichmentModelTypeEnum.jina_v1.value, EnrichmentModelTypeEnum.jina_v2.value, - EnrichmentModelTypeEnum.facenet.value, + EnrichmentModelTypeEnum.arcface.value, ModelTypeEnum.rfdetr.value, ModelTypeEnum.dfine.value, ] diff --git a/frigate/detectors/detector_config.py b/frigate/detectors/detector_config.py index aa92f28f4..22623c7d7 100644 --- a/frigate/detectors/detector_config.py +++ 
b/frigate/detectors/detector_config.py @@ -45,30 +45,55 @@ class ModelTypeEnum(str, Enum): class ModelConfig(BaseModel): - path: Optional[str] = Field(None, title="Custom Object detection model path.") - labelmap_path: Optional[str] = Field( - None, title="Label map for custom object detector." + path: Optional[str] = Field( + None, + title="Custom Object detection model path", + description="Path to a custom detection model file (or plus:// for Frigate+ models).", + ) + labelmap_path: Optional[str] = Field( + None, + title="Label map for custom object detector", + description="Path to a labelmap file that maps numeric classes to string labels for the detector.", + ) + width: int = Field( + default=320, + title="Object detection model input width", + description="Width of the model input tensor in pixels.", + ) + height: int = Field( + default=320, + title="Object detection model input height", + description="Height of the model input tensor in pixels.", ) - width: int = Field(default=320, title="Object detection model input width.") - height: int = Field(default=320, title="Object detection model input height.") labelmap: Dict[int, str] = Field( - default_factory=dict, title="Labelmap customization." 
+ default_factory=dict, + title="Labelmap customization", + description="Overrides or remapping entries to merge into the standard labelmap.", ) attributes_map: Dict[str, list[str]] = Field( default=DEFAULT_ATTRIBUTE_LABEL_MAP, - title="Map of object labels to their attribute labels.", + title="Map of object labels to their attribute labels", + description="Mapping from object labels to attribute labels used to attach metadata (for example 'car' -> ['license_plate']).", ) input_tensor: InputTensorEnum = Field( - default=InputTensorEnum.nhwc, title="Model Input Tensor Shape" + default=InputTensorEnum.nhwc, + title="Model Input Tensor Shape", + description="Tensor format expected by the model: 'nhwc' or 'nchw'.", ) input_pixel_format: PixelFormatEnum = Field( - default=PixelFormatEnum.rgb, title="Model Input Pixel Color Format" + default=PixelFormatEnum.rgb, + title="Model Input Pixel Color Format", + description="Pixel colorspace expected by the model: 'rgb', 'bgr', or 'yuv'.", ) input_dtype: InputDTypeEnum = Field( - default=InputDTypeEnum.int, title="Model Input D Type" + default=InputDTypeEnum.int, + title="Model Input D Type", + description="Data type of the model input tensor (for example 'float32').", ) model_type: ModelTypeEnum = Field( - default=ModelTypeEnum.ssd, title="Object Detection Model Type" + default=ModelTypeEnum.ssd, + title="Object Detection Model Type", + description="Detector model architecture type (ssd, yolox, yolonas) used by some detectors for optimization.", ) _merged_labelmap: Optional[Dict[int, str]] = PrivateAttr() _colormap: Dict[int, Tuple[int, int, int]] = PrivateAttr() @@ -210,12 +235,20 @@ class ModelConfig(BaseModel): class BaseDetectorConfig(BaseModel): # the type field must be defined in all subclasses - type: str = Field(default="cpu", title="Detector Type") + type: str = Field( + default="cpu", + title="Detector Type", + description="Type of detector to use for object detection (for example 'cpu', 'edgetpu', 'openvino').", + ) 
model: Optional[ModelConfig] = Field( - default=None, title="Detector specific model configuration." + default=None, + title="Detector specific model configuration", + description="Detector-specific model configuration options (path, input size, etc.).", ) model_path: Optional[str] = Field( - default=None, title="Detector specific model path." + default=None, + title="Detector specific model path", + description="File path to the detector model binary if required by the chosen detector.", ) model_config = ConfigDict( extra="allow", arbitrary_types_allowed=True, protected_namespaces=() diff --git a/frigate/detectors/detector_utils.py b/frigate/detectors/detector_utils.py index d732de871..d8930b2ae 100644 --- a/frigate/detectors/detector_utils.py +++ b/frigate/detectors/detector_utils.py @@ -6,7 +6,7 @@ import numpy as np try: from tflite_runtime.interpreter import Interpreter, load_delegate except ModuleNotFoundError: - from tensorflow.lite.python.interpreter import Interpreter, load_delegate + from ai_edge_litert.interpreter import Interpreter, load_delegate logger = logging.getLogger(__name__) diff --git a/frigate/detectors/plugins/cpu_tfl.py b/frigate/detectors/plugins/cpu_tfl.py index 00351f519..2224a2bda 100644 --- a/frigate/detectors/plugins/cpu_tfl.py +++ b/frigate/detectors/plugins/cpu_tfl.py @@ -1,6 +1,6 @@ import logging -from pydantic import Field +from pydantic import ConfigDict, Field from typing_extensions import Literal from frigate.detectors.detection_api import DetectionApi @@ -12,7 +12,7 @@ from ..detector_utils import tflite_detect_raw, tflite_init try: from tflite_runtime.interpreter import Interpreter except ModuleNotFoundError: - from tensorflow.lite.python.interpreter import Interpreter + from ai_edge_litert.interpreter import Interpreter logger = logging.getLogger(__name__) @@ -21,8 +21,18 @@ DETECTOR_KEY = "cpu" class CpuDetectorConfig(BaseDetectorConfig): + """CPU TFLite detector that runs TensorFlow Lite models on the host CPU without 
hardware acceleration. Not recommended.""" + + model_config = ConfigDict( + title="CPU", + ) + type: Literal[DETECTOR_KEY] - num_threads: int = Field(default=3, title="Number of detection threads") + num_threads: int = Field( + default=3, + title="Number of detection threads", + description="The number of threads used for CPU-based inference.", + ) class CpuTfl(DetectionApi): diff --git a/frigate/detectors/plugins/deepstack.py b/frigate/detectors/plugins/deepstack.py index e00a4e70d..9b5fcd5af 100644 --- a/frigate/detectors/plugins/deepstack.py +++ b/frigate/detectors/plugins/deepstack.py @@ -4,7 +4,7 @@ import logging import numpy as np import requests from PIL import Image -from pydantic import Field +from pydantic import ConfigDict, Field from typing_extensions import Literal from frigate.detectors.detection_api import DetectionApi @@ -16,12 +16,28 @@ DETECTOR_KEY = "deepstack" class DeepstackDetectorConfig(BaseDetectorConfig): + """DeepStack/CodeProject.AI detector that sends images to a remote DeepStack HTTP API for inference. 
Not recommended."""
+
+    model_config = ConfigDict(
+        title="DeepStack",
+    )
+
     type: Literal[DETECTOR_KEY]
     api_url: str = Field(
-        default="http://localhost:80/v1/vision/detection", title="DeepStack API URL"
+        default="http://localhost:80/v1/vision/detection",
+        title="DeepStack API URL",
+        description="The URL of the DeepStack API.",
+    )
+    api_timeout: float = Field(
+        default=0.1,
+        title="DeepStack API timeout (in seconds)",
+        description="Maximum time allowed for a DeepStack API request.",
+    )
+    api_key: str = Field(
+        default="",
+        title="DeepStack API key (if required)",
+        description="Optional API key for authenticated DeepStack services.",
     )
-    api_timeout: float = Field(default=0.1, title="DeepStack API timeout (in seconds)")
-    api_key: str = Field(default="", title="DeepStack API key (if required)")
 
 
 class DeepStack(DetectionApi):
diff --git a/frigate/detectors/plugins/degirum.py b/frigate/detectors/plugins/degirum.py
index 28a13389f..5afb32a3a 100644
--- a/frigate/detectors/plugins/degirum.py
+++ b/frigate/detectors/plugins/degirum.py
@@ -2,7 +2,7 @@ import logging
 import queue
 
 import numpy as np
-from pydantic import Field
+from pydantic import ConfigDict, Field
 from typing_extensions import Literal
 
 from frigate.detectors.detection_api import DetectionApi
@@ -14,10 +14,28 @@ DETECTOR_KEY = "degirum"
 
 ### DETECTOR CONFIG ###
 class DGDetectorConfig(BaseDetectorConfig):
+    """DeGirum detector for running models via DeGirum cloud or local inference services."""
+
+    model_config = ConfigDict(
+        title="DeGirum",
+    )
+
     type: Literal[DETECTOR_KEY]
-    location: str = Field(default=None, title="Inference Location")
-    zoo: str = Field(default=None, title="Model Zoo")
-    token: str = Field(default=None, title="DeGirum Cloud Token")
+    location: str = Field(
+        default=None,
+        title="Inference Location",
+        description="Location of the DeGirum inference engine (e.g. 
'@cloud', '127.0.0.1').", + ) + zoo: str = Field( + default=None, + title="Model Zoo", + description="Path or URL to the DeGirum model zoo.", + ) + token: str = Field( + default=None, + title="DeGirum Cloud Token", + description="Token for DeGirum Cloud access.", + ) ### ACTUAL DETECTOR ### diff --git a/frigate/detectors/plugins/edgetpu_tfl.py b/frigate/detectors/plugins/edgetpu_tfl.py index 2b94fde39..02bd9f5ec 100644 --- a/frigate/detectors/plugins/edgetpu_tfl.py +++ b/frigate/detectors/plugins/edgetpu_tfl.py @@ -4,7 +4,7 @@ import os import cv2 import numpy as np -from pydantic import Field +from pydantic import ConfigDict, Field from typing_extensions import Literal from frigate.detectors.detection_api import DetectionApi @@ -13,7 +13,7 @@ from frigate.detectors.detector_config import BaseDetectorConfig, ModelTypeEnum try: from tflite_runtime.interpreter import Interpreter, load_delegate except ModuleNotFoundError: - from tensorflow.lite.python.interpreter import Interpreter, load_delegate + from ai_edge_litert.interpreter import Interpreter, load_delegate logger = logging.getLogger(__name__) @@ -21,8 +21,18 @@ DETECTOR_KEY = "edgetpu" class EdgeTpuDetectorConfig(BaseDetectorConfig): + """EdgeTPU detector that runs TensorFlow Lite models compiled for Coral EdgeTPU using the EdgeTPU delegate.""" + + model_config = ConfigDict( + title="EdgeTPU", + ) + type: Literal[DETECTOR_KEY] - device: str = Field(default=None, title="Device Type") + device: str = Field( + default=None, + title="Device Type", + description="The device to use for EdgeTPU inference (e.g. 
'usb', 'pci').", + ) class EdgeTpuTfl(DetectionApi): diff --git a/frigate/detectors/plugins/hailo8l.py b/frigate/detectors/plugins/hailo8l.py index cafc809c9..bbe84d52f 100755 --- a/frigate/detectors/plugins/hailo8l.py +++ b/frigate/detectors/plugins/hailo8l.py @@ -8,7 +8,7 @@ from typing import Dict, List, Optional, Tuple import cv2 import numpy as np -from pydantic import Field +from pydantic import ConfigDict, Field from typing_extensions import Literal from frigate.const import MODEL_CACHE_DIR @@ -410,5 +410,15 @@ class HailoDetector(DetectionApi): # ----------------- HailoDetectorConfig Class ----------------- # class HailoDetectorConfig(BaseDetectorConfig): + """Hailo-8/Hailo-8L detector using HEF models and the HailoRT SDK for inference on Hailo hardware.""" + + model_config = ConfigDict( + title="Hailo-8/Hailo-8L", + ) + type: Literal[DETECTOR_KEY] - device: str = Field(default="PCIe", title="Device Type") + device: str = Field( + default="PCIe", + title="Device Type", + description="The device to use for Hailo inference (e.g. 'PCIe', 'M.2').", + ) diff --git a/frigate/detectors/plugins/memryx.py b/frigate/detectors/plugins/memryx.py index a93888f8a..e0ad401cb 100644 --- a/frigate/detectors/plugins/memryx.py +++ b/frigate/detectors/plugins/memryx.py @@ -8,7 +8,7 @@ from queue import Queue import cv2 import numpy as np -from pydantic import BaseModel, Field +from pydantic import BaseModel, ConfigDict, Field from typing_extensions import Literal from frigate.detectors.detection_api import DetectionApi @@ -30,8 +30,18 @@ class ModelConfig(BaseModel): class MemryXDetectorConfig(BaseDetectorConfig): + """MemryX MX3 detector that runs compiled DFP models on MemryX accelerators.""" + + model_config = ConfigDict( + title="MemryX", + ) + type: Literal[DETECTOR_KEY] - device: str = Field(default="PCIe", title="Device Path") + device: str = Field( + default="PCIe", + title="Device Path", + description="The device to use for MemryX inference (e.g. 
'PCIe').", + ) class MemryXDetector(DetectionApi): diff --git a/frigate/detectors/plugins/onnx.py b/frigate/detectors/plugins/onnx.py index 6c9e510ce..c52480642 100644 --- a/frigate/detectors/plugins/onnx.py +++ b/frigate/detectors/plugins/onnx.py @@ -1,7 +1,7 @@ import logging import numpy as np -from pydantic import Field +from pydantic import ConfigDict, Field from typing_extensions import Literal from frigate.detectors.detection_api import DetectionApi @@ -23,8 +23,18 @@ DETECTOR_KEY = "onnx" class ONNXDetectorConfig(BaseDetectorConfig): + """ONNX detector for running ONNX models; will use available acceleration backends (CUDA/ROCm/OpenVINO) when available.""" + + model_config = ConfigDict( + title="ONNX", + ) + type: Literal[DETECTOR_KEY] - device: str = Field(default="AUTO", title="Device Type") + device: str = Field( + default="AUTO", + title="Device Type", + description="The device to use for ONNX inference (e.g. 'AUTO', 'CPU', 'GPU').", + ) class ONNXDetector(DetectionApi): diff --git a/frigate/detectors/plugins/openvino.py b/frigate/detectors/plugins/openvino.py index bda5c8871..f73b7cb0c 100644 --- a/frigate/detectors/plugins/openvino.py +++ b/frigate/detectors/plugins/openvino.py @@ -2,7 +2,7 @@ import logging import numpy as np import openvino as ov -from pydantic import Field +from pydantic import ConfigDict, Field from typing_extensions import Literal from frigate.detectors.detection_api import DetectionApi @@ -20,8 +20,18 @@ DETECTOR_KEY = "openvino" class OvDetectorConfig(BaseDetectorConfig): + """OpenVINO detector for AMD and Intel CPUs, Intel GPUs and Intel VPU hardware.""" + + model_config = ConfigDict( + title="OpenVINO", + ) + type: Literal[DETECTOR_KEY] - device: str = Field(default=None, title="Device Type") + device: str = Field( + default=None, + title="Device Type", + description="The device to use for OpenVINO inference (e.g. 
'CPU', 'GPU', 'NPU').", + ) class OvDetector(DetectionApi): diff --git a/frigate/detectors/plugins/rknn.py b/frigate/detectors/plugins/rknn.py index c16df507e..15ab93dcb 100644 --- a/frigate/detectors/plugins/rknn.py +++ b/frigate/detectors/plugins/rknn.py @@ -6,7 +6,7 @@ from typing import Literal import cv2 import numpy as np -from pydantic import Field +from pydantic import ConfigDict, Field from frigate.const import MODEL_CACHE_DIR, SUPPORTED_RK_SOCS from frigate.detectors.detection_api import DetectionApi @@ -29,8 +29,20 @@ model_cache_dir = os.path.join(MODEL_CACHE_DIR, "rknn_cache/") class RknnDetectorConfig(BaseDetectorConfig): + """RKNN detector for Rockchip NPUs; runs compiled RKNN models on Rockchip hardware.""" + + model_config = ConfigDict( + title="RKNN", + ) + type: Literal[DETECTOR_KEY] - num_cores: int = Field(default=0, ge=0, le=3, title="Number of NPU cores to use.") + num_cores: int = Field( + default=0, + ge=0, + le=3, + title="Number of NPU cores to use.", + description="The number of NPU cores to use (0 for auto).", + ) class Rknn(DetectionApi): diff --git a/frigate/detectors/plugins/synaptics.py b/frigate/detectors/plugins/synaptics.py index 6181b16d7..e6983a29c 100644 --- a/frigate/detectors/plugins/synaptics.py +++ b/frigate/detectors/plugins/synaptics.py @@ -2,6 +2,7 @@ import logging import os import numpy as np +from pydantic import ConfigDict from typing_extensions import Literal from frigate.detectors.detection_api import DetectionApi @@ -27,6 +28,12 @@ DETECTOR_KEY = "synaptics" class SynapDetectorConfig(BaseDetectorConfig): + """Synaptics NPU detector for models in .synap format using the Synap SDK on Synaptics hardware.""" + + model_config = ConfigDict( + title="Synaptics", + ) + type: Literal[DETECTOR_KEY] diff --git a/frigate/detectors/plugins/teflon_tfl.py b/frigate/detectors/plugins/teflon_tfl.py index 7e29d6630..370d08817 100644 --- a/frigate/detectors/plugins/teflon_tfl.py +++ b/frigate/detectors/plugins/teflon_tfl.py @@ -1,5 
+1,6 @@ import logging +from pydantic import ConfigDict from typing_extensions import Literal from frigate.detectors.detection_api import DetectionApi @@ -18,6 +19,12 @@ DETECTOR_KEY = "teflon_tfl" class TeflonDetectorConfig(BaseDetectorConfig): + """Teflon delegate detector for TFLite using Mesa Teflon delegate library to accelerate inference on supported GPUs.""" + + model_config = ConfigDict( + title="Teflon", + ) + type: Literal[DETECTOR_KEY] diff --git a/frigate/detectors/plugins/tensorrt.py b/frigate/detectors/plugins/tensorrt.py index bf0eb6fa8..087331a2d 100644 --- a/frigate/detectors/plugins/tensorrt.py +++ b/frigate/detectors/plugins/tensorrt.py @@ -14,7 +14,7 @@ try: except ModuleNotFoundError: TRT_SUPPORT = False -from pydantic import Field +from pydantic import ConfigDict, Field from typing_extensions import Literal from frigate.detectors.detection_api import DetectionApi @@ -46,8 +46,16 @@ if TRT_SUPPORT: class TensorRTDetectorConfig(BaseDetectorConfig): + """TensorRT detector for Nvidia Jetson devices using serialized TensorRT engines for accelerated inference.""" + + model_config = ConfigDict( + title="TensorRT", + ) + type: Literal[DETECTOR_KEY] - device: int = Field(default=0, title="GPU Device Index") + device: int = Field( + default=0, title="GPU Device Index", description="The GPU device index to use." 
+ ) class HostDeviceMem(object): diff --git a/frigate/detectors/plugins/zmq_ipc.py b/frigate/detectors/plugins/zmq_ipc.py index cd397aefa..b0e568eff 100644 --- a/frigate/detectors/plugins/zmq_ipc.py +++ b/frigate/detectors/plugins/zmq_ipc.py @@ -5,7 +5,7 @@ from typing import Any, List import numpy as np import zmq -from pydantic import Field +from pydantic import ConfigDict, Field from typing_extensions import Literal from frigate.detectors.detection_api import DetectionApi @@ -17,14 +17,28 @@ DETECTOR_KEY = "zmq" class ZmqDetectorConfig(BaseDetectorConfig): + """ZMQ IPC detector that offloads inference to an external process via a ZeroMQ IPC endpoint.""" + + model_config = ConfigDict( + title="ZMQ IPC", + ) + type: Literal[DETECTOR_KEY] endpoint: str = Field( - default="ipc:///tmp/cache/zmq_detector", title="ZMQ IPC endpoint" + default="ipc:///tmp/cache/zmq_detector", + title="ZMQ IPC endpoint", + description="The ZMQ endpoint to connect to.", ) request_timeout_ms: int = Field( - default=200, title="ZMQ request timeout in milliseconds" + default=200, + title="ZMQ request timeout in milliseconds", + description="Timeout for ZMQ requests in milliseconds.", + ) + linger_ms: int = Field( + default=0, + title="ZMQ socket linger in milliseconds", + description="Socket linger period in milliseconds.", ) - linger_ms: int = Field(default=0, title="ZMQ socket linger in milliseconds") class ZmqIpcDetector(DetectionApi): diff --git a/frigate/embeddings/maintainer.py b/frigate/embeddings/maintainer.py index bd707de15..8e45af498 100644 --- a/frigate/embeddings/maintainer.py +++ b/frigate/embeddings/maintainer.py @@ -59,7 +59,7 @@ from frigate.data_processing.real_time.license_plate import ( from frigate.data_processing.types import DataProcessorMetrics, PostProcessDataEnum from frigate.db.sqlitevecq import SqliteVecQueueDatabase from frigate.events.types import EventTypeEnum, RegenerateDescriptionEnum -from frigate.genai import get_genai_client +from frigate.genai import 
GenAIClientManager from frigate.models import Event, Recordings, ReviewSegment, Trigger from frigate.util.builtin import serialize from frigate.util.file import get_event_thumbnail_bytes @@ -144,7 +144,7 @@ class EmbeddingMaintainer(threading.Thread): self.frame_manager = SharedMemoryFrameManager() self.detected_license_plates: dict[str, dict[str, Any]] = {} - self.genai_client = get_genai_client(config) + self.genai_manager = GenAIClientManager(config) # model runners to share between realtime and post processors if self.config.lpr.enabled: @@ -203,12 +203,15 @@ class EmbeddingMaintainer(threading.Thread): # post processors self.post_processors: list[PostProcessorApi] = [] - if self.genai_client is not None and any( + if self.genai_manager.vision_client is not None and any( c.review.genai.enabled_in_config for c in self.config.cameras.values() ): self.post_processors.append( ReviewDescriptionProcessor( - self.config, self.requestor, self.metrics, self.genai_client + self.config, + self.requestor, + self.metrics, + self.genai_manager.vision_client, ) ) @@ -246,7 +249,7 @@ class EmbeddingMaintainer(threading.Thread): ) self.post_processors.append(semantic_trigger_processor) - if self.genai_client is not None and any( + if self.genai_manager.vision_client is not None and any( c.objects.genai.enabled_in_config for c in self.config.cameras.values() ): self.post_processors.append( @@ -255,7 +258,7 @@ class EmbeddingMaintainer(threading.Thread): self.embeddings, self.requestor, self.metrics, - self.genai_client, + self.genai_manager.vision_client, semantic_trigger_processor, ) ) @@ -418,7 +421,9 @@ class EmbeddingMaintainer(threading.Thread): if self.config.semantic_search.enabled: self.embeddings.update_stats() - camera_config = self.config.cameras[camera] + camera_config = self.config.cameras.get(camera) + if camera_config is None: + return # no need to process updated objects if no processors are active if len(self.realtime_processors) == 0 and 
len(self.post_processors) == 0: @@ -636,7 +641,10 @@ class EmbeddingMaintainer(threading.Thread): if not camera or camera not in self.config.cameras: return - camera_config = self.config.cameras[camera] + camera_config = self.config.cameras.get(camera) + if camera_config is None: + return + dedicated_lpr_enabled = ( camera_config.type == CameraTypeEnum.lpr and "license_plate" not in camera_config.objects.track diff --git a/frigate/embeddings/onnx/face_embedding.py b/frigate/embeddings/onnx/face_embedding.py index 04d756897..75dfedc94 100644 --- a/frigate/embeddings/onnx/face_embedding.py +++ b/frigate/embeddings/onnx/face_embedding.py @@ -17,7 +17,7 @@ from .base_embedding import BaseEmbedding try: from tflite_runtime.interpreter import Interpreter except ModuleNotFoundError: - from tensorflow.lite.python.interpreter import Interpreter + from ai_edge_litert.interpreter import Interpreter logger = logging.getLogger(__name__) diff --git a/frigate/events/audio.py b/frigate/events/audio.py index e88f2ae71..ad87d19c1 100644 --- a/frigate/events/audio.py +++ b/frigate/events/audio.py @@ -43,7 +43,7 @@ from frigate.video import start_or_restart_ffmpeg, stop_ffmpeg try: from tflite_runtime.interpreter import Interpreter except ModuleNotFoundError: - from tensorflow.lite.python.interpreter import Interpreter + from ai_edge_litert.interpreter import Interpreter logger = logging.getLogger(__name__) diff --git a/frigate/events/maintainer.py b/frigate/events/maintainer.py index f6ab777c1..77f6eee5f 100644 --- a/frigate/events/maintainer.py +++ b/frigate/events/maintainer.py @@ -7,6 +7,7 @@ from typing import Dict from frigate.comms.events_updater import EventEndPublisher, EventUpdateSubscriber from frigate.config import FrigateConfig from frigate.config.classification import ObjectClassificationType +from frigate.const import REPLAY_CAMERA_PREFIX from frigate.events.types import EventStateEnum, EventTypeEnum from frigate.models import Event from frigate.util.builtin import 
to_relative_box @@ -146,7 +147,9 @@ class EventProcessor(threading.Thread): if should_update_db(self.events_in_process[event_data["id"]], event_data): updated_db = True - camera_config = self.config.cameras[camera] + camera_config = self.config.cameras.get(camera) + if camera_config is None: + return width = camera_config.detect.width height = camera_config.detect.height first_detector = list(self.config.detectors.values())[0] @@ -283,6 +286,10 @@ class EventProcessor(threading.Thread): def handle_external_detection( self, event_type: EventStateEnum, event_data: Event ) -> None: + # Skip replay cameras + if event_data.get("camera", "").startswith(REPLAY_CAMERA_PREFIX): + return + if event_type == EventStateEnum.start: event = { Event.id: event_data["id"], diff --git a/frigate/genai/__init__.py b/frigate/genai/__init__.py index be1f6d1e7..f52a19e45 100644 --- a/frigate/genai/__init__.py +++ b/frigate/genai/__init__.py @@ -9,13 +9,24 @@ from typing import Any, Optional from playhouse.shortcuts import model_to_dict -from frigate.config import CameraConfig, FrigateConfig, GenAIConfig, GenAIProviderEnum +from frigate.config import CameraConfig, GenAIConfig, GenAIProviderEnum from frigate.const import CLIPS_DIR from frigate.data_processing.post.types import ReviewMetadata +from frigate.genai.manager import GenAIClientManager from frigate.models import Event logger = logging.getLogger(__name__) +__all__ = [ + "GenAIClient", + "GenAIClientManager", + "GenAIConfig", + "GenAIProviderEnum", + "PROVIDERS", + "load_providers", + "register_genai_provider", +] + PROVIDERS = {} @@ -69,7 +80,7 @@ class GenAIClient: return "\n- (No objects detected)" context_prompt = f""" -Your task is to analyze the sequence of images ({len(thumbnails)} total) taken in chronological order from the perspective of the {review_data["camera"]} security camera. +Your task is to analyze a sequence of images taken in chronological order from a security camera. 
## Normal Activity Patterns for This Property @@ -108,7 +119,8 @@ Your response MUST be a flat JSON object with: ## Sequence Details -- Frame 1 = earliest, Frame {len(thumbnails)} = latest +- Camera: {review_data["camera"]} +- Total frames: {len(thumbnails)} (Frame 1 = earliest, Frame {len(thumbnails)} = latest) - Activity started at {review_data["start"]} and lasted {review_data["duration"]} seconds - Zones involved: {", ".join(review_data["zones"]) if review_data["zones"] else "None"} @@ -292,18 +304,63 @@ Guidelines: """Get the context window size for this provider in tokens.""" return 4096 + def chat_with_tools( + self, + messages: list[dict[str, Any]], + tools: Optional[list[dict[str, Any]]] = None, + tool_choice: Optional[str] = "auto", + ) -> dict[str, Any]: + """ + Send chat messages to LLM with optional tool definitions. -def get_genai_client(config: FrigateConfig) -> Optional[GenAIClient]: - """Get the GenAI client.""" - if not config.genai.provider: - return None + This method handles conversation-style interactions with the LLM, + including function calling/tool usage capabilities. - load_providers() - provider = PROVIDERS.get(config.genai.provider) - if provider: - return provider(config.genai) + Args: + messages: List of message dictionaries. Each message should have: + - 'role': str - One of 'user', 'assistant', 'system', or 'tool' + - 'content': str - The message content + - 'tool_call_id': Optional[str] - For tool responses, the ID of the tool call + - 'name': Optional[str] - For tool messages, the tool name + tools: Optional list of tool definitions in OpenAI-compatible format. 
+ Each tool should have 'type': 'function' and 'function' with: + - 'name': str - Tool name + - 'description': str - Tool description + - 'parameters': dict - JSON schema for parameters + tool_choice: How the model should handle tools: + - 'auto': Model decides whether to call tools + - 'none': Model must not call tools + - 'required': Model must call at least one tool + - Or a dict specifying a specific tool to call + **kwargs: Additional provider-specific parameters. - return None + Returns: + Dictionary with: + - 'content': Optional[str] - The text response from the LLM, None if tool calls + - 'tool_calls': Optional[List[Dict]] - List of tool calls if LLM wants to call tools. + Each tool call dict has: + - 'id': str - Unique identifier for this tool call + - 'name': str - Tool name to call + - 'arguments': dict - Arguments for the tool call (parsed JSON) + - 'finish_reason': str - Reason generation stopped: + - 'stop': Normal completion + - 'tool_calls': LLM wants to call tools + - 'length': Hit token limit + - 'error': An error occurred + + Raises: + NotImplementedError: If the provider doesn't implement this method. + """ + # Base implementation - each provider should override this + logger.warning( + f"{self.__class__.__name__} does not support chat_with_tools. " + "This method should be overridden by the provider implementation." 
+ ) + return { + "content": None, + "tool_calls": None, + "finish_reason": "error", + } def load_providers(): diff --git a/frigate/genai/azure-openai.py b/frigate/genai/azure-openai.py index eb08f7786..9122ca14e 100644 --- a/frigate/genai/azure-openai.py +++ b/frigate/genai/azure-openai.py @@ -1,8 +1,9 @@ """Azure OpenAI Provider for Frigate AI.""" import base64 +import json import logging -from typing import Optional +from typing import Any, Optional from urllib.parse import parse_qs, urlparse from openai import AzureOpenAI @@ -76,3 +77,213 @@ class OpenAIClient(GenAIClient): def get_context_size(self) -> int: """Get the context window size for Azure OpenAI.""" return 128000 + + def chat_with_tools( + self, + messages: list[dict[str, Any]], + tools: Optional[list[dict[str, Any]]] = None, + tool_choice: Optional[str] = "auto", + ) -> dict[str, Any]: + try: + openai_tool_choice = None + if tool_choice: + if tool_choice == "none": + openai_tool_choice = "none" + elif tool_choice == "auto": + openai_tool_choice = "auto" + elif tool_choice == "required": + openai_tool_choice = "required" + + request_params = { + "model": self.genai_config.model, + "messages": messages, + "timeout": self.timeout, + } + + if tools: + request_params["tools"] = tools + if openai_tool_choice is not None: + request_params["tool_choice"] = openai_tool_choice + + result = self.provider.chat.completions.create(**request_params) + + if ( + result is None + or not hasattr(result, "choices") + or len(result.choices) == 0 + ): + return { + "content": None, + "tool_calls": None, + "finish_reason": "error", + } + + choice = result.choices[0] + message = choice.message + + content = message.content.strip() if message.content else None + + tool_calls = None + if message.tool_calls: + tool_calls = [] + for tool_call in message.tool_calls: + try: + arguments = json.loads(tool_call.function.arguments) + except (json.JSONDecodeError, AttributeError) as e: + logger.warning( + f"Failed to parse tool call 
arguments: {e}, " + f"tool: {tool_call.function.name if hasattr(tool_call.function, 'name') else 'unknown'}" + ) + arguments = {} + + tool_calls.append( + { + "id": tool_call.id if hasattr(tool_call, "id") else "", + "name": tool_call.function.name + if hasattr(tool_call.function, "name") + else "", + "arguments": arguments, + } + ) + + finish_reason = "error" + if hasattr(choice, "finish_reason") and choice.finish_reason: + finish_reason = choice.finish_reason + elif tool_calls: + finish_reason = "tool_calls" + elif content: + finish_reason = "stop" + + return { + "content": content, + "tool_calls": tool_calls, + "finish_reason": finish_reason, + } + + except Exception as e: + logger.warning("Azure OpenAI returned an error: %s", str(e)) + return { + "content": None, + "tool_calls": None, + "finish_reason": "error", + } + + async def chat_with_tools_stream( + self, + messages: list[dict[str, Any]], + tools: Optional[list[dict[str, Any]]] = None, + tool_choice: Optional[str] = "auto", + ): + """ + Stream chat with tools; yields content deltas then final message. + + Implements streaming function calling/tool usage for Azure OpenAI models. 
+ """ + try: + openai_tool_choice = None + if tool_choice: + if tool_choice == "none": + openai_tool_choice = "none" + elif tool_choice == "auto": + openai_tool_choice = "auto" + elif tool_choice == "required": + openai_tool_choice = "required" + + request_params = { + "model": self.genai_config.model, + "messages": messages, + "timeout": self.timeout, + "stream": True, + } + + if tools: + request_params["tools"] = tools + if openai_tool_choice is not None: + request_params["tool_choice"] = openai_tool_choice + + # Use streaming API + content_parts: list[str] = [] + tool_calls_by_index: dict[int, dict[str, Any]] = {} + finish_reason = "stop" + + stream = self.provider.chat.completions.create(**request_params) + + for chunk in stream: + if not chunk or not chunk.choices: + continue + + choice = chunk.choices[0] + delta = choice.delta + + # Check for finish reason + if choice.finish_reason: + finish_reason = choice.finish_reason + + # Extract content deltas + if delta.content: + content_parts.append(delta.content) + yield ("content_delta", delta.content) + + # Extract tool calls + if delta.tool_calls: + for tc in delta.tool_calls: + idx = tc.index + fn = tc.function + + if idx not in tool_calls_by_index: + tool_calls_by_index[idx] = { + "id": tc.id or "", + "name": fn.name if fn and fn.name else "", + "arguments": "", + } + + t = tool_calls_by_index[idx] + if tc.id: + t["id"] = tc.id + if fn and fn.name: + t["name"] = fn.name + if fn and fn.arguments: + t["arguments"] += fn.arguments + + # Build final message + full_content = "".join(content_parts).strip() or None + + # Convert tool calls to list format + tool_calls_list = None + if tool_calls_by_index: + tool_calls_list = [] + for tc in tool_calls_by_index.values(): + try: + # Parse accumulated arguments as JSON + parsed_args = json.loads(tc["arguments"]) + except (json.JSONDecodeError, Exception): + parsed_args = tc["arguments"] + + tool_calls_list.append( + { + "id": tc["id"], + "name": tc["name"], + "arguments": 
parsed_args, + } + ) + finish_reason = "tool_calls" + + yield ( + "message", + { + "content": full_content, + "tool_calls": tool_calls_list, + "finish_reason": finish_reason, + }, + ) + + except Exception as e: + logger.warning("Azure OpenAI streaming returned an error: %s", str(e)) + yield ( + "message", + { + "content": None, + "tool_calls": None, + "finish_reason": "error", + }, + ) diff --git a/frigate/genai/gemini.py b/frigate/genai/gemini.py index b700c33a4..418d633b2 100644 --- a/frigate/genai/gemini.py +++ b/frigate/genai/gemini.py @@ -1,7 +1,8 @@ """Gemini Provider for Frigate AI.""" +import json import logging -from typing import Optional +from typing import Any, Optional from google import genai from google.genai import errors, types @@ -76,3 +77,436 @@ class GeminiClient(GenAIClient): """Get the context window size for Gemini.""" # Gemini Pro Vision has a 1M token context window return 1000000 + + def chat_with_tools( + self, + messages: list[dict[str, Any]], + tools: Optional[list[dict[str, Any]]] = None, + tool_choice: Optional[str] = "auto", + ) -> dict[str, Any]: + """ + Send chat messages to Gemini with optional tool definitions. + + Implements function calling/tool usage for Gemini models. 
+ """ + try: + # Convert messages to Gemini format + gemini_messages = [] + for msg in messages: + role = msg.get("role", "user") + content = msg.get("content", "") + + # Map roles to Gemini format + if role == "system": + # Gemini doesn't have system role, prepend to first user message + if gemini_messages and gemini_messages[0].role == "user": + gemini_messages[0].parts[ + 0 + ].text = f"{content}\n\n{gemini_messages[0].parts[0].text}" + else: + gemini_messages.append( + types.Content( + role="user", parts=[types.Part.from_text(text=content)] + ) + ) + elif role == "assistant": + gemini_messages.append( + types.Content( + role="model", parts=[types.Part.from_text(text=content)] + ) + ) + elif role == "tool": + # Handle tool response + function_response = { + "name": msg.get("name", ""), + "response": content, + } + gemini_messages.append( + types.Content( + role="function", + parts=[ + types.Part.from_function_response(function_response) + ], + ) + ) + else: # user + gemini_messages.append( + types.Content( + role="user", parts=[types.Part.from_text(text=content)] + ) + ) + + # Convert tools to Gemini format + gemini_tools = None + if tools: + gemini_tools = [] + for tool in tools: + if tool.get("type") == "function": + func = tool.get("function", {}) + gemini_tools.append( + types.Tool( + function_declarations=[ + types.FunctionDeclaration( + name=func.get("name", ""), + description=func.get("description", ""), + parameters=func.get("parameters", {}), + ) + ] + ) + ) + + # Configure tool choice + tool_config = None + if tool_choice: + if tool_choice == "none": + tool_config = types.ToolConfig( + function_calling_config=types.FunctionCallingConfig(mode="NONE") + ) + elif tool_choice == "auto": + tool_config = types.ToolConfig( + function_calling_config=types.FunctionCallingConfig(mode="AUTO") + ) + elif tool_choice == "required": + tool_config = types.ToolConfig( + function_calling_config=types.FunctionCallingConfig(mode="ANY") + ) + + # Build request config + 
config_params = {"candidate_count": 1} + + if gemini_tools: + config_params["tools"] = gemini_tools + + if tool_config: + config_params["tool_config"] = tool_config + + # Merge runtime_options + if isinstance(self.genai_config.runtime_options, dict): + config_params.update(self.genai_config.runtime_options) + + response = self.provider.models.generate_content( + model=self.genai_config.model, + contents=gemini_messages, + config=types.GenerateContentConfig(**config_params), + ) + + # Check if response is valid + if not response or not response.candidates: + return { + "content": None, + "tool_calls": None, + "finish_reason": "error", + } + + candidate = response.candidates[0] + content = None + tool_calls = None + + # Extract content and tool calls from response + if candidate.content and candidate.content.parts: + for part in candidate.content.parts: + if part.text: + content = part.text.strip() + elif part.function_call: + # Handle function call + if tool_calls is None: + tool_calls = [] + + try: + arguments = ( + dict(part.function_call.args) + if part.function_call.args + else {} + ) + except Exception: + arguments = {} + + tool_calls.append( + { + "id": part.function_call.name or "", + "name": part.function_call.name or "", + "arguments": arguments, + } + ) + + # Determine finish reason + finish_reason = "error" + if hasattr(candidate, "finish_reason") and candidate.finish_reason: + from google.genai.types import FinishReason + + if candidate.finish_reason == FinishReason.STOP: + finish_reason = "stop" + elif candidate.finish_reason == FinishReason.MAX_TOKENS: + finish_reason = "length" + elif candidate.finish_reason in [ + FinishReason.SAFETY, + FinishReason.RECITATION, + ]: + finish_reason = "error" + elif tool_calls: + finish_reason = "tool_calls" + elif content: + finish_reason = "stop" + elif tool_calls: + finish_reason = "tool_calls" + elif content: + finish_reason = "stop" + + return { + "content": content, + "tool_calls": tool_calls, + "finish_reason": 
finish_reason, + } + + except errors.APIError as e: + logger.warning("Gemini API error during chat_with_tools: %s", str(e)) + return { + "content": None, + "tool_calls": None, + "finish_reason": "error", + } + except Exception as e: + logger.warning( + "Gemini returned an error during chat_with_tools: %s", str(e) + ) + return { + "content": None, + "tool_calls": None, + "finish_reason": "error", + } + + async def chat_with_tools_stream( + self, + messages: list[dict[str, Any]], + tools: Optional[list[dict[str, Any]]] = None, + tool_choice: Optional[str] = "auto", + ): + """ + Stream chat with tools; yields content deltas then final message. + + Implements streaming function calling/tool usage for Gemini models. + """ + try: + # Convert messages to Gemini format + gemini_messages = [] + for msg in messages: + role = msg.get("role", "user") + content = msg.get("content", "") + + # Map roles to Gemini format + if role == "system": + # Gemini doesn't have system role, prepend to first user message + if gemini_messages and gemini_messages[0].role == "user": + gemini_messages[0].parts[ + 0 + ].text = f"{content}\n\n{gemini_messages[0].parts[0].text}" + else: + gemini_messages.append( + types.Content( + role="user", parts=[types.Part.from_text(text=content)] + ) + ) + elif role == "assistant": + gemini_messages.append( + types.Content( + role="model", parts=[types.Part.from_text(text=content)] + ) + ) + elif role == "tool": + # Handle tool response + function_response = { + "name": msg.get("name", ""), + "response": content, + } + gemini_messages.append( + types.Content( + role="function", + parts=[ + types.Part.from_function_response(function_response) + ], + ) + ) + else: # user + gemini_messages.append( + types.Content( + role="user", parts=[types.Part.from_text(text=content)] + ) + ) + + # Convert tools to Gemini format + gemini_tools = None + if tools: + gemini_tools = [] + for tool in tools: + if tool.get("type") == "function": + func = tool.get("function", {}) + 
gemini_tools.append( + types.Tool( + function_declarations=[ + types.FunctionDeclaration( + name=func.get("name", ""), + description=func.get("description", ""), + parameters=func.get("parameters", {}), + ) + ] + ) + ) + + # Configure tool choice + tool_config = None + if tool_choice: + if tool_choice == "none": + tool_config = types.ToolConfig( + function_calling_config=types.FunctionCallingConfig(mode="NONE") + ) + elif tool_choice == "auto": + tool_config = types.ToolConfig( + function_calling_config=types.FunctionCallingConfig(mode="AUTO") + ) + elif tool_choice == "required": + tool_config = types.ToolConfig( + function_calling_config=types.FunctionCallingConfig(mode="ANY") + ) + + # Build request config + config_params = {"candidate_count": 1} + + if gemini_tools: + config_params["tools"] = gemini_tools + + if tool_config: + config_params["tool_config"] = tool_config + + # Merge runtime_options + if isinstance(self.genai_config.runtime_options, dict): + config_params.update(self.genai_config.runtime_options) + + # Use streaming API + content_parts: list[str] = [] + tool_calls_by_index: dict[int, dict[str, Any]] = {} + finish_reason = "stop" + + response = self.provider.models.generate_content_stream( + model=self.genai_config.model, + contents=gemini_messages, + config=types.GenerateContentConfig(**config_params), + ) + + async for chunk in response: + if not chunk or not chunk.candidates: + continue + + candidate = chunk.candidates[0] + + # Check for finish reason + if hasattr(candidate, "finish_reason") and candidate.finish_reason: + from google.genai.types import FinishReason + + if candidate.finish_reason == FinishReason.STOP: + finish_reason = "stop" + elif candidate.finish_reason == FinishReason.MAX_TOKENS: + finish_reason = "length" + elif candidate.finish_reason in [ + FinishReason.SAFETY, + FinishReason.RECITATION, + ]: + finish_reason = "error" + + # Extract content and tool calls from chunk + if candidate.content and candidate.content.parts: + for 
part in candidate.content.parts: + if part.text: + content_parts.append(part.text) + yield ("content_delta", part.text) + elif part.function_call: + # Handle function call + try: + arguments = ( + dict(part.function_call.args) + if part.function_call.args + else {} + ) + except Exception: + arguments = {} + + # Store tool call + tool_call_id = part.function_call.name or "" + tool_call_name = part.function_call.name or "" + + # Check if we already have this tool call + found_index = None + for idx, tc in tool_calls_by_index.items(): + if tc["name"] == tool_call_name: + found_index = idx + break + + if found_index is None: + found_index = len(tool_calls_by_index) + tool_calls_by_index[found_index] = { + "id": tool_call_id, + "name": tool_call_name, + "arguments": "", + } + + # Accumulate arguments + if arguments: + tool_calls_by_index[found_index]["arguments"] += ( + json.dumps(arguments) + if isinstance(arguments, dict) + else str(arguments) + ) + + # Build final message + full_content = "".join(content_parts).strip() or None + + # Convert tool calls to list format + tool_calls_list = None + if tool_calls_by_index: + tool_calls_list = [] + for tc in tool_calls_by_index.values(): + try: + # Try to parse accumulated arguments as JSON + parsed_args = json.loads(tc["arguments"]) + except (json.JSONDecodeError, Exception): + parsed_args = tc["arguments"] + + tool_calls_list.append( + { + "id": tc["id"], + "name": tc["name"], + "arguments": parsed_args, + } + ) + finish_reason = "tool_calls" + + yield ( + "message", + { + "content": full_content, + "tool_calls": tool_calls_list, + "finish_reason": finish_reason, + }, + ) + + except errors.APIError as e: + logger.warning("Gemini API error during streaming: %s", str(e)) + yield ( + "message", + { + "content": None, + "tool_calls": None, + "finish_reason": "error", + }, + ) + except Exception as e: + logger.warning( + "Gemini returned an error during chat_with_tools_stream: %s", str(e) + ) + yield ( + "message", + { + 
"content": None, + "tool_calls": None, + "finish_reason": "error", + }, + ) diff --git a/frigate/genai/llama_cpp.py b/frigate/genai/llama_cpp.py new file mode 100644 index 000000000..24dcea2fb --- /dev/null +++ b/frigate/genai/llama_cpp.py @@ -0,0 +1,350 @@ +"""llama.cpp Provider for Frigate AI.""" + +import base64 +import json +import logging +from typing import Any, Optional + +import httpx +import requests + +from frigate.config import GenAIProviderEnum +from frigate.genai import GenAIClient, register_genai_provider +from frigate.genai.utils import parse_tool_calls_from_message + +logger = logging.getLogger(__name__) + + +@register_genai_provider(GenAIProviderEnum.llamacpp) +class LlamaCppClient(GenAIClient): + """Generative AI client for Frigate using llama.cpp server.""" + + LOCAL_OPTIMIZED_OPTIONS = { + "temperature": 0.7, + "repeat_penalty": 1.05, + "top_p": 0.8, + } + + provider: str # base_url + provider_options: dict[str, Any] + + def _init_provider(self): + """Initialize the client.""" + self.provider_options = { + **self.LOCAL_OPTIMIZED_OPTIONS, + **self.genai_config.provider_options, + } + return ( + self.genai_config.base_url.rstrip("/") + if self.genai_config.base_url + else None + ) + + def _send(self, prompt: str, images: list[bytes]) -> Optional[str]: + """Submit a request to llama.cpp server.""" + if self.provider is None: + logger.warning( + "llama.cpp provider has not been initialized, a description will not be generated. Check your llama.cpp configuration." 
+ ) + return None + + try: + content = [] + for image in images: + encoded_image = base64.b64encode(image).decode("utf-8") + content.append( + { + "type": "image_url", + "image_url": { + "url": f"data:image/jpeg;base64,{encoded_image}", + }, + } + ) + content.append( + { + "type": "text", + "text": prompt, + } + ) + + # Build request payload with llama.cpp native options + payload = { + "model": self.genai_config.model, + "messages": [ + { + "role": "user", + "content": content, + }, + ], + **self.provider_options, + } + + response = requests.post( + f"{self.provider}/v1/chat/completions", + json=payload, + timeout=self.timeout, + ) + response.raise_for_status() + result = response.json() + + if ( + result is not None + and "choices" in result + and len(result["choices"]) > 0 + ): + choice = result["choices"][0] + if "message" in choice and "content" in choice["message"]: + return choice["message"]["content"].strip() + return None + except Exception as e: + logger.warning("llama.cpp returned an error: %s", str(e)) + return None + + def get_context_size(self) -> int: + """Get the context window size for llama.cpp.""" + return int(self.provider_options.get("context_size", 4096)) + + def _build_payload( + self, + messages: list[dict[str, Any]], + tools: Optional[list[dict[str, Any]]], + tool_choice: Optional[str], + stream: bool = False, + ) -> dict[str, Any]: + """Build request payload for chat completions (sync or stream).""" + openai_tool_choice = None + if tool_choice: + if tool_choice == "none": + openai_tool_choice = "none" + elif tool_choice == "auto": + openai_tool_choice = "auto" + elif tool_choice == "required": + openai_tool_choice = "required" + + payload: dict[str, Any] = { + "messages": messages, + "model": self.genai_config.model, + } + if stream: + payload["stream"] = True + if tools: + payload["tools"] = tools + if openai_tool_choice is not None: + payload["tool_choice"] = openai_tool_choice + provider_opts = { + k: v for k, v in 
self.provider_options.items() if k != "context_size" + } + payload.update(provider_opts) + return payload + + def _message_from_choice(self, choice: dict[str, Any]) -> dict[str, Any]: + """Parse OpenAI-style choice into {content, tool_calls, finish_reason}.""" + message = choice.get("message", {}) + content = message.get("content") + content = content.strip() if content else None + tool_calls = parse_tool_calls_from_message(message) + finish_reason = choice.get("finish_reason") or ( + "tool_calls" if tool_calls else "stop" if content else "error" + ) + return { + "content": content, + "tool_calls": tool_calls, + "finish_reason": finish_reason, + } + + @staticmethod + def _streamed_tool_calls_to_list( + tool_calls_by_index: dict[int, dict[str, Any]], + ) -> Optional[list[dict[str, Any]]]: + """Convert streamed tool_calls index map to list of {id, name, arguments}.""" + if not tool_calls_by_index: + return None + result = [] + for idx in sorted(tool_calls_by_index.keys()): + t = tool_calls_by_index[idx] + args_str = t.get("arguments") or "{}" + try: + arguments = json.loads(args_str) + except json.JSONDecodeError: + arguments = {} + result.append( + { + "id": t.get("id", ""), + "name": t.get("name", ""), + "arguments": arguments, + } + ) + return result if result else None + + def chat_with_tools( + self, + messages: list[dict[str, Any]], + tools: Optional[list[dict[str, Any]]] = None, + tool_choice: Optional[str] = "auto", + ) -> dict[str, Any]: + """ + Send chat messages to llama.cpp server with optional tool definitions. + + Uses the OpenAI-compatible endpoint but passes through all native llama.cpp + parameters (like slot_id, temperature, etc.) via provider_options. + """ + if self.provider is None: + logger.warning( + "llama.cpp provider has not been initialized. Check your llama.cpp configuration." 
+ ) + return { + "content": None, + "tool_calls": None, + "finish_reason": "error", + } + try: + payload = self._build_payload(messages, tools, tool_choice, stream=False) + response = requests.post( + f"{self.provider}/v1/chat/completions", + json=payload, + timeout=self.timeout, + ) + response.raise_for_status() + result = response.json() + if result is None or "choices" not in result or len(result["choices"]) == 0: + return { + "content": None, + "tool_calls": None, + "finish_reason": "error", + } + return self._message_from_choice(result["choices"][0]) + except requests.exceptions.Timeout as e: + logger.warning("llama.cpp request timed out: %s", str(e)) + return { + "content": None, + "tool_calls": None, + "finish_reason": "error", + } + except requests.exceptions.RequestException as e: + error_detail = str(e) + if hasattr(e, "response") and e.response is not None: + try: + error_detail = f"{str(e)} - Response: {e.response.text[:500]}" + except Exception: + pass + logger.warning("llama.cpp returned an error: %s", error_detail) + return { + "content": None, + "tool_calls": None, + "finish_reason": "error", + } + except Exception as e: + logger.warning("Unexpected error in llama.cpp chat_with_tools: %s", str(e)) + return { + "content": None, + "tool_calls": None, + "finish_reason": "error", + } + + async def chat_with_tools_stream( + self, + messages: list[dict[str, Any]], + tools: Optional[list[dict[str, Any]]] = None, + tool_choice: Optional[str] = "auto", + ): + """Stream chat with tools via OpenAI-compatible streaming API.""" + if self.provider is None: + logger.warning( + "llama.cpp provider has not been initialized. Check your llama.cpp configuration." 
+ ) + yield ( + "message", + { + "content": None, + "tool_calls": None, + "finish_reason": "error", + }, + ) + return + try: + payload = self._build_payload(messages, tools, tool_choice, stream=True) + content_parts: list[str] = [] + tool_calls_by_index: dict[int, dict[str, Any]] = {} + finish_reason = "stop" + + async with httpx.AsyncClient(timeout=float(self.timeout)) as client: + async with client.stream( + "POST", + f"{self.provider}/v1/chat/completions", + json=payload, + ) as response: + response.raise_for_status() + async for line in response.aiter_lines(): + if not line.startswith("data: "): + continue + data_str = line[6:].strip() + if data_str == "[DONE]": + break + try: + data = json.loads(data_str) + except json.JSONDecodeError: + continue + choices = data.get("choices") or [] + if not choices: + continue + delta = choices[0].get("delta", {}) + if choices[0].get("finish_reason"): + finish_reason = choices[0]["finish_reason"] + if delta.get("content"): + content_parts.append(delta["content"]) + yield ("content_delta", delta["content"]) + for tc in delta.get("tool_calls") or []: + idx = tc.get("index", 0) + fn = tc.get("function") or {} + if idx not in tool_calls_by_index: + tool_calls_by_index[idx] = { + "id": tc.get("id", ""), + "name": tc.get("name") or fn.get("name", ""), + "arguments": "", + } + t = tool_calls_by_index[idx] + if tc.get("id"): + t["id"] = tc["id"] + name = tc.get("name") or fn.get("name") + if name: + t["name"] = name + arg = tc.get("arguments") or fn.get("arguments") + if arg is not None: + t["arguments"] += ( + arg if isinstance(arg, str) else json.dumps(arg) + ) + + full_content = "".join(content_parts).strip() or None + tool_calls_list = self._streamed_tool_calls_to_list(tool_calls_by_index) + if tool_calls_list: + finish_reason = "tool_calls" + yield ( + "message", + { + "content": full_content, + "tool_calls": tool_calls_list, + "finish_reason": finish_reason, + }, + ) + except httpx.HTTPStatusError as e: + 
logger.warning("llama.cpp streaming HTTP error: %s", e) + yield ( + "message", + { + "content": None, + "tool_calls": None, + "finish_reason": "error", + }, + ) + except Exception as e: + logger.warning( + "Unexpected error in llama.cpp chat_with_tools_stream: %s", str(e) + ) + yield ( + "message", + { + "content": None, + "tool_calls": None, + "finish_reason": "error", + }, + ) diff --git a/frigate/genai/manager.py b/frigate/genai/manager.py new file mode 100644 index 000000000..01daa35e0 --- /dev/null +++ b/frigate/genai/manager.py @@ -0,0 +1,88 @@ +"""GenAI client manager for Frigate. + +Manages GenAI provider clients from Frigate config. Configuration is read only +in _update_config(); no other code should read config.genai. Exposes clients +by role: tool_client, vision_client, embeddings_client. +""" + +import logging +from typing import TYPE_CHECKING, Optional + +from frigate.config import FrigateConfig +from frigate.config.camera.genai import GenAIRoleEnum + +if TYPE_CHECKING: + from frigate.genai import GenAIClient + +logger = logging.getLogger(__name__) + + +class GenAIClientManager: + """Manages GenAI provider clients from Frigate config.""" + + def __init__(self, config: FrigateConfig) -> None: + self._tool_client: Optional[GenAIClient] = None + self._vision_client: Optional[GenAIClient] = None + self._embeddings_client: Optional[GenAIClient] = None + self.update_config(config) + + def update_config(self, config: FrigateConfig) -> None: + """Build role clients from current Frigate config.genai. + + Called from __init__ and can be called again when config is reloaded. + Each role (tools, vision, embeddings) gets the client for the provider + that has that role in its roles list. 
+ """ + from frigate.genai import PROVIDERS, load_providers + + self._tool_client = None + self._vision_client = None + self._embeddings_client = None + + if not config.genai: + return + + load_providers() + + for _name, genai_cfg in config.genai.items(): + if not genai_cfg.provider: + continue + provider_cls = PROVIDERS.get(genai_cfg.provider) + if not provider_cls: + logger.warning( + "Unknown GenAI provider %s in config, skipping.", + genai_cfg.provider, + ) + continue + try: + client = provider_cls(genai_cfg) + except Exception as e: + logger.exception( + "Failed to create GenAI client for provider %s: %s", + genai_cfg.provider, + e, + ) + continue + + for role in genai_cfg.roles: + if role == GenAIRoleEnum.tools: + self._tool_client = client + elif role == GenAIRoleEnum.vision: + self._vision_client = client + elif role == GenAIRoleEnum.embeddings: + self._embeddings_client = client + + @property + def tool_client(self) -> "Optional[GenAIClient]": + """Client configured for the tools role (e.g. chat with function calling).""" + return self._tool_client + + @property + def vision_client(self) -> "Optional[GenAIClient]": + """Client configured for the vision role (e.g. 
review descriptions, object descriptions).""" + return self._vision_client + + @property + def embeddings_client(self) -> "Optional[GenAIClient]": + """Client configured for the embeddings role.""" + return self._embeddings_client diff --git a/frigate/genai/ollama.py b/frigate/genai/ollama.py index ab6d3c0b3..e98f6ab07 100644 --- a/frigate/genai/ollama.py +++ b/frigate/genai/ollama.py @@ -1,14 +1,17 @@ """Ollama Provider for Frigate AI.""" +import json import logging from typing import Any, Optional from httpx import RemoteProtocolError, TimeoutException +from ollama import AsyncClient as OllamaAsyncClient from ollama import Client as ApiClient from ollama import ResponseError from frigate.config import GenAIProviderEnum from frigate.genai import GenAIClient, register_genai_provider +from frigate.genai.utils import parse_tool_calls_from_message logger = logging.getLogger(__name__) @@ -83,6 +86,235 @@ class OllamaClient(GenAIClient): def get_context_size(self) -> int: """Get the context window size for Ollama.""" - return self.genai_config.provider_options.get("options", {}).get( - "num_ctx", 4096 + return int( + self.genai_config.provider_options.get("options", {}).get("num_ctx", 4096) ) + + def _build_request_params( + self, + messages: list[dict[str, Any]], + tools: Optional[list[dict[str, Any]]], + tool_choice: Optional[str], + stream: bool = False, + ) -> dict[str, Any]: + """Build request_messages and params for chat (sync or stream).""" + request_messages = [] + for msg in messages: + msg_dict = { + "role": msg.get("role"), + "content": msg.get("content", ""), + } + if msg.get("tool_call_id"): + msg_dict["tool_call_id"] = msg["tool_call_id"] + if msg.get("name"): + msg_dict["name"] = msg["name"] + if msg.get("tool_calls"): + # Ollama requires tool call arguments as dicts, but the + # conversation format (OpenAI-style) stores them as JSON + # strings. Convert back to dicts for Ollama. 
+ ollama_tool_calls = [] + for tc in msg["tool_calls"]: + func = tc.get("function") or {} + args = func.get("arguments") or {} + if isinstance(args, str): + try: + args = json.loads(args) + except (json.JSONDecodeError, TypeError): + args = {} + ollama_tool_calls.append( + {"function": {"name": func.get("name", ""), "arguments": args}} + ) + msg_dict["tool_calls"] = ollama_tool_calls + request_messages.append(msg_dict) + + request_params: dict[str, Any] = { + "model": self.genai_config.model, + "messages": request_messages, + **self.provider_options, + } + if stream: + request_params["stream"] = True + if tools: + request_params["tools"] = tools + return request_params + + def _message_from_response(self, response: dict[str, Any]) -> dict[str, Any]: + """Parse Ollama chat response into {content, tool_calls, finish_reason}.""" + if not response or "message" not in response: + logger.debug("Ollama response empty or missing 'message' key") + return { + "content": None, + "tool_calls": None, + "finish_reason": "error", + } + message = response["message"] + logger.debug( + "Ollama response message keys: %s, content_len=%s, thinking_len=%s, " + "tool_calls=%s, done=%s", + list(message.keys()) if hasattr(message, "keys") else "N/A", + len(message.get("content", "") or "") if message.get("content") else 0, + len(message.get("thinking", "") or "") if message.get("thinking") else 0, + bool(message.get("tool_calls")), + response.get("done"), + ) + content = message.get("content", "").strip() if message.get("content") else None + tool_calls = parse_tool_calls_from_message(message) + finish_reason = "error" + if response.get("done"): + finish_reason = ( + "tool_calls" if tool_calls else "stop" if content else "error" + ) + elif tool_calls: + finish_reason = "tool_calls" + elif content: + finish_reason = "stop" + return { + "content": content, + "tool_calls": tool_calls, + "finish_reason": finish_reason, + } + + def chat_with_tools( + self, + messages: list[dict[str, Any]], + 
tools: Optional[list[dict[str, Any]]] = None, + tool_choice: Optional[str] = "auto", + ) -> dict[str, Any]: + if self.provider is None: + logger.warning( + "Ollama provider has not been initialized. Check your Ollama configuration." + ) + return { + "content": None, + "tool_calls": None, + "finish_reason": "error", + } + try: + request_params = self._build_request_params( + messages, tools, tool_choice, stream=False + ) + response = self.provider.chat(**request_params) + return self._message_from_response(response) + except (TimeoutException, ResponseError, ConnectionError) as e: + logger.warning("Ollama returned an error: %s", str(e)) + return { + "content": None, + "tool_calls": None, + "finish_reason": "error", + } + except Exception as e: + logger.warning("Unexpected error in Ollama chat_with_tools: %s", str(e)) + return { + "content": None, + "tool_calls": None, + "finish_reason": "error", + } + + async def chat_with_tools_stream( + self, + messages: list[dict[str, Any]], + tools: Optional[list[dict[str, Any]]] = None, + tool_choice: Optional[str] = "auto", + ): + """Stream chat with tools; yields content deltas then final message. + + When tools are provided, Ollama streaming does not include tool_calls + in the response chunks. To work around this, we use a non-streaming + call when tools are present to ensure tool calls are captured, then + emit the content as a single delta followed by the final message. + """ + if self.provider is None: + logger.warning( + "Ollama provider has not been initialized. Check your Ollama configuration." + ) + yield ( + "message", + { + "content": None, + "tool_calls": None, + "finish_reason": "error", + }, + ) + return + try: + # Ollama does not return tool_calls in streaming mode, so fall + # back to a non-streaming call when tools are provided. 
+ if tools: + logger.debug( + "Ollama: tools provided, using non-streaming call for tool support" + ) + request_params = self._build_request_params( + messages, tools, tool_choice, stream=False + ) + async_client = OllamaAsyncClient( + host=self.genai_config.base_url, + timeout=self.timeout, + ) + response = await async_client.chat(**request_params) + result = self._message_from_response(response) + content = result.get("content") + if content: + yield ("content_delta", content) + yield ("message", result) + return + + request_params = self._build_request_params( + messages, tools, tool_choice, stream=True + ) + async_client = OllamaAsyncClient( + host=self.genai_config.base_url, + timeout=self.timeout, + ) + content_parts: list[str] = [] + final_message: dict[str, Any] | None = None + stream = await async_client.chat(**request_params) + async for chunk in stream: + if not chunk or "message" not in chunk: + continue + msg = chunk.get("message", {}) + delta = msg.get("content") or "" + if delta: + content_parts.append(delta) + yield ("content_delta", delta) + if chunk.get("done"): + full_content = "".join(content_parts).strip() or None + final_message = { + "content": full_content, + "tool_calls": None, + "finish_reason": "stop", + } + break + + if final_message is not None: + yield ("message", final_message) + else: + yield ( + "message", + { + "content": "".join(content_parts).strip() or None, + "tool_calls": None, + "finish_reason": "stop", + }, + ) + except (TimeoutException, ResponseError, ConnectionError) as e: + logger.warning("Ollama streaming error: %s", str(e)) + yield ( + "message", + { + "content": None, + "tool_calls": None, + "finish_reason": "error", + }, + ) + except Exception as e: + logger.warning( + "Unexpected error in Ollama chat_with_tools_stream: %s", str(e) + ) + yield ( + "message", + { + "content": None, + "tool_calls": None, + "finish_reason": "error", + }, + ) diff --git a/frigate/genai/openai.py b/frigate/genai/openai.py index 
1fb0dd852..b3031ff33 100644 --- a/frigate/genai/openai.py +++ b/frigate/genai/openai.py @@ -1,8 +1,9 @@ """OpenAI Provider for Frigate AI.""" import base64 +import json import logging -from typing import Optional +from typing import Any, Optional from httpx import TimeoutException from openai import OpenAI @@ -29,6 +30,10 @@ class OpenAIClient(GenAIClient): for k, v in self.genai_config.provider_options.items() if k != "context_size" } + + if self.genai_config.base_url: + provider_opts["base_url"] = self.genai_config.base_url + return OpenAI(api_key=self.genai_config.api_key, **provider_opts) def _send(self, prompt: str, images: list[bytes]) -> Optional[str]: @@ -116,3 +121,252 @@ class OpenAIClient(GenAIClient): f"Using default context size {self.context_size} for model {self.genai_config.model}" ) return self.context_size + + def chat_with_tools( + self, + messages: list[dict[str, Any]], + tools: Optional[list[dict[str, Any]]] = None, + tool_choice: Optional[str] = "auto", + ) -> dict[str, Any]: + """ + Send chat messages to OpenAI with optional tool definitions. + + Implements function calling/tool usage for OpenAI models. 
+ """ + try: + openai_tool_choice = None + if tool_choice: + if tool_choice == "none": + openai_tool_choice = "none" + elif tool_choice == "auto": + openai_tool_choice = "auto" + elif tool_choice == "required": + openai_tool_choice = "required" + + request_params = { + "model": self.genai_config.model, + "messages": messages, + "timeout": self.timeout, + } + + if tools: + request_params["tools"] = tools + if openai_tool_choice is not None: + request_params["tool_choice"] = openai_tool_choice + + if isinstance(self.genai_config.provider_options, dict): + excluded_options = {"context_size"} + provider_opts = { + k: v + for k, v in self.genai_config.provider_options.items() + if k not in excluded_options + } + request_params.update(provider_opts) + + result = self.provider.chat.completions.create(**request_params) + + if ( + result is None + or not hasattr(result, "choices") + or len(result.choices) == 0 + ): + return { + "content": None, + "tool_calls": None, + "finish_reason": "error", + } + + choice = result.choices[0] + message = choice.message + content = message.content.strip() if message.content else None + + tool_calls = None + if message.tool_calls: + tool_calls = [] + for tool_call in message.tool_calls: + try: + arguments = json.loads(tool_call.function.arguments) + except (json.JSONDecodeError, AttributeError) as e: + logger.warning( + f"Failed to parse tool call arguments: {e}, " + f"tool: {tool_call.function.name if hasattr(tool_call.function, 'name') else 'unknown'}" + ) + arguments = {} + + tool_calls.append( + { + "id": tool_call.id if hasattr(tool_call, "id") else "", + "name": tool_call.function.name + if hasattr(tool_call.function, "name") + else "", + "arguments": arguments, + } + ) + + finish_reason = "error" + if hasattr(choice, "finish_reason") and choice.finish_reason: + finish_reason = choice.finish_reason + elif tool_calls: + finish_reason = "tool_calls" + elif content: + finish_reason = "stop" + + return { + "content": content, + 
"tool_calls": tool_calls, + "finish_reason": finish_reason, + } + + except TimeoutException as e: + logger.warning("OpenAI request timed out: %s", str(e)) + return { + "content": None, + "tool_calls": None, + "finish_reason": "error", + } + except Exception as e: + logger.warning("OpenAI returned an error: %s", str(e)) + return { + "content": None, + "tool_calls": None, + "finish_reason": "error", + } + + async def chat_with_tools_stream( + self, + messages: list[dict[str, Any]], + tools: Optional[list[dict[str, Any]]] = None, + tool_choice: Optional[str] = "auto", + ): + """ + Stream chat with tools; yields content deltas then final message. + + Implements streaming function calling/tool usage for OpenAI models. + """ + try: + openai_tool_choice = None + if tool_choice: + if tool_choice == "none": + openai_tool_choice = "none" + elif tool_choice == "auto": + openai_tool_choice = "auto" + elif tool_choice == "required": + openai_tool_choice = "required" + + request_params = { + "model": self.genai_config.model, + "messages": messages, + "timeout": self.timeout, + "stream": True, + } + + if tools: + request_params["tools"] = tools + if openai_tool_choice is not None: + request_params["tool_choice"] = openai_tool_choice + + if isinstance(self.genai_config.provider_options, dict): + excluded_options = {"context_size"} + provider_opts = { + k: v + for k, v in self.genai_config.provider_options.items() + if k not in excluded_options + } + request_params.update(provider_opts) + + # Use streaming API + content_parts: list[str] = [] + tool_calls_by_index: dict[int, dict[str, Any]] = {} + finish_reason = "stop" + + stream = self.provider.chat.completions.create(**request_params) + + for chunk in stream: + if not chunk or not chunk.choices: + continue + + choice = chunk.choices[0] + delta = choice.delta + + # Check for finish reason + if choice.finish_reason: + finish_reason = choice.finish_reason + + # Extract content deltas + if delta.content: + 
content_parts.append(delta.content) + yield ("content_delta", delta.content) + + # Extract tool calls + if delta.tool_calls: + for tc in delta.tool_calls: + idx = tc.index + fn = tc.function + + if idx not in tool_calls_by_index: + tool_calls_by_index[idx] = { + "id": tc.id or "", + "name": fn.name if fn and fn.name else "", + "arguments": "", + } + + t = tool_calls_by_index[idx] + if tc.id: + t["id"] = tc.id + if fn and fn.name: + t["name"] = fn.name + if fn and fn.arguments: + t["arguments"] += fn.arguments + + # Build final message + full_content = "".join(content_parts).strip() or None + + # Convert tool calls to list format + tool_calls_list = None + if tool_calls_by_index: + tool_calls_list = [] + for tc in tool_calls_by_index.values(): + try: + # Parse accumulated arguments as JSON + parsed_args = json.loads(tc["arguments"]) + except (json.JSONDecodeError, Exception): + parsed_args = tc["arguments"] + + tool_calls_list.append( + { + "id": tc["id"], + "name": tc["name"], + "arguments": parsed_args, + } + ) + finish_reason = "tool_calls" + + yield ( + "message", + { + "content": full_content, + "tool_calls": tool_calls_list, + "finish_reason": finish_reason, + }, + ) + + except TimeoutException as e: + logger.warning("OpenAI streaming request timed out: %s", str(e)) + yield ( + "message", + { + "content": None, + "tool_calls": None, + "finish_reason": "error", + }, + ) + except Exception as e: + logger.warning("OpenAI streaming returned an error: %s", str(e)) + yield ( + "message", + { + "content": None, + "tool_calls": None, + "finish_reason": "error", + }, + ) diff --git a/frigate/genai/utils.py b/frigate/genai/utils.py new file mode 100644 index 000000000..44f982059 --- /dev/null +++ b/frigate/genai/utils.py @@ -0,0 +1,75 @@ +"""Shared helpers for GenAI providers and chat (OpenAI-style messages, tool call parsing).""" + +import json +import logging +from typing import Any, List, Optional + +logger = logging.getLogger(__name__) + + +def 
parse_tool_calls_from_message( + message: dict[str, Any], +) -> Optional[list[dict[str, Any]]]: + """ + Parse tool_calls from an OpenAI-style message dict. + + Message may have "tool_calls" as a list of: + {"id": str, "function": {"name": str, "arguments": str}, ...} + + Returns a list of {"id", "name", "arguments"} with arguments parsed as dict, + or None if no tool_calls. Used by Ollama and LlamaCpp (non-stream) responses. + """ + raw = message.get("tool_calls") + if not raw or not isinstance(raw, list): + return None + result = [] + for idx, tool_call in enumerate(raw): + function_data = tool_call.get("function") or {} + raw_arguments = function_data.get("arguments") or {} + if isinstance(raw_arguments, dict): + arguments = raw_arguments + elif isinstance(raw_arguments, str): + try: + arguments = json.loads(raw_arguments) + except (json.JSONDecodeError, KeyError, TypeError) as e: + logger.warning( + "Failed to parse tool call arguments: %s, tool: %s", + e, + function_data.get("name", "unknown"), + ) + arguments = {} + else: + arguments = {} + result.append( + { + "id": tool_call.get("id", "") or f"call_{idx}", + "name": function_data.get("name", ""), + "arguments": arguments, + } + ) + return result if result else None + + +def build_assistant_message_for_conversation( + content: Any, + tool_calls_raw: Optional[List[dict[str, Any]]], +) -> dict[str, Any]: + """ + Build the assistant message dict in OpenAI format for appending to a conversation. + + tool_calls_raw: list of {"id", "name", "arguments"} (arguments as dict), or None. 
+ """ + msg: dict[str, Any] = {"role": "assistant", "content": content} + if tool_calls_raw: + msg["tool_calls"] = [ + { + "id": tc["id"], + "type": "function", + "function": { + "name": tc["name"], + "arguments": json.dumps(tc.get("arguments") or {}), + }, + } + for tc in tool_calls_raw + ] + return msg diff --git a/frigate/jobs/__init__.py b/frigate/jobs/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/frigate/jobs/job.py b/frigate/jobs/job.py new file mode 100644 index 000000000..a445eebf5 --- /dev/null +++ b/frigate/jobs/job.py @@ -0,0 +1,21 @@ +"""Generic base class for long-running background jobs.""" + +from dataclasses import asdict, dataclass, field +from typing import Any, Optional + + +@dataclass +class Job: + """Base class for long-running background jobs.""" + + id: str = field(default_factory=lambda: __import__("uuid").uuid4().__str__()[:12]) + job_type: str = "" # Must be set by subclasses + status: str = "queued" # queued, running, success, failed, cancelled + results: Optional[dict[str, Any]] = None + start_time: Optional[float] = None + end_time: Optional[float] = None + error_message: Optional[str] = None + + def to_dict(self) -> dict[str, Any]: + """Convert to dictionary for WebSocket transmission.""" + return asdict(self) diff --git a/frigate/jobs/manager.py b/frigate/jobs/manager.py new file mode 100644 index 000000000..8aa77b3c7 --- /dev/null +++ b/frigate/jobs/manager.py @@ -0,0 +1,70 @@ +"""Generic job management for long-running background tasks.""" + +import threading +from typing import Optional + +from frigate.jobs.job import Job +from frigate.types import JobStatusTypesEnum + +# Global state and locks for enforcing single concurrent job per job type +_job_locks: dict[str, threading.Lock] = {} +_current_jobs: dict[str, Optional[Job]] = {} +# Keep completed jobs for retrieval, keyed by (job_type, job_id) +_completed_jobs: dict[tuple[str, str], Job] = {} + + +def _get_lock(job_type: str) -> threading.Lock: + """Get 
or create a lock for the specified job type.""" + if job_type not in _job_locks: + _job_locks[job_type] = threading.Lock() + return _job_locks[job_type] + + +def set_current_job(job: Job) -> None: + """Set the current job for a given job type.""" + lock = _get_lock(job.job_type) + with lock: + # Store the previous job if it was completed + old_job = _current_jobs.get(job.job_type) + if old_job and old_job.status in ( + JobStatusTypesEnum.success, + JobStatusTypesEnum.failed, + JobStatusTypesEnum.cancelled, + ): + _completed_jobs[(job.job_type, old_job.id)] = old_job + _current_jobs[job.job_type] = job + + +def clear_current_job(job_type: str, job_id: Optional[str] = None) -> None: + """Clear the current job for a given job type, optionally checking the ID.""" + lock = _get_lock(job_type) + with lock: + if job_type in _current_jobs: + current = _current_jobs[job_type] + if current is None or (job_id is None or current.id == job_id): + _current_jobs[job_type] = None + + +def get_current_job(job_type: str) -> Optional[Job]: + """Get the current running/queued job for a given job type, if any.""" + lock = _get_lock(job_type) + with lock: + return _current_jobs.get(job_type) + + +def get_job_by_id(job_type: str, job_id: str) -> Optional[Job]: + """Get job by ID. 
Checks current job first, then completed jobs.""" + lock = _get_lock(job_type) + with lock: + # Check if it's the current job + current = _current_jobs.get(job_type) + if current and current.id == job_id: + return current + # Check if it's a completed job + return _completed_jobs.get((job_type, job_id)) + + +def job_is_running(job_type: str) -> bool: + """Check if a job of the given type is currently running or queued.""" + job = get_current_job(job_type) + return job is not None and job.status in ("queued", "running") diff --git a/frigate/jobs/media_sync.py b/frigate/jobs/media_sync.py new file mode 100644 index 000000000..7c15435fd --- /dev/null +++ b/frigate/jobs/media_sync.py @@ -0,0 +1,135 @@ +"""Media sync job management with background execution.""" + +import logging +import threading +from dataclasses import dataclass, field +from datetime import datetime +from typing import Optional + +from frigate.comms.inter_process import InterProcessRequestor +from frigate.const import UPDATE_JOB_STATE +from frigate.jobs.job import Job +from frigate.jobs.manager import ( + get_current_job, + get_job_by_id, + job_is_running, + set_current_job, +) +from frigate.types import JobStatusTypesEnum +from frigate.util.media import sync_all_media + +logger = logging.getLogger(__name__) + + +@dataclass +class MediaSyncJob(Job): + """In-memory job state for media sync operations.""" + + job_type: str = "media_sync" + dry_run: bool = False + media_types: list[str] = field(default_factory=lambda: ["all"]) + force: bool = False + + +class MediaSyncRunner(threading.Thread): + """Thread-based runner for media sync jobs.""" + + def __init__(self, job: MediaSyncJob) -> None: + super().__init__(daemon=True, name="media_sync") + self.job = job + self.requestor = InterProcessRequestor() + + def run(self) -> None: + """Execute the media sync job and broadcast status updates.""" + try: + # Update job status to running + self.job.status = JobStatusTypesEnum.running + self.job.start_time = 
datetime.now().timestamp() + self._broadcast_status() + + # Execute sync with provided parameters + logger.debug( + f"Starting media sync job {self.job.id}: " + f"media_types={self.job.media_types}, " + f"dry_run={self.job.dry_run}, " + f"force={self.job.force}" + ) + + results = sync_all_media( + dry_run=self.job.dry_run, + media_types=self.job.media_types, + force=self.job.force, + ) + + # Store results and mark as complete + self.job.results = results.to_dict() + self.job.status = JobStatusTypesEnum.success + self.job.end_time = datetime.now().timestamp() + + logger.debug(f"Media sync job {self.job.id} completed successfully") + self._broadcast_status() + + except Exception as e: + logger.error(f"Media sync job {self.job.id} failed: {e}", exc_info=True) + self.job.status = JobStatusTypesEnum.failed + self.job.error_message = str(e) + self.job.end_time = datetime.now().timestamp() + self._broadcast_status() + + finally: + if self.requestor: + self.requestor.stop() + + def _broadcast_status(self) -> None: + """Broadcast job status update via IPC to all WebSocket subscribers.""" + try: + self.requestor.send_data( + UPDATE_JOB_STATE, + self.job.to_dict(), + ) + except Exception as e: + logger.warning(f"Failed to broadcast media sync status: {e}") + + +def start_media_sync_job( + dry_run: bool = False, + media_types: Optional[list[str]] = None, + force: bool = False, +) -> Optional[str]: + """Start a new media sync job if none is currently running. + + Returns job ID on success, None if job already running. + """ + # Check if a job is already running + if job_is_running("media_sync"): + current = get_current_job("media_sync") + logger.warning( + f"Media sync job {current.id} is already running. Rejecting new request." 
+ ) + return None + + # Create and start new job + job = MediaSyncJob( + dry_run=dry_run, + media_types=media_types or ["all"], + force=force, + ) + + logger.debug(f"Creating new media sync job: {job.id}") + set_current_job(job) + + # Start the background runner + runner = MediaSyncRunner(job) + runner.start() + + return job.id + + +def get_current_media_sync_job() -> Optional[MediaSyncJob]: + """Get the current running/queued media sync job, if any.""" + return get_current_job("media_sync") + + +def get_media_sync_job_by_id(job_id: str) -> Optional[MediaSyncJob]: + """Get media sync job by ID. Currently only tracks the current job.""" + return get_job_by_id("media_sync", job_id) diff --git a/frigate/jobs/motion_search.py b/frigate/jobs/motion_search.py new file mode 100644 index 000000000..d7c8f8fbc --- /dev/null +++ b/frigate/jobs/motion_search.py @@ -0,0 +1,864 @@ +"""Motion search job management with background execution and parallel verification.""" + +import logging +import os +import threading +from concurrent.futures import Future, ThreadPoolExecutor, as_completed +from dataclasses import asdict, dataclass, field +from datetime import datetime +from typing import Any, Optional + +import cv2 +import numpy as np + +from frigate.comms.inter_process import InterProcessRequestor +from frigate.config import FrigateConfig +from frigate.const import UPDATE_JOB_STATE +from frigate.jobs.job import Job +from frigate.jobs.manager import ( + get_job_by_id, + set_current_job, +) +from frigate.models import Recordings +from frigate.types import JobStatusTypesEnum + +logger = logging.getLogger(__name__) + +# Constants +HEATMAP_GRID_SIZE = 16 + + +@dataclass +class MotionSearchMetrics: + """Metrics collected during motion search execution.""" + + segments_scanned: int = 0 + segments_processed: int = 0 + metadata_inactive_segments: int = 0 + heatmap_roi_skip_segments: int = 0 + fallback_full_range_segments: int = 0 + frames_decoded: int = 0 + wall_time_seconds: float = 0.0 + 
segments_with_errors: int = 0 + + def to_dict(self) -> dict[str, Any]: + """Convert to dictionary.""" + return asdict(self) + + +@dataclass +class MotionSearchResult: + """A single search result with timestamp and change info.""" + + timestamp: float + change_percentage: float + + def to_dict(self) -> dict[str, Any]: + """Convert to dictionary.""" + return asdict(self) + + +@dataclass +class MotionSearchJob(Job): + """Job state for motion search operations.""" + + job_type: str = "motion_search" + camera: str = "" + start_time_range: float = 0.0 + end_time_range: float = 0.0 + polygon_points: list[list[float]] = field(default_factory=list) + threshold: int = 30 + min_area: float = 5.0 + frame_skip: int = 5 + parallel: bool = False + max_results: int = 25 + + # Track progress + total_frames_processed: int = 0 + + # Metrics for observability + metrics: Optional[MotionSearchMetrics] = None + + def to_dict(self) -> dict[str, Any]: + """Convert to dictionary for WebSocket transmission.""" + d = asdict(self) + if self.metrics: + d["metrics"] = self.metrics.to_dict() + return d + + +def create_polygon_mask( + polygon_points: list[list[float]], frame_width: int, frame_height: int +) -> np.ndarray: + """Create a binary mask from normalized polygon coordinates.""" + motion_points = np.array( + [[int(p[0] * frame_width), int(p[1] * frame_height)] for p in polygon_points], + dtype=np.int32, + ) + mask = np.zeros((frame_height, frame_width), dtype=np.uint8) + cv2.fillPoly(mask, [motion_points], 255) + return mask + + +def compute_roi_bbox_normalized( + polygon_points: list[list[float]], +) -> tuple[float, float, float, float]: + """Compute the bounding box of the ROI in normalized coordinates (0-1). + + Returns (x_min, y_min, x_max, y_max) in normalized coordinates. 
+ """ + if not polygon_points: + return (0.0, 0.0, 1.0, 1.0) + + x_coords = [p[0] for p in polygon_points] + y_coords = [p[1] for p in polygon_points] + return (min(x_coords), min(y_coords), max(x_coords), max(y_coords)) + + +def heatmap_overlaps_roi( + heatmap: dict[str, int], roi_bbox: tuple[float, float, float, float] +) -> bool: + """Check if a sparse motion heatmap has any overlap with the ROI bounding box. + + Args: + heatmap: Sparse dict mapping cell index (str) to intensity (1-255). + roi_bbox: (x_min, y_min, x_max, y_max) in normalized coordinates (0-1). + + Returns: + True if there is overlap (any active cell in the ROI region). + """ + if not isinstance(heatmap, dict): + # Invalid heatmap, assume overlap to be safe + return True + + x_min, y_min, x_max, y_max = roi_bbox + + # Convert normalized coordinates to grid cells (0-15) + grid_x_min = max(0, int(x_min * HEATMAP_GRID_SIZE)) + grid_y_min = max(0, int(y_min * HEATMAP_GRID_SIZE)) + grid_x_max = min(HEATMAP_GRID_SIZE - 1, int(x_max * HEATMAP_GRID_SIZE)) + grid_y_max = min(HEATMAP_GRID_SIZE - 1, int(y_max * HEATMAP_GRID_SIZE)) + + # Check each cell in the ROI bbox + for y in range(grid_y_min, grid_y_max + 1): + for x in range(grid_x_min, grid_x_max + 1): + idx = str(y * HEATMAP_GRID_SIZE + x) + if idx in heatmap: + return True + + return False + + +def segment_passes_activity_gate(recording: Recordings) -> bool: + """Check if a segment passes the activity gate. + + Returns True if any of motion, objects, or regions is non-zero/non-null. + Returns True if all are null (old segments without data). 
+ """ + motion = recording.motion + objects = recording.objects + regions = recording.regions + + # Old segments without metadata - pass through (conservative) + if motion is None and objects is None and regions is None: + return True + + # Pass if any activity indicator is positive + return bool(motion) or bool(objects) or bool(regions) + + +def segment_passes_heatmap_gate( + recording: Recordings, roi_bbox: tuple[float, float, float, float] +) -> bool: + """Check if a segment passes the heatmap overlap gate. + + Returns True if: + - No heatmap is stored (old segments). + - The heatmap overlaps with the ROI bbox. + """ + heatmap = getattr(recording, "motion_heatmap", None) + if heatmap is None: + # No heatmap stored, fall back to activity gate + return True + + return heatmap_overlaps_roi(heatmap, roi_bbox) + + +class MotionSearchRunner(threading.Thread): + """Thread-based runner for motion search jobs with parallel verification.""" + + def __init__( + self, + job: MotionSearchJob, + config: FrigateConfig, + cancel_event: threading.Event, + ) -> None: + super().__init__(daemon=True, name=f"motion_search_{job.id}") + self.job = job + self.config = config + self.cancel_event = cancel_event + self.internal_stop_event = threading.Event() + self.requestor = InterProcessRequestor() + self.metrics = MotionSearchMetrics() + self.job.metrics = self.metrics + + # Worker cap: min(4, cpu_count) + cpu_count = os.cpu_count() or 1 + self.max_workers = min(4, cpu_count) + + def run(self) -> None: + """Execute the motion search job.""" + try: + self.job.status = JobStatusTypesEnum.running + self.job.start_time = datetime.now().timestamp() + self._broadcast_status() + + results = self._execute_search() + + if self.cancel_event.is_set(): + self.job.status = JobStatusTypesEnum.cancelled + else: + self.job.status = JobStatusTypesEnum.success + self.job.results = { + "results": [r.to_dict() for r in results], + "total_frames_processed": self.job.total_frames_processed, + } + + 
self.job.end_time = datetime.now().timestamp() + self.metrics.wall_time_seconds = self.job.end_time - self.job.start_time + self.job.metrics = self.metrics + + logger.debug( + "Motion search job %s completed: status=%s, results=%d, frames=%d", + self.job.id, + self.job.status, + len(results), + self.job.total_frames_processed, + ) + self._broadcast_status() + + except Exception as e: + logger.exception("Motion search job %s failed: %s", self.job.id, e) + self.job.status = JobStatusTypesEnum.failed + self.job.error_message = str(e) + self.job.end_time = datetime.now().timestamp() + self.metrics.wall_time_seconds = self.job.end_time - ( + self.job.start_time or 0 + ) + self.job.metrics = self.metrics + self._broadcast_status() + + finally: + if self.requestor: + self.requestor.stop() + + def _broadcast_status(self) -> None: + """Broadcast job status update via IPC to WebSocket subscribers.""" + if self.job.status == JobStatusTypesEnum.running and self.job.start_time: + self.metrics.wall_time_seconds = ( + datetime.now().timestamp() - self.job.start_time + ) + + try: + self.requestor.send_data(UPDATE_JOB_STATE, self.job.to_dict()) + except Exception as e: + logger.warning("Failed to broadcast motion search status: %s", e) + + def _should_stop(self) -> bool: + """Check if processing should stop due to cancellation or internal limits.""" + return self.cancel_event.is_set() or self.internal_stop_event.is_set() + + def _execute_search(self) -> list[MotionSearchResult]: + """Main search execution logic.""" + camera_name = self.job.camera + camera_config = self.config.cameras.get(camera_name) + if not camera_config: + raise ValueError(f"Camera {camera_name} not found") + + frame_width = camera_config.detect.width + frame_height = camera_config.detect.height + + # Create polygon mask + polygon_mask = create_polygon_mask( + self.job.polygon_points, frame_width, frame_height + ) + + if np.count_nonzero(polygon_mask) == 0: + logger.warning("Polygon mask is empty for job %s", 
self.job.id) + return [] + + # Compute ROI bbox in normalized coordinates for heatmap gate + roi_bbox = compute_roi_bbox_normalized(self.job.polygon_points) + + # Query recordings + recordings = list( + Recordings.select() + .where( + ( + Recordings.start_time.between( + self.job.start_time_range, self.job.end_time_range + ) + ) + | ( + Recordings.end_time.between( + self.job.start_time_range, self.job.end_time_range + ) + ) + | ( + (self.job.start_time_range > Recordings.start_time) + & (self.job.end_time_range < Recordings.end_time) + ) + ) + .where(Recordings.camera == camera_name) + .order_by(Recordings.start_time.asc()) + ) + + if not recordings: + logger.debug("No recordings found for motion search job %s", self.job.id) + return [] + + logger.debug( + "Motion search job %s: queried %d recording segments for camera %s " + "(range %.1f - %.1f)", + self.job.id, + len(recordings), + camera_name, + self.job.start_time_range, + self.job.end_time_range, + ) + + self.metrics.segments_scanned = len(recordings) + + # Apply activity and heatmap gates + filtered_recordings = [] + for recording in recordings: + if not segment_passes_activity_gate(recording): + self.metrics.metadata_inactive_segments += 1 + self.metrics.segments_processed += 1 + logger.debug( + "Motion search job %s: segment %s skipped by activity gate " + "(motion=%s, objects=%s, regions=%s)", + self.job.id, + recording.id, + recording.motion, + recording.objects, + recording.regions, + ) + continue + if not segment_passes_heatmap_gate(recording, roi_bbox): + self.metrics.heatmap_roi_skip_segments += 1 + self.metrics.segments_processed += 1 + logger.debug( + "Motion search job %s: segment %s skipped by heatmap gate " + "(heatmap present=%s, roi_bbox=%s)", + self.job.id, + recording.id, + recording.motion_heatmap is not None, + roi_bbox, + ) + continue + filtered_recordings.append(recording) + + self._broadcast_status() + + # Fallback: if all segments were filtered out, scan all segments + # This allows 
motion search to find things the detector missed + if not filtered_recordings and recordings: + logger.info( + "All %d segments filtered by gates, falling back to full scan", + len(recordings), + ) + self.metrics.fallback_full_range_segments = len(recordings) + filtered_recordings = recordings + + logger.debug( + "Motion search job %s: %d/%d segments passed gates " + "(activity_skipped=%d, heatmap_skipped=%d)", + self.job.id, + len(filtered_recordings), + len(recordings), + self.metrics.metadata_inactive_segments, + self.metrics.heatmap_roi_skip_segments, + ) + + if self.job.parallel: + return self._search_motion_parallel(filtered_recordings, polygon_mask) + + return self._search_motion_sequential(filtered_recordings, polygon_mask) + + def _search_motion_parallel( + self, + recordings: list[Recordings], + polygon_mask: np.ndarray, + ) -> list[MotionSearchResult]: + """Search for motion in parallel across segments, streaming results.""" + all_results: list[MotionSearchResult] = [] + total_frames = 0 + next_recording_idx_to_merge = 0 + + logger.debug( + "Motion search job %s: starting motion search with %d workers " + "across %d segments", + self.job.id, + self.max_workers, + len(recordings), + ) + + # Initialize partial results on the job so they stream to the frontend + self.job.results = {"results": [], "total_frames_processed": 0} + + with ThreadPoolExecutor(max_workers=self.max_workers) as executor: + futures: dict[Future, int] = {} + completed_segments: dict[int, tuple[list[MotionSearchResult], int]] = {} + + for idx, recording in enumerate(recordings): + if self._should_stop(): + break + + future = executor.submit( + self._process_recording_for_motion, + recording.path, + recording.start_time, + recording.end_time, + self.job.start_time_range, + self.job.end_time_range, + polygon_mask, + self.job.threshold, + self.job.min_area, + self.job.frame_skip, + ) + futures[future] = idx + + for future in as_completed(futures): + if self._should_stop(): + # Cancel 
remaining futures + for f in futures: + f.cancel() + break + + recording_idx = futures[future] + recording = recordings[recording_idx] + + try: + results, frames = future.result() + self.metrics.segments_processed += 1 + completed_segments[recording_idx] = (results, frames) + + while next_recording_idx_to_merge in completed_segments: + segment_results, segment_frames = completed_segments.pop( + next_recording_idx_to_merge + ) + + all_results.extend(segment_results) + total_frames += segment_frames + self.job.total_frames_processed = total_frames + self.metrics.frames_decoded = total_frames + + if segment_results: + deduped = self._deduplicate_results(all_results) + self.job.results = { + "results": [ + r.to_dict() for r in deduped[: self.job.max_results] + ], + "total_frames_processed": total_frames, + } + + self._broadcast_status() + + if segment_results and len(deduped) >= self.job.max_results: + self.internal_stop_event.set() + for pending_future in futures: + pending_future.cancel() + break + + next_recording_idx_to_merge += 1 + + if self.internal_stop_event.is_set(): + break + + except Exception as e: + self.metrics.segments_processed += 1 + self.metrics.segments_with_errors += 1 + self._broadcast_status() + logger.warning( + "Error processing segment %s: %s", + recording.path, + e, + ) + + self.job.total_frames_processed = total_frames + self.metrics.frames_decoded = total_frames + + logger.debug( + "Motion search job %s: motion search complete, " + "found %d raw results, decoded %d frames, %d segment errors", + self.job.id, + len(all_results), + total_frames, + self.metrics.segments_with_errors, + ) + + # Sort and deduplicate results + all_results.sort(key=lambda x: x.timestamp) + return self._deduplicate_results(all_results)[: self.job.max_results] + + def _search_motion_sequential( + self, + recordings: list[Recordings], + polygon_mask: np.ndarray, + ) -> list[MotionSearchResult]: + """Search for motion sequentially across segments, streaming results.""" + 
all_results: list[MotionSearchResult] = [] + total_frames = 0 + + logger.debug( + "Motion search job %s: starting sequential motion search across %d segments", + self.job.id, + len(recordings), + ) + + self.job.results = {"results": [], "total_frames_processed": 0} + + for recording in recordings: + if self.cancel_event.is_set(): + break + + try: + results, frames = self._process_recording_for_motion( + recording.path, + recording.start_time, + recording.end_time, + self.job.start_time_range, + self.job.end_time_range, + polygon_mask, + self.job.threshold, + self.job.min_area, + self.job.frame_skip, + ) + all_results.extend(results) + total_frames += frames + + self.job.total_frames_processed = total_frames + self.metrics.frames_decoded = total_frames + self.metrics.segments_processed += 1 + + if results: + all_results.sort(key=lambda x: x.timestamp) + deduped = self._deduplicate_results(all_results)[ + : self.job.max_results + ] + self.job.results = { + "results": [r.to_dict() for r in deduped], + "total_frames_processed": total_frames, + } + + self._broadcast_status() + + if results and len(deduped) >= self.job.max_results: + break + + except Exception as e: + self.metrics.segments_processed += 1 + self.metrics.segments_with_errors += 1 + self._broadcast_status() + logger.warning("Error processing segment %s: %s", recording.path, e) + + self.job.total_frames_processed = total_frames + self.metrics.frames_decoded = total_frames + + logger.debug( + "Motion search job %s: sequential motion search complete, " + "found %d raw results, decoded %d frames, %d segment errors", + self.job.id, + len(all_results), + total_frames, + self.metrics.segments_with_errors, + ) + + all_results.sort(key=lambda x: x.timestamp) + return self._deduplicate_results(all_results)[: self.job.max_results] + + def _deduplicate_results( + self, results: list[MotionSearchResult], min_gap: float = 1.0 + ) -> list[MotionSearchResult]: + """Deduplicate results that are too close together.""" + if 
not results: + return results + + deduplicated: list[MotionSearchResult] = [] + last_timestamp = 0.0 + + for result in results: + if result.timestamp - last_timestamp >= min_gap: + deduplicated.append(result) + last_timestamp = result.timestamp + + return deduplicated + + def _process_recording_for_motion( + self, + recording_path: str, + recording_start: float, + recording_end: float, + search_start: float, + search_end: float, + polygon_mask: np.ndarray, + threshold: int, + min_area: float, + frame_skip: int, + ) -> tuple[list[MotionSearchResult], int]: + """Process a single recording file for motion detection. + + This method is designed to be called from a thread pool. + + Args: + min_area: Minimum change area as a percentage of the ROI (0-100). + """ + results: list[MotionSearchResult] = [] + frames_processed = 0 + + if not os.path.exists(recording_path): + logger.warning("Recording file not found: %s", recording_path) + return results, frames_processed + + cap = cv2.VideoCapture(recording_path) + if not cap.isOpened(): + logger.error("Could not open recording: %s", recording_path) + return results, frames_processed + + try: + fps = cap.get(cv2.CAP_PROP_FPS) or 30.0 + total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) + recording_duration = recording_end - recording_start + + # Calculate frame range + start_offset = max(0, search_start - recording_start) + end_offset = min(recording_duration, search_end - recording_start) + start_frame = int(start_offset * fps) + end_frame = int(end_offset * fps) + start_frame = max(0, min(start_frame, total_frames - 1)) + end_frame = max(0, min(end_frame, total_frames)) + + if start_frame >= end_frame: + return results, frames_processed + + cap.set(cv2.CAP_PROP_POS_FRAMES, start_frame) + + # Get ROI bounding box + roi_bbox = cv2.boundingRect(polygon_mask) + roi_x, roi_y, roi_w, roi_h = roi_bbox + + prev_frame_gray = None + frame_step = max(frame_skip, 1) + frame_idx = start_frame + + while frame_idx < end_frame: + if 
self._should_stop(): + break + + ret, frame = cap.read() + if not ret: + frame_idx += 1 + continue + + if (frame_idx - start_frame) % frame_step != 0: + frame_idx += 1 + continue + + frames_processed += 1 + + gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) + + # Handle frame dimension changes + if gray.shape != polygon_mask.shape: + resized_mask = cv2.resize( + polygon_mask, (gray.shape[1], gray.shape[0]), cv2.INTER_NEAREST + ) + current_bbox = cv2.boundingRect(resized_mask) + else: + resized_mask = polygon_mask + current_bbox = roi_bbox + + roi_x, roi_y, roi_w, roi_h = current_bbox + cropped_gray = gray[roi_y : roi_y + roi_h, roi_x : roi_x + roi_w] + cropped_mask = resized_mask[ + roi_y : roi_y + roi_h, roi_x : roi_x + roi_w + ] + + cropped_mask_area = np.count_nonzero(cropped_mask) + if cropped_mask_area == 0: + frame_idx += 1 + continue + + # Convert percentage to pixel count for this ROI + min_area_pixels = int((min_area / 100.0) * cropped_mask_area) + + masked_gray = cv2.bitwise_and( + cropped_gray, cropped_gray, mask=cropped_mask + ) + + if prev_frame_gray is not None: + diff = cv2.absdiff(prev_frame_gray, masked_gray) + diff_blurred = cv2.GaussianBlur(diff, (3, 3), 0) + _, thresh = cv2.threshold( + diff_blurred, threshold, 255, cv2.THRESH_BINARY + ) + thresh_dilated = cv2.dilate(thresh, None, iterations=1) + thresh_masked = cv2.bitwise_and( + thresh_dilated, thresh_dilated, mask=cropped_mask + ) + + change_pixels = cv2.countNonZero(thresh_masked) + if change_pixels > min_area_pixels: + contours, _ = cv2.findContours( + thresh_masked, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE + ) + total_change_area = sum( + cv2.contourArea(c) + for c in contours + if cv2.contourArea(c) >= min_area_pixels + ) + if total_change_area > 0: + frame_time_offset = (frame_idx - start_frame) / fps + timestamp = ( + recording_start + start_offset + frame_time_offset + ) + change_percentage = ( + total_change_area / cropped_mask_area + ) * 100 + results.append( + MotionSearchResult( + 
timestamp=timestamp, + change_percentage=round(change_percentage, 2), + ) + ) + + prev_frame_gray = masked_gray + frame_idx += 1 + + finally: + cap.release() + + logger.debug( + "Motion search segment complete: %s, %d frames processed, %d results found", + recording_path, + frames_processed, + len(results), + ) + return results, frames_processed + + +# Module-level state for managing per-camera jobs +_motion_search_jobs: dict[str, tuple[MotionSearchJob, threading.Event]] = {} +_jobs_lock = threading.Lock() + + +def stop_all_motion_search_jobs() -> None: + """Cancel all running motion search jobs for clean shutdown.""" + with _jobs_lock: + for job_id, (job, cancel_event) in _motion_search_jobs.items(): + if job.status in (JobStatusTypesEnum.queued, JobStatusTypesEnum.running): + cancel_event.set() + logger.debug("Signalling motion search job %s to stop", job_id) + + +def start_motion_search_job( + config: FrigateConfig, + camera_name: str, + start_time: float, + end_time: float, + polygon_points: list[list[float]], + threshold: int = 30, + min_area: float = 5.0, + frame_skip: int = 5, + parallel: bool = False, + max_results: int = 25, +) -> str: + """Start a new motion search job. + + Returns the job ID. 
+ """ + job = MotionSearchJob( + camera=camera_name, + start_time_range=start_time, + end_time_range=end_time, + polygon_points=polygon_points, + threshold=threshold, + min_area=min_area, + frame_skip=frame_skip, + parallel=parallel, + max_results=max_results, + ) + + cancel_event = threading.Event() + + with _jobs_lock: + _motion_search_jobs[job.id] = (job, cancel_event) + + set_current_job(job) + + runner = MotionSearchRunner(job, config, cancel_event) + runner.start() + + logger.debug( + "Started motion search job %s for camera %s: " + "time_range=%.1f-%.1f, threshold=%d, min_area=%.1f%%, " + "frame_skip=%d, parallel=%s, max_results=%d, polygon_points=%d vertices", + job.id, + camera_name, + start_time, + end_time, + threshold, + min_area, + frame_skip, + parallel, + max_results, + len(polygon_points), + ) + return job.id + + +def get_motion_search_job(job_id: str) -> Optional[MotionSearchJob]: + """Get a motion search job by ID.""" + with _jobs_lock: + job_entry = _motion_search_jobs.get(job_id) + if job_entry: + return job_entry[0] + # Check completed jobs via manager + return get_job_by_id("motion_search", job_id) + + +def cancel_motion_search_job(job_id: str) -> bool: + """Cancel a motion search job. + + Returns True if cancellation was initiated, False if job not found. 
+ """ + with _jobs_lock: + job_entry = _motion_search_jobs.get(job_id) + if not job_entry: + return False + + job, cancel_event = job_entry + + if job.status not in (JobStatusTypesEnum.queued, JobStatusTypesEnum.running): + # Already finished + return True + + cancel_event.set() + job.status = JobStatusTypesEnum.cancelled + job_payload = job.to_dict() + logger.info("Cancelled motion search job %s", job_id) + + requestor: Optional[InterProcessRequestor] = None + try: + requestor = InterProcessRequestor() + requestor.send_data(UPDATE_JOB_STATE, job_payload) + except Exception as e: + logger.warning( + "Failed to broadcast cancelled motion search job %s: %s", job_id, e + ) + finally: + if requestor: + requestor.stop() + + return True diff --git a/frigate/models.py b/frigate/models.py index 93f6cb54f..d927a12c8 100644 --- a/frigate/models.py +++ b/frigate/models.py @@ -78,6 +78,15 @@ class Recordings(Model): dBFS = IntegerField(null=True) segment_size = FloatField(default=0) # this should be stored as MB regions = IntegerField(null=True) + motion_heatmap = JSONField(null=True) # 16x16 grid, 256 values (0-255) + + +class ExportCase(Model): + id = CharField(null=False, primary_key=True, max_length=30) + name = CharField(index=True, max_length=100) + description = TextField(null=True) + created_at = DateTimeField() + updated_at = DateTimeField() class Export(Model): @@ -88,6 +97,12 @@ class Export(Model): video_path = CharField(unique=True) thumb_path = CharField(unique=True) in_progress = BooleanField() + export_case = ForeignKeyField( + ExportCase, + null=True, + backref="exports", + column_name="export_case_id", + ) class ReviewSegment(Model): diff --git a/frigate/motion/frigate_motion.py b/frigate/motion/frigate_motion.py index fd362de34..d49b0e861 100644 --- a/frigate/motion/frigate_motion.py +++ b/frigate/motion/frigate_motion.py @@ -28,7 +28,7 @@ class FrigateMotionDetector(MotionDetector): self.motion_frame_count = 0 self.frame_counter = 0 resized_mask = 
cv2.resize( - config.mask, + config.rasterized_mask, dsize=(self.motion_frame_size[1], self.motion_frame_size[0]), interpolation=cv2.INTER_LINEAR, ) diff --git a/frigate/motion/improved_motion.py b/frigate/motion/improved_motion.py index b081d3791..b821e9532 100644 --- a/frigate/motion/improved_motion.py +++ b/frigate/motion/improved_motion.py @@ -176,11 +176,32 @@ class ImprovedMotionDetector(MotionDetector): motion_boxes = [] pct_motion = 0 + # skip motion entirely if the scene change percentage exceeds configured + # threshold. this is useful to ignore lighting storms, IR mode switches, + # etc. rather than registering them as brief motion and then recalibrating. + # note: skipping means the frame is dropped and **no recording will be + # created**, which could hide a legitimate object if the camera is actively + # auto‑tracking. the alternative is to allow motion and accept a small + # recording that can be reviewed in the timeline. disabled by default (None). + if ( + self.config.skip_motion_threshold is not None + and pct_motion > self.config.skip_motion_threshold + ): + # force a recalibration so we transition to the new background + self.calibrating = True + return [] + # once the motion is less than 5% and the number of contours is < 4, assume its calibrated if pct_motion < 0.05 and len(motion_boxes) <= 4: self.calibrating = False - # if calibrating or the motion contours are > 80% of the image area (lightning, ir, ptz) recalibrate + # if calibrating or the motion contours are > 80% of the image area + # (lightning, ir, ptz) recalibrate. the lightning threshold does **not** + # stop motion detection entirely; it simply halts additional processing for + # the current frame once the percentage crosses the threshold. this helps + # reduce false positive object detections and CPU usage during high‑motion + # events. recordings continue to be generated because users expect data + # while a PTZ camera is moving. 
if self.calibrating or pct_motion > self.config.lightning_threshold: self.calibrating = True @@ -233,7 +254,7 @@ class ImprovedMotionDetector(MotionDetector): def update_mask(self) -> None: resized_mask = cv2.resize( - self.config.mask, + self.config.rasterized_mask, dsize=(self.motion_frame_size[1], self.motion_frame_size[0]), interpolation=cv2.INTER_AREA, ) diff --git a/frigate/output/birdseye.py b/frigate/output/birdseye.py index eb23c2573..d3717d281 100644 --- a/frigate/output/birdseye.py +++ b/frigate/output/birdseye.py @@ -420,7 +420,8 @@ class BirdsEyeFrameManager: [ cam for cam, cam_data in self.cameras.items() - if self.config.cameras[cam].birdseye.enabled + if cam in self.config.cameras + and self.config.cameras[cam].birdseye.enabled and self.config.cameras[cam].enabled_in_config and self.config.cameras[cam].enabled and cam_data["last_active_frame"] > 0 @@ -723,8 +724,11 @@ class BirdsEyeFrameManager: Update birdseye for a specific camera with new frame data. Returns (frame_changed, layout_changed) to indicate if the frame or layout changed. 
""" - # don't process if birdseye is disabled for this camera - camera_config = self.config.cameras[camera] + # don't process if camera was removed or birdseye is disabled + camera_config = self.config.cameras.get(camera) + if camera_config is None: + return False, False + force_update = False # disabling birdseye is a little tricky diff --git a/frigate/output/output.py b/frigate/output/output.py index a44415000..38b1ddc52 100644 --- a/frigate/output/output.py +++ b/frigate/output/output.py @@ -22,7 +22,12 @@ from frigate.config.camera.updater import ( CameraConfigUpdateEnum, CameraConfigUpdateSubscriber, ) -from frigate.const import CACHE_DIR, CLIPS_DIR, PROCESS_PRIORITY_MED +from frigate.const import ( + CACHE_DIR, + CLIPS_DIR, + PROCESS_PRIORITY_MED, + REPLAY_CAMERA_PREFIX, +) from frigate.output.birdseye import Birdseye from frigate.output.camera import JsmpegCamera from frigate.output.preview import PreviewRecorder @@ -79,6 +84,32 @@ class OutputProcess(FrigateProcess): ) self.config = config + def is_debug_replay_camera(self, camera: str) -> bool: + return camera.startswith(REPLAY_CAMERA_PREFIX) + + def add_camera( + self, + camera: str, + websocket_server: WSGIServer, + jsmpeg_cameras: dict[str, JsmpegCamera], + preview_recorders: dict[str, PreviewRecorder], + preview_write_times: dict[str, float], + birdseye: Birdseye | None, + ) -> None: + camera_config = self.config.cameras[camera] + jsmpeg_cameras[camera] = JsmpegCamera( + camera_config, self.stop_event, websocket_server + ) + preview_recorders[camera] = PreviewRecorder(camera_config) + preview_write_times[camera] = 0 + + if ( + birdseye is not None + and self.config.birdseye.enabled + and camera_config.birdseye.enabled + ): + birdseye.add_camera(camera) + def run(self) -> None: self.pre_run_setup(self.config.logger) @@ -118,14 +149,17 @@ class OutputProcess(FrigateProcess): move_preview_frames("cache") for camera, cam_config in self.config.cameras.items(): - if not cam_config.enabled_in_config: + if not 
cam_config.enabled_in_config or self.is_debug_replay_camera(camera): continue - jsmpeg_cameras[camera] = JsmpegCamera( - cam_config, self.stop_event, websocket_server + self.add_camera( + camera, + websocket_server, + jsmpeg_cameras, + preview_recorders, + preview_write_times, + birdseye, ) - preview_recorders[camera] = PreviewRecorder(cam_config) - preview_write_times[camera] = 0 if self.config.birdseye.enabled: birdseye = Birdseye(self.config, self.stop_event, websocket_server) @@ -138,19 +172,15 @@ class OutputProcess(FrigateProcess): if CameraConfigUpdateEnum.add in updates: for camera in updates["add"]: - jsmpeg_cameras[camera] = JsmpegCamera( - self.config.cameras[camera], self.stop_event, websocket_server - ) - preview_recorders[camera] = PreviewRecorder( - self.config.cameras[camera] - ) - preview_write_times[camera] = 0 - - if ( - self.config.birdseye.enabled - and self.config.cameras[camera].birdseye.enabled - ): - birdseye.add_camera(camera) + if not self.is_debug_replay_camera(camera): + self.add_camera( + camera, + websocket_server, + jsmpeg_cameras, + preview_recorders, + preview_write_times, + birdseye, + ) (topic, data) = detection_subscriber.check_for_update(timeout=1) now = datetime.datetime.now().timestamp() @@ -174,7 +204,11 @@ class OutputProcess(FrigateProcess): _, ) = data - if not self.config.cameras[camera].enabled: + if ( + camera not in self.config.cameras + or not self.config.cameras[camera].enabled + or self.is_debug_replay_camera(camera) + ): continue frame = frame_manager.get( diff --git a/frigate/output/preview.py b/frigate/output/preview.py index 6dfd90904..b66c1298a 100644 --- a/frigate/output/preview.py +++ b/frigate/output/preview.py @@ -47,6 +47,15 @@ PREVIEW_QUALITY_BIT_RATES = { RecordQualityEnum.high: 9864, RecordQualityEnum.very_high: 10096, } +# the -qmax param for ffmpeg prevents the encoder from overly compressing frames while still trying to hit the bitrate target +# lower values are higher quality. 
This is especially important for iniitial frames in the segment +PREVIEW_QMAX_PARAM = { + RecordQualityEnum.very_low: "", + RecordQualityEnum.low: "", + RecordQualityEnum.medium: "", + RecordQualityEnum.high: " -qmax 25", + RecordQualityEnum.very_high: " -qmax 25", +} def get_cache_image_name(camera: str, frame_time: float) -> str: @@ -57,6 +66,51 @@ def get_cache_image_name(camera: str, frame_time: float) -> str: ) +def get_most_recent_preview_frame(camera: str, before: float = None) -> str | None: + """Get the most recent preview frame for a camera.""" + if not os.path.exists(PREVIEW_CACHE_DIR): + return None + + try: + # files are named preview_{camera}-{timestamp}.webp + # we want the largest timestamp that is less than or equal to before + preview_files = [ + f + for f in os.listdir(PREVIEW_CACHE_DIR) + if f.startswith(f"preview_{camera}-") + and f.endswith(f".{PREVIEW_FRAME_TYPE}") + ] + + if not preview_files: + return None + + # sort by timestamp in descending order + # filenames are like preview_front-1712345678.901234.webp + preview_files.sort(reverse=True) + + if before is None: + return os.path.join(PREVIEW_CACHE_DIR, preview_files[0]) + + for file_name in preview_files: + try: + # Extract timestamp: preview_front-1712345678.901234.webp + # Split by dash and extension + timestamp_part = file_name.split("-")[-1].split( + f".{PREVIEW_FRAME_TYPE}" + )[0] + timestamp = float(timestamp_part) + + if timestamp <= before: + return os.path.join(PREVIEW_CACHE_DIR, file_name) + except (ValueError, IndexError): + continue + + return None + except Exception as e: + logger.error(f"Error searching for most recent preview frame: {e}") + return None + + class FFMpegConverter(threading.Thread): """Convert a list of still frames into a vfr mp4.""" @@ -80,7 +134,7 @@ class FFMpegConverter(threading.Thread): config.ffmpeg.ffmpeg_path, "default", input="-f concat -y -protocol_whitelist pipe,file -safe 0 -threads 1 -i /dev/stdin", - output=f"-threads 1 -g 
{PREVIEW_KEYFRAME_INTERVAL} -bf 0 -b:v {PREVIEW_QUALITY_BIT_RATES[self.config.record.preview.quality]} {FPS_VFR_PARAM} -movflags +faststart -pix_fmt yuv420p {self.path}", + output=f"-threads 1 -g {PREVIEW_KEYFRAME_INTERVAL} -bf 0 -b:v {PREVIEW_QUALITY_BIT_RATES[self.config.record.preview.quality]}{PREVIEW_QMAX_PARAM[self.config.record.preview.quality]} {FPS_VFR_PARAM} -movflags +faststart -pix_fmt yuv420p {self.path}", type=EncodeTypeEnum.preview, ) diff --git a/frigate/ptz/autotrack.py b/frigate/ptz/autotrack.py index 6e86ecbf2..eb2d16940 100644 --- a/frigate/ptz/autotrack.py +++ b/frigate/ptz/autotrack.py @@ -116,7 +116,9 @@ class PtzMotionEstimator: mask[y1:y2, x1:x2] = 0 # merge camera config motion mask with detections. Norfair function needs 0,1 mask - mask = np.bitwise_and(mask, self.camera_config.motion.mask).clip(max=1) + mask = np.bitwise_and(mask, self.camera_config.motion.rasterized_mask).clip( + max=1 + ) # Norfair estimator function needs color so it can convert it right back to gray frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2BGRA) diff --git a/frigate/record/cleanup.py b/frigate/record/cleanup.py index 94dd43eba..15a0ba7e8 100644 --- a/frigate/record/cleanup.py +++ b/frigate/record/cleanup.py @@ -13,9 +13,8 @@ from playhouse.sqlite_ext import SqliteExtDatabase from frigate.config import CameraConfig, FrigateConfig, RetainModeEnum from frigate.const import CACHE_DIR, CLIPS_DIR, MAX_WAL_SIZE, RECORD_DIR from frigate.models import Previews, Recordings, ReviewSegment, UserReviewStatus -from frigate.record.util import remove_empty_directories, sync_recordings from frigate.util.builtin import clear_and_unlink -from frigate.util.time import get_tomorrow_at_time +from frigate.util.media import remove_empty_directories logger = logging.getLogger(__name__) @@ -61,7 +60,7 @@ class RecordingCleanup(threading.Thread): db.execute_sql("PRAGMA wal_checkpoint(TRUNCATE);") db.close() - def expire_review_segments(self, config: CameraConfig, now: datetime) -> None: + 
def expire_review_segments(self, config: CameraConfig, now: datetime) -> set[Path]: """Delete review segments that are expired""" alert_expire_date = ( now - datetime.timedelta(days=config.record.alerts.retain.days) @@ -85,9 +84,12 @@ class RecordingCleanup(threading.Thread): .namedtuples() ) + maybe_empty_dirs = set() thumbs_to_delete = list(map(lambda x: x[1], expired_reviews)) for thumb_path in thumbs_to_delete: - Path(thumb_path).unlink(missing_ok=True) + thumb_path = Path(thumb_path) + thumb_path.unlink(missing_ok=True) + maybe_empty_dirs.add(thumb_path.parent) max_deletes = 100000 deleted_reviews_list = list(map(lambda x: x[0], expired_reviews)) @@ -100,13 +102,15 @@ class RecordingCleanup(threading.Thread): << deleted_reviews_list[i : i + max_deletes] ).execute() + return maybe_empty_dirs + def expire_existing_camera_recordings( self, continuous_expire_date: float, motion_expire_date: float, config: CameraConfig, reviews: ReviewSegment, - ) -> None: + ) -> set[Path]: """Delete recordings for existing camera based on retention config.""" # Get the timestamp for cutoff of retained days @@ -137,6 +141,8 @@ class RecordingCleanup(threading.Thread): .iterator() ) + maybe_empty_dirs = set() + # loop over recordings and see if they overlap with any non-expired reviews # TODO: expire segments based on segment stats according to config review_start = 0 @@ -191,8 +197,10 @@ class RecordingCleanup(threading.Thread): ) or (mode == RetainModeEnum.active_objects and recording.objects == 0) ): - Path(recording.path).unlink(missing_ok=True) + recording_path = Path(recording.path) + recording_path.unlink(missing_ok=True) deleted_recordings.add(recording.id) + maybe_empty_dirs.add(recording_path.parent) else: kept_recordings.append((recording.start_time, recording.end_time)) @@ -253,8 +261,10 @@ class RecordingCleanup(threading.Thread): # Delete previews without any relevant recordings if not keep: - Path(preview.path).unlink(missing_ok=True) + preview_path = 
Path(preview.path) + preview_path.unlink(missing_ok=True) deleted_previews.add(preview.id) + maybe_empty_dirs.add(preview_path.parent) # expire previews logger.debug(f"Expiring {len(deleted_previews)} previews") @@ -266,7 +276,9 @@ class RecordingCleanup(threading.Thread): Previews.id << deleted_previews_list[i : i + max_deletes] ).execute() - def expire_recordings(self) -> None: + return maybe_empty_dirs + + def expire_recordings(self) -> set[Path]: """Delete recordings based on retention config.""" logger.debug("Start expire recordings.") logger.debug("Start deleted cameras.") @@ -291,10 +303,14 @@ class RecordingCleanup(threading.Thread): .iterator() ) + maybe_empty_dirs = set() + deleted_recordings = set() for recording in no_camera_recordings: - Path(recording.path).unlink(missing_ok=True) + recording_path = Path(recording.path) + recording_path.unlink(missing_ok=True) deleted_recordings.add(recording.id) + maybe_empty_dirs.add(recording_path.parent) logger.debug(f"Expiring {len(deleted_recordings)} recordings") # delete up to 100,000 at a time @@ -311,7 +327,7 @@ class RecordingCleanup(threading.Thread): logger.debug(f"Start camera: {camera}.") now = datetime.datetime.now() - self.expire_review_segments(config, now) + maybe_empty_dirs |= self.expire_review_segments(config, now) continuous_expire_date = ( now - datetime.timedelta(days=config.record.continuous.days) ).timestamp() @@ -341,7 +357,7 @@ class RecordingCleanup(threading.Thread): .namedtuples() ) - self.expire_existing_camera_recordings( + maybe_empty_dirs |= self.expire_existing_camera_recordings( continuous_expire_date, motion_expire_date, config, reviews ) logger.debug(f"End camera: {camera}.") @@ -349,12 +365,9 @@ class RecordingCleanup(threading.Thread): logger.debug("End all cameras.") logger.debug("End expire recordings.") - def run(self) -> None: - # on startup sync recordings with disk if enabled - if self.config.record.sync_recordings: - sync_recordings(limited=False) - next_sync = 
get_tomorrow_at_time(3) + return maybe_empty_dirs + def run(self) -> None: # Expire tmp clips every minute, recordings and clean directories every hour. for counter in itertools.cycle(range(self.config.record.expire_interval)): if self.stop_event.wait(60): @@ -363,16 +376,8 @@ class RecordingCleanup(threading.Thread): self.clean_tmp_previews() - if ( - self.config.record.sync_recordings - and datetime.datetime.now().astimezone(datetime.timezone.utc) - > next_sync - ): - sync_recordings(limited=True) - next_sync = get_tomorrow_at_time(3) - if counter == 0: self.clean_tmp_clips() - self.expire_recordings() - remove_empty_directories(RECORD_DIR) + maybe_empty_dirs = self.expire_recordings() + remove_empty_directories(Path(RECORD_DIR), maybe_empty_dirs) self.truncate_wal() diff --git a/frigate/record/export.py b/frigate/record/export.py index d4b49bb4b..c1c478ef4 100644 --- a/frigate/record/export.py +++ b/frigate/record/export.py @@ -33,6 +33,7 @@ from frigate.util.time import is_current_hour logger = logging.getLogger(__name__) +DEFAULT_TIME_LAPSE_FFMPEG_ARGS = "-vf setpts=0.04*PTS -r 30" TIMELAPSE_DATA_INPUT_ARGS = "-an -skip_frame nokey" @@ -40,11 +41,6 @@ def lower_priority(): os.nice(PROCESS_PRIORITY_LOW) -class PlaybackFactorEnum(str, Enum): - realtime = "realtime" - timelapse_25x = "timelapse_25x" - - class PlaybackSourceEnum(str, Enum): recordings = "recordings" preview = "preview" @@ -62,8 +58,11 @@ class RecordingExporter(threading.Thread): image: Optional[str], start_time: int, end_time: int, - playback_factor: PlaybackFactorEnum, playback_source: PlaybackSourceEnum, + export_case_id: Optional[str] = None, + ffmpeg_input_args: Optional[str] = None, + ffmpeg_output_args: Optional[str] = None, + cpu_fallback: bool = False, ) -> None: super().__init__() self.config = config @@ -73,8 +72,11 @@ class RecordingExporter(threading.Thread): self.user_provided_image = image self.start_time = start_time self.end_time = end_time - self.playback_factor = playback_factor 
self.playback_source = playback_source + self.export_case_id = export_case_id + self.ffmpeg_input_args = ffmpeg_input_args + self.ffmpeg_output_args = ffmpeg_output_args + self.cpu_fallback = cpu_fallback # ensure export thumb dir Path(os.path.join(CLIPS_DIR, "export")).mkdir(exist_ok=True) @@ -179,9 +181,16 @@ class RecordingExporter(threading.Thread): return thumb_path - def get_record_export_command(self, video_path: str) -> list[str]: + def get_record_export_command( + self, video_path: str, use_hwaccel: bool = True + ) -> list[str]: + # handle case where internal port is a string with ip:port + internal_port = self.config.networking.listen.internal + if type(internal_port) is str: + internal_port = int(internal_port.split(":")[-1]) + if (self.end_time - self.start_time) <= MAX_PLAYLIST_SECONDS: - playlist_lines = f"http://127.0.0.1:5000/vod/{self.camera}/start/{self.start_time}/end/{self.end_time}/index.m3u8" + playlist_lines = f"http://127.0.0.1:{internal_port}/vod/{self.camera}/start/{self.start_time}/end/{self.end_time}/index.m3u8" ffmpeg_input = ( f"-y -protocol_whitelist pipe,file,http,tcp -i {playlist_lines}" ) @@ -213,25 +222,30 @@ class RecordingExporter(threading.Thread): for page in range(1, num_pages + 1): playlist = export_recordings.paginate(page, page_size) playlist_lines.append( - f"file 'http://127.0.0.1:5000/vod/{self.camera}/start/{float(playlist[0].start_time)}/end/{float(playlist[-1].end_time)}/index.m3u8'" + f"file 'http://127.0.0.1:{internal_port}/vod/{self.camera}/start/{float(playlist[0].start_time)}/end/{float(playlist[-1].end_time)}/index.m3u8'" ) ffmpeg_input = "-y -protocol_whitelist pipe,file,http,tcp -f concat -safe 0 -i /dev/stdin" - if self.playback_factor == PlaybackFactorEnum.realtime: - ffmpeg_cmd = ( - f"{self.config.ffmpeg.ffmpeg_path} -hide_banner {ffmpeg_input} -c copy -movflags +faststart" - ).split(" ") - elif self.playback_factor == PlaybackFactorEnum.timelapse_25x: + if self.ffmpeg_input_args is not None and 
self.ffmpeg_output_args is not None: + hwaccel_args = ( + self.config.cameras[self.camera].record.export.hwaccel_args + if use_hwaccel + else None + ) ffmpeg_cmd = ( parse_preset_hardware_acceleration_encode( self.config.ffmpeg.ffmpeg_path, - self.config.ffmpeg.hwaccel_args, - f"-an {ffmpeg_input}", - f"{self.config.cameras[self.camera].record.export.timelapse_args} -movflags +faststart", + hwaccel_args, + f"{self.ffmpeg_input_args} -an {ffmpeg_input}".strip(), + f"{self.ffmpeg_output_args} -movflags +faststart".strip(), EncodeTypeEnum.timelapse, ) ).split(" ") + else: + ffmpeg_cmd = ( + f"{self.config.ffmpeg.ffmpeg_path} -hide_banner {ffmpeg_input} -c copy -movflags +faststart" + ).split(" ") # add metadata title = f"Frigate Recording for {self.camera}, {self.get_datetime_from_timestamp(self.start_time)} - {self.get_datetime_from_timestamp(self.end_time)}" @@ -241,7 +255,9 @@ class RecordingExporter(threading.Thread): return ffmpeg_cmd, playlist_lines - def get_preview_export_command(self, video_path: str) -> list[str]: + def get_preview_export_command( + self, video_path: str, use_hwaccel: bool = True + ) -> list[str]: playlist_lines = [] codec = "-c copy" @@ -309,20 +325,25 @@ class RecordingExporter(threading.Thread): "-y -protocol_whitelist pipe,file,tcp -f concat -safe 0 -i /dev/stdin" ) - if self.playback_factor == PlaybackFactorEnum.realtime: - ffmpeg_cmd = ( - f"{self.config.ffmpeg.ffmpeg_path} -hide_banner {ffmpeg_input} {codec} -movflags +faststart {video_path}" - ).split(" ") - elif self.playback_factor == PlaybackFactorEnum.timelapse_25x: + if self.ffmpeg_input_args is not None and self.ffmpeg_output_args is not None: + hwaccel_args = ( + self.config.cameras[self.camera].record.export.hwaccel_args + if use_hwaccel + else None + ) ffmpeg_cmd = ( parse_preset_hardware_acceleration_encode( self.config.ffmpeg.ffmpeg_path, - self.config.ffmpeg.hwaccel_args, - f"{TIMELAPSE_DATA_INPUT_ARGS} {ffmpeg_input}", - 
f"{self.config.cameras[self.camera].record.export.timelapse_args} -movflags +faststart {video_path}", + hwaccel_args, + f"{self.ffmpeg_input_args} {TIMELAPSE_DATA_INPUT_ARGS} {ffmpeg_input}".strip(), + f"{self.ffmpeg_output_args} -movflags +faststart {video_path}".strip(), EncodeTypeEnum.timelapse, ) ).split(" ") + else: + ffmpeg_cmd = ( + f"{self.config.ffmpeg.ffmpeg_path} -hide_banner {ffmpeg_input} {codec} -movflags +faststart {video_path}" + ).split(" ") # add metadata title = f"Frigate Preview for {self.camera}, {self.get_datetime_from_timestamp(self.start_time)} - {self.get_datetime_from_timestamp(self.end_time)}" @@ -348,17 +369,20 @@ class RecordingExporter(threading.Thread): video_path = f"{EXPORT_DIR}/{self.camera}_{filename_start_datetime}-{filename_end_datetime}_{cleaned_export_id}.mp4" thumb_path = self.save_thumbnail(self.export_id) - Export.insert( - { - Export.id: self.export_id, - Export.camera: self.camera, - Export.name: export_name, - Export.date: self.start_time, - Export.video_path: video_path, - Export.thumb_path: thumb_path, - Export.in_progress: True, - } - ).execute() + export_values = { + Export.id: self.export_id, + Export.camera: self.camera, + Export.name: export_name, + Export.date: self.start_time, + Export.video_path: video_path, + Export.thumb_path: thumb_path, + Export.in_progress: True, + } + + if self.export_case_id is not None: + export_values[Export.export_case] = self.export_case_id + + Export.insert(export_values).execute() try: if self.playback_source == PlaybackSourceEnum.recordings: @@ -376,6 +400,34 @@ class RecordingExporter(threading.Thread): capture_output=True, ) + # If export failed and cpu_fallback is enabled, retry without hwaccel + if ( + p.returncode != 0 + and self.cpu_fallback + and self.ffmpeg_input_args is not None + and self.ffmpeg_output_args is not None + ): + logger.warning( + f"Export with hardware acceleration failed, retrying without hwaccel for {self.export_id}" + ) + + if self.playback_source == 
PlaybackSourceEnum.recordings: + ffmpeg_cmd, playlist_lines = self.get_record_export_command( + video_path, use_hwaccel=False + ) + else: + ffmpeg_cmd, playlist_lines = self.get_preview_export_command( + video_path, use_hwaccel=False + ) + + p = sp.run( + ffmpeg_cmd, + input="\n".join(playlist_lines), + encoding="ascii", + preexec_fn=lower_priority, + capture_output=True, + ) + if p.returncode != 0: logger.error( f"Failed to export {self.playback_source.value} for command {' '.join(ffmpeg_cmd)}" diff --git a/frigate/record/maintainer.py b/frigate/record/maintainer.py index a90d1edc1..68040476a 100644 --- a/frigate/record/maintainer.py +++ b/frigate/record/maintainer.py @@ -50,11 +50,13 @@ class SegmentInfo: active_object_count: int, region_count: int, average_dBFS: int, + motion_heatmap: dict[str, int] | None = None, ) -> None: self.motion_count = motion_count self.active_object_count = active_object_count self.region_count = region_count self.average_dBFS = average_dBFS + self.motion_heatmap = motion_heatmap def should_discard_segment(self, retain_mode: RetainModeEnum) -> bool: keep = False @@ -287,11 +289,12 @@ class RecordingMaintainer(threading.Thread): ) # publish most recently available recording time and None if disabled + camera_cfg = self.config.cameras.get(camera) self.recordings_publisher.publish( ( camera, recordings[0]["start_time"].timestamp() - if self.config.cameras[camera].record.enabled + if camera_cfg and camera_cfg.record.enabled else None, None, ), @@ -315,9 +318,8 @@ class RecordingMaintainer(threading.Thread): ) -> Optional[Recordings]: cache_path: str = recording["cache_path"] start_time: datetime.datetime = recording["start_time"] - record_config = self.config.cameras[camera].record - # Just delete files if recordings are turned off + # Just delete files if camera removed or recordings are turned off if ( camera not in self.config.cameras or not self.config.cameras[camera].record.enabled @@ -454,6 +456,59 @@ class 
RecordingMaintainer(threading.Thread): if end_time < retain_cutoff: self.drop_segment(cache_path) + def _compute_motion_heatmap( + self, camera: str, motion_boxes: list[tuple[int, int, int, int]] + ) -> dict[str, int] | None: + """Compute a 16x16 motion intensity heatmap from motion boxes. + + Returns a sparse dict mapping cell index (as string) to intensity (1-255). + Only cells with motion are included. + + Args: + camera: Camera name to get detect dimensions from. + motion_boxes: List of (x1, y1, x2, y2) pixel coordinates. + + Returns: + Sparse dict like {"45": 3, "46": 5}, or None if no boxes. + """ + if not motion_boxes: + return None + + camera_config = self.config.cameras.get(camera) + if not camera_config: + return None + + frame_width = camera_config.detect.width + frame_height = camera_config.detect.height + + if frame_width <= 0 or frame_height <= 0: + return None + + GRID_SIZE = 16 + counts: dict[int, int] = {} + + for box in motion_boxes: + if len(box) < 4: + continue + x1, y1, x2, y2 = box + + # Convert pixel coordinates to grid cells + grid_x1 = max(0, int((x1 / frame_width) * GRID_SIZE)) + grid_y1 = max(0, int((y1 / frame_height) * GRID_SIZE)) + grid_x2 = min(GRID_SIZE - 1, int((x2 / frame_width) * GRID_SIZE)) + grid_y2 = min(GRID_SIZE - 1, int((y2 / frame_height) * GRID_SIZE)) + + for y in range(grid_y1, grid_y2 + 1): + for x in range(grid_x1, grid_x2 + 1): + idx = y * GRID_SIZE + x + counts[idx] = min(255, counts.get(idx, 0) + 1) + + if not counts: + return None + + # Convert to string keys for JSON storage + return {str(k): v for k, v in counts.items()} + def segment_stats( self, camera: str, start_time: datetime.datetime, end_time: datetime.datetime ) -> SegmentInfo: @@ -461,6 +516,8 @@ class RecordingMaintainer(threading.Thread): active_count = 0 region_count = 0 motion_count = 0 + all_motion_boxes: list[tuple[int, int, int, int]] = [] + for frame in self.object_recordings_info[camera]: # frame is after end time of segment if frame[0] > 
end_time.timestamp(): @@ -479,6 +536,8 @@ class RecordingMaintainer(threading.Thread): ) motion_count += len(frame[2]) region_count += len(frame[3]) + # Collect motion boxes for heatmap computation + all_motion_boxes.extend(frame[2]) audio_values = [] for frame in self.audio_recordings_info[camera]: @@ -498,8 +557,14 @@ class RecordingMaintainer(threading.Thread): average_dBFS = 0 if not audio_values else np.average(audio_values) + motion_heatmap = self._compute_motion_heatmap(camera, all_motion_boxes) + return SegmentInfo( - motion_count, active_count, region_count, round(average_dBFS) + motion_count, + active_count, + region_count, + round(average_dBFS), + motion_heatmap, ) async def move_segment( @@ -590,6 +655,7 @@ class RecordingMaintainer(threading.Thread): Recordings.regions.name: segment_info.region_count, Recordings.dBFS.name: segment_info.average_dBFS, Recordings.segment_size.name: segment_size, + Recordings.motion_heatmap.name: segment_info.motion_heatmap, } except Exception as e: logger.error(f"Unable to store recording segment {cache_path}") diff --git a/frigate/record/util.py b/frigate/record/util.py deleted file mode 100644 index 6a91c1aaf..000000000 --- a/frigate/record/util.py +++ /dev/null @@ -1,147 +0,0 @@ -"""Recordings Utilities.""" - -import datetime -import logging -import os - -from peewee import DatabaseError, chunked - -from frigate.const import RECORD_DIR -from frigate.models import Recordings, RecordingsToDelete - -logger = logging.getLogger(__name__) - - -def remove_empty_directories(directory: str) -> None: - # list all directories recursively and sort them by path, - # longest first - paths = sorted( - [x[0] for x in os.walk(directory)], - key=lambda p: len(str(p)), - reverse=True, - ) - for path in paths: - # don't delete the parent - if path == directory: - continue - if len(os.listdir(path)) == 0: - os.rmdir(path) - - -def sync_recordings(limited: bool) -> None: - """Check the db for stale recordings entries that don't exist in the 
filesystem.""" - - def delete_db_entries_without_file(check_timestamp: float) -> bool: - """Delete db entries where file was deleted outside of frigate.""" - - if limited: - recordings = Recordings.select(Recordings.id, Recordings.path).where( - Recordings.start_time >= check_timestamp - ) - else: - # get all recordings in the db - recordings = Recordings.select(Recordings.id, Recordings.path) - - # Use pagination to process records in chunks - page_size = 1000 - num_pages = (recordings.count() + page_size - 1) // page_size - recordings_to_delete = set() - - for page in range(num_pages): - for recording in recordings.paginate(page, page_size): - if not os.path.exists(recording.path): - recordings_to_delete.add(recording.id) - - if len(recordings_to_delete) == 0: - return True - - logger.info( - f"Deleting {len(recordings_to_delete)} recording DB entries with missing files" - ) - - # convert back to list of dictionaries for insertion - recordings_to_delete = [ - {"id": recording_id} for recording_id in recordings_to_delete - ] - - if float(len(recordings_to_delete)) / max(1, recordings.count()) > 0.5: - logger.warning( - f"Deleting {(len(recordings_to_delete) / max(1, recordings.count()) * 100):.2f}% of recordings DB entries, could be due to configuration error. Aborting..." 
- ) - return False - - # create a temporary table for deletion - RecordingsToDelete.create_table(temporary=True) - - # insert ids to the temporary table - max_inserts = 1000 - for batch in chunked(recordings_to_delete, max_inserts): - RecordingsToDelete.insert_many(batch).execute() - - try: - # delete records in the main table that exist in the temporary table - query = Recordings.delete().where( - Recordings.id.in_(RecordingsToDelete.select(RecordingsToDelete.id)) - ) - query.execute() - except DatabaseError as e: - logger.error(f"Database error during recordings db cleanup: {e}") - - return True - - def delete_files_without_db_entry(files_on_disk: list[str]): - """Delete files where file is not inside frigate db.""" - files_to_delete = [] - - for file in files_on_disk: - if not Recordings.select().where(Recordings.path == file).exists(): - files_to_delete.append(file) - - if len(files_to_delete) == 0: - return True - - logger.info( - f"Deleting {len(files_to_delete)} recordings files with missing DB entries" - ) - - if float(len(files_to_delete)) / max(1, len(files_on_disk)) > 0.5: - logger.debug( - f"Deleting {(len(files_to_delete) / max(1, len(files_on_disk)) * 100):.2f}% of recordings DB entries, could be due to configuration error. Aborting..." 
- ) - return False - - for file in files_to_delete: - os.unlink(file) - - return True - - logger.debug("Start sync recordings.") - - # start checking on the hour 36 hours ago - check_point = datetime.datetime.now().replace( - minute=0, second=0, microsecond=0 - ).astimezone(datetime.timezone.utc) - datetime.timedelta(hours=36) - db_success = delete_db_entries_without_file(check_point.timestamp()) - - # only try to cleanup files if db cleanup was successful - if db_success: - if limited: - # get recording files from last 36 hours - hour_check = f"{RECORD_DIR}/{check_point.strftime('%Y-%m-%d/%H')}" - files_on_disk = { - os.path.join(root, file) - for root, _, files in os.walk(RECORD_DIR) - for file in files - if root > hour_check - } - else: - # get all recordings files on disk and put them in a set - files_on_disk = { - os.path.join(root, file) - for root, _, files in os.walk(RECORD_DIR) - for file in files - } - - delete_files_without_db_entry(files_on_disk) - - logger.debug("End sync recordings.") diff --git a/frigate/review/maintainer.py b/frigate/review/maintainer.py index 917c0c5ac..a51c73f88 100644 --- a/frigate/review/maintainer.py +++ b/frigate/review/maintainer.py @@ -394,7 +394,11 @@ class ReviewSegmentMaintainer(threading.Thread): if activity.has_activity_category(SeverityEnum.alert): # update current time for last alert activity - segment.last_alert_time = frame_time + if ( + segment.last_alert_time is None + or frame_time > segment.last_alert_time + ): + segment.last_alert_time = frame_time if segment.severity != SeverityEnum.alert: # if segment is not alert category but current activity is @@ -404,7 +408,11 @@ class ReviewSegmentMaintainer(threading.Thread): should_update_image = True if activity.has_activity_category(SeverityEnum.detection): - segment.last_detection_time = frame_time + if ( + segment.last_detection_time is None + or frame_time > segment.last_detection_time + ): + segment.last_detection_time = frame_time for object in 
activity.get_all_objects(): # Alert-level objects should always be added (they extend/upgrade the segment) @@ -644,6 +652,9 @@ class ReviewSegmentMaintainer(threading.Thread): if camera not in self.indefinite_events: self.indefinite_events[camera] = {} + if camera not in self.config.cameras: + continue + if ( not self.config.cameras[camera].enabled or not self.config.cameras[camera].record.enabled @@ -695,17 +706,28 @@ class ReviewSegmentMaintainer(threading.Thread): current_segment.detections[manual_info["event_id"]] = ( manual_info["label"] ) - if ( - topic == DetectionTypeEnum.api - and self.config.cameras[camera].review.alerts.enabled - ): - current_segment.severity = SeverityEnum.alert + if topic == DetectionTypeEnum.api: + # manual_info["label"] contains 'label: sub_label' + # so split out the label without modifying manual_info + if ( + self.config.cameras[camera].review.detections.enabled + and manual_info["label"].split(": ")[0] + in self.config.cameras[camera].review.detections.labels + ): + current_segment.last_detection_time = manual_info[ + "end_time" + ] + elif self.config.cameras[camera].review.alerts.enabled: + current_segment.severity = SeverityEnum.alert + current_segment.last_alert_time = manual_info[ + "end_time" + ] elif ( topic == DetectionTypeEnum.lpr and self.config.cameras[camera].review.detections.enabled ): current_segment.severity = SeverityEnum.detection - current_segment.last_alert_time = manual_info["end_time"] + current_segment.last_alert_time = manual_info["end_time"] elif manual_info["state"] == ManualEventState.start: self.indefinite_events[camera][manual_info["event_id"]] = ( manual_info["label"] @@ -717,7 +739,18 @@ class ReviewSegmentMaintainer(threading.Thread): topic == DetectionTypeEnum.api and self.config.cameras[camera].review.alerts.enabled ): - current_segment.severity = SeverityEnum.alert + # manual_info["label"] contains 'label: sub_label' + # so split out the label without modifying manual_info + if ( + not 
self.config.cameras[ + camera + ].review.detections.enabled + or manual_info["label"].split(": ")[0] + not in self.config.cameras[ + camera + ].review.detections.labels + ): + current_segment.severity = SeverityEnum.alert elif ( topic == DetectionTypeEnum.lpr and self.config.cameras[camera].review.detections.enabled @@ -789,11 +822,23 @@ class ReviewSegmentMaintainer(threading.Thread): detections, ) elif topic == DetectionTypeEnum.api: - if self.config.cameras[camera].review.alerts.enabled: + severity = None + # manual_info["label"] contains 'label: sub_label' + # so split out the label without modifying manual_info + if ( + self.config.cameras[camera].review.detections.enabled + and manual_info["label"].split(": ")[0] + in self.config.cameras[camera].review.detections.labels + ): + severity = SeverityEnum.detection + elif self.config.cameras[camera].review.alerts.enabled: + severity = SeverityEnum.alert + + if severity: self.active_review_segments[camera] = PendingReviewSegment( camera, frame_time, - SeverityEnum.alert, + severity, {manual_info["event_id"]: manual_info["label"]}, {}, [], @@ -820,7 +865,7 @@ class ReviewSegmentMaintainer(threading.Thread): ].last_detection_time = manual_info["end_time"] else: logger.warning( - f"Manual event API has been called for {camera}, but alerts are disabled. This manual event will not appear as an alert." + f"Manual event API has been called for {camera}, but alerts and detections are disabled. This manual event will not appear as an alert or detection." 
) elif topic == DetectionTypeEnum.lpr: if self.config.cameras[camera].review.detections.enabled: diff --git a/frigate/stats/util.py b/frigate/stats/util.py index 410350d96..40337268e 100644 --- a/frigate/stats/util.py +++ b/frigate/stats/util.py @@ -22,6 +22,7 @@ from frigate.util.services import ( get_bandwidth_stats, get_cpu_stats, get_fs_type, + get_hailo_temps, get_intel_gpu_stats, get_jetson_stats, get_nvidia_gpu_stats, @@ -90,9 +91,80 @@ def get_temperatures() -> dict[str, float]: if temp is not None: temps[apex] = temp + # Get temperatures for Hailo devices + temps.update(get_hailo_temps()) + return temps +def get_detector_temperature( + detector_type: str, + detector_index_by_type: dict[str, int], +) -> Optional[float]: + """Get temperature for a specific detector based on its type.""" + if detector_type == "edgetpu": + # Get temperatures for all attached Corals + base = "/sys/class/apex/" + if os.path.isdir(base): + apex_devices = sorted(os.listdir(base)) + index = detector_index_by_type.get("edgetpu", 0) + if index < len(apex_devices): + apex_name = apex_devices[index] + temp = read_temperature(os.path.join(base, apex_name, "temp")) + if temp is not None: + return temp + elif detector_type == "hailo8l": + # Get temperatures for Hailo devices + hailo_temps = get_hailo_temps() + if hailo_temps: + hailo_device_names = sorted(hailo_temps.keys()) + index = detector_index_by_type.get("hailo8l", 0) + if index < len(hailo_device_names): + device_name = hailo_device_names[index] + return hailo_temps[device_name] + elif detector_type == "rknn": + # Rockchip temperatures are handled by the GPU / NPU stats + # as there are not detector specific temperatures + pass + + return None + + +def get_detector_stats( + stats_tracking: StatsTrackingTypes, +) -> dict[str, dict[str, Any]]: + """Get stats for all detectors, including temperatures based on detector type.""" + detector_stats: dict[str, dict[str, Any]] = {} + detector_type_indices: dict[str, int] = {} + + for name, 
detector in stats_tracking["detectors"].items(): + pid = detector.detect_process.pid if detector.detect_process else None + detector_type = detector.detector_config.type + + # Keep track of the index for each detector type to match temperatures correctly + current_index = detector_type_indices.get(detector_type, 0) + detector_type_indices[detector_type] = current_index + 1 + + detector_stat = { + "inference_speed": round(detector.avg_inference_speed.value * 1000, 2), # type: ignore[attr-defined] + # issue https://github.com/python/typeshed/issues/8799 + # from mypy 0.981 onwards + "detection_start": detector.detection_start.value, # type: ignore[attr-defined] + # issue https://github.com/python/typeshed/issues/8799 + # from mypy 0.981 onwards + "pid": pid, + } + + temp = get_detector_temperature(detector_type, {detector_type: current_index}) + + if temp is not None: + detector_stat["temperature"] = round(temp, 1) + + detector_stats[name] = detector_stat + + return detector_stats + + def get_processing_stats( config: FrigateConfig, stats: dict[str, str], hwaccel_errors: list[str] ) -> None: @@ -173,6 +245,7 @@ async def set_gpu_stats( "mem": str(round(float(nvidia_usage[i]["mem"]), 2)) + "%", "enc": str(round(float(nvidia_usage[i]["enc"]), 2)) + "%", "dec": str(round(float(nvidia_usage[i]["dec"]), 2)) + "%", + "temp": str(nvidia_usage[i]["temp"]), } else: @@ -267,6 +340,9 @@ def stats_snapshot( stats["cameras"] = {} for name, camera_stats in camera_metrics.items(): + if name not in config.cameras: + continue + total_camera_fps += camera_stats.camera_fps.value total_process_fps += camera_stats.process_fps.value total_skipped_fps += camera_stats.skipped_fps.value @@ -278,6 +354,32 @@ def stats_snapshot( if camera_stats.capture_process_pid.value else None ) + # Calculate connection quality based on current state + # This is computed at stats-collection time so offline cameras + # correctly show as unusable rather than excellent + expected_fps = 
config.cameras[name].detect.fps + current_fps = camera_stats.camera_fps.value + reconnects = camera_stats.reconnects_last_hour.value + stalls = camera_stats.stalls_last_hour.value + + if current_fps < 0.1: + quality_str = "unusable" + elif reconnects == 0 and current_fps >= 0.9 * expected_fps and stalls < 5: + quality_str = "excellent" + elif reconnects <= 2 and current_fps >= 0.6 * expected_fps: + quality_str = "fair" + elif reconnects > 10 or current_fps < 1.0 or stalls > 100: + quality_str = "unusable" + else: + quality_str = "poor" + + connection_quality = { + "connection_quality": quality_str, + "expected_fps": expected_fps, + "reconnects_last_hour": reconnects, + "stalls_last_hour": stalls, + } + stats["cameras"][name] = { "camera_fps": round(camera_stats.camera_fps.value, 2), "process_fps": round(camera_stats.process_fps.value, 2), @@ -289,20 +391,10 @@ def stats_snapshot( "ffmpeg_pid": ffmpeg_pid, "audio_rms": round(camera_stats.audio_rms.value, 4), "audio_dBFS": round(camera_stats.audio_dBFS.value, 4), + **connection_quality, } - stats["detectors"] = {} - for name, detector in stats_tracking["detectors"].items(): - pid = detector.detect_process.pid if detector.detect_process else None - stats["detectors"][name] = { - "inference_speed": round(detector.avg_inference_speed.value * 1000, 2), # type: ignore[attr-defined] - # issue https://github.com/python/typeshed/issues/8799 - # from mypy 0.981 onwards - "detection_start": detector.detection_start.value, # type: ignore[attr-defined] - # issue https://github.com/python/typeshed/issues/8799 - # from mypy 0.981 onwards - "pid": pid, - } + stats["detectors"] = get_detector_stats(stats_tracking) stats["camera_fps"] = round(total_camera_fps, 2) stats["process_fps"] = round(total_process_fps, 2) stats["skipped_fps"] = round(total_skipped_fps, 2) @@ -388,7 +480,6 @@ def stats_snapshot( "version": VERSION, "latest_version": stats_tracking["latest_frigate_version"], "storage": {}, - "temperatures": get_temperatures(), 
"last_updated": int(time.time()), } diff --git a/frigate/storage.py b/frigate/storage.py index feabe06ff..93463c542 100644 --- a/frigate/storage.py +++ b/frigate/storage.py @@ -8,7 +8,7 @@ from pathlib import Path from peewee import SQL, fn from frigate.config import FrigateConfig -from frigate.const import RECORD_DIR +from frigate.const import RECORD_DIR, REPLAY_CAMERA_PREFIX from frigate.models import Event, Recordings from frigate.util.builtin import clear_and_unlink @@ -32,6 +32,10 @@ class StorageMaintainer(threading.Thread): def calculate_camera_bandwidth(self) -> None: """Calculate an average MB/hr for each camera.""" for camera in self.config.cameras.keys(): + # Skip replay cameras + if camera.startswith(REPLAY_CAMERA_PREFIX): + continue + # cameras with < 50 segments should be refreshed to keep size accurate # when few segments are available if self.camera_storage_stats.get(camera, {}).get("needs_refresh", True): @@ -77,6 +81,10 @@ class StorageMaintainer(threading.Thread): usages: dict[str, dict] = {} for camera in self.config.cameras.keys(): + # Skip replay cameras + if camera.startswith(REPLAY_CAMERA_PREFIX): + continue + camera_storage = ( Recordings.select(fn.SUM(Recordings.segment_size)) .where(Recordings.camera == camera, Recordings.segment_size != 0) diff --git a/frigate/test/http_api/base_http_test.py b/frigate/test/http_api/base_http_test.py index 16ded63f8..2ca4aafd0 100644 --- a/frigate/test/http_api/base_http_test.py +++ b/frigate/test/http_api/base_http_test.py @@ -13,6 +13,7 @@ from pydantic import Json from frigate.api.fastapi_app import create_fastapi_app from frigate.config import FrigateConfig from frigate.const import BASE_DIR, CACHE_DIR +from frigate.debug_replay import DebugReplayManager from frigate.models import Event, Recordings, ReviewSegment from frigate.review.types import SeverityEnum from frigate.test.const import TEST_DB, TEST_DB_CLEANUPS @@ -141,6 +142,7 @@ class BaseTestHttp(unittest.TestCase): stats, 
event_metadata_publisher, None, + DebugReplayManager(), enforce_default_admin=False, ) diff --git a/frigate/test/http_api/test_http_app.py b/frigate/test/http_api/test_http_app.py index b04b1cf55..bf8e9c72a 100644 --- a/frigate/test/http_api/test_http_app.py +++ b/frigate/test/http_api/test_http_app.py @@ -22,3 +22,32 @@ class TestHttpApp(BaseTestHttp): response = client.get("/stats") response_json = response.json() assert response_json == self.test_stats + + def test_config_set_in_memory_replaces_objects_track_list(self): + self.minimal_config["cameras"]["front_door"]["objects"] = { + "track": ["person", "car"], + } + app = super().create_app() + app.config_publisher = Mock() + + with AuthTestClient(app) as client: + response = client.put( + "/config/set", + json={ + "requires_restart": 0, + "skip_save": True, + "update_topic": "config/cameras/front_door/objects", + "config_data": { + "cameras": { + "front_door": { + "objects": { + "track": ["person"], + } + } + } + }, + }, + ) + + assert response.status_code == 200 + assert app.frigate_config.cameras["front_door"].objects.track == ["person"] diff --git a/frigate/test/http_api/test_http_latest_frame.py b/frigate/test/http_api/test_http_latest_frame.py new file mode 100644 index 000000000..755ee6eb1 --- /dev/null +++ b/frigate/test/http_api/test_http_latest_frame.py @@ -0,0 +1,107 @@ +import os +import shutil +from unittest.mock import MagicMock + +import cv2 +import numpy as np + +from frigate.output.preview import PREVIEW_CACHE_DIR, PREVIEW_FRAME_TYPE +from frigate.test.http_api.base_http_test import AuthTestClient, BaseTestHttp + + +class TestHttpLatestFrame(BaseTestHttp): + def setUp(self): + super().setUp([]) + self.app = super().create_app() + self.app.detected_frames_processor = MagicMock() + + if os.path.exists(PREVIEW_CACHE_DIR): + shutil.rmtree(PREVIEW_CACHE_DIR) + os.makedirs(PREVIEW_CACHE_DIR) + + def tearDown(self): + if os.path.exists(PREVIEW_CACHE_DIR): + shutil.rmtree(PREVIEW_CACHE_DIR) + 
super().tearDown() + + def test_latest_frame_fallback_to_preview(self): + camera = "front_door" + # 1. Mock frame processor to return None (simulating offline/missing frame) + self.app.detected_frames_processor.get_current_frame.return_value = None + # Return a timestamp that is after our dummy preview frame + self.app.detected_frames_processor.get_current_frame_time.return_value = ( + 1234567891.0 + ) + + # 2. Create a dummy preview file + dummy_frame = np.zeros((180, 320, 3), np.uint8) + cv2.putText( + dummy_frame, + "PREVIEW", + (50, 50), + cv2.FONT_HERSHEY_SIMPLEX, + 1, + (255, 255, 255), + 2, + ) + preview_path = os.path.join( + PREVIEW_CACHE_DIR, f"preview_{camera}-1234567890.0.{PREVIEW_FRAME_TYPE}" + ) + cv2.imwrite(preview_path, dummy_frame) + + with AuthTestClient(self.app) as client: + response = client.get(f"/{camera}/latest.webp") + assert response.status_code == 200 + assert response.headers.get("X-Frigate-Offline") == "true" + # Verify we got an image (webp) + assert response.headers.get("content-type") == "image/webp" + + def test_latest_frame_no_fallback_when_live(self): + camera = "front_door" + # 1. Mock frame processor to return a live frame + dummy_frame = np.zeros((180, 320, 3), np.uint8) + self.app.detected_frames_processor.get_current_frame.return_value = dummy_frame + self.app.detected_frames_processor.get_current_frame_time.return_value = ( + 2000000000.0 # Way in the future + ) + + with AuthTestClient(self.app) as client: + response = client.get(f"/{camera}/latest.webp") + assert response.status_code == 200 + assert "X-Frigate-Offline" not in response.headers + + def test_latest_frame_stale_falls_back_to_preview(self): + camera = "front_door" + # 1. 
Mock frame processor to return a stale frame + dummy_frame = np.zeros((180, 320, 3), np.uint8) + self.app.detected_frames_processor.get_current_frame.return_value = dummy_frame + # Return a timestamp that is after our dummy preview frame, but way in the past + self.app.detected_frames_processor.get_current_frame_time.return_value = 1000.0 + + # 2. Create a dummy preview file + preview_path = os.path.join( + PREVIEW_CACHE_DIR, f"preview_{camera}-999.0.{PREVIEW_FRAME_TYPE}" + ) + cv2.imwrite(preview_path, dummy_frame) + + with AuthTestClient(self.app) as client: + response = client.get(f"/{camera}/latest.webp") + assert response.status_code == 200 + assert response.headers.get("X-Frigate-Offline") == "true" + + def test_latest_frame_no_preview_found(self): + camera = "front_door" + # 1. Mock frame processor to return None + self.app.detected_frames_processor.get_current_frame.return_value = None + + # 2. No preview file created + + with AuthTestClient(self.app) as client: + response = client.get(f"/{camera}/latest.webp") + # Should fall back to camera-error.jpg (which might not exist in test env, but let's see) + # If camera-error.jpg is not found, it returns 500 "Unable to get valid frame" in latest_frame + # OR it uses request.app.camera_error_image if already loaded. + + # Since we didn't provide camera-error.jpg, it might 500 if glob fails or return 500 if frame is None. 
+ assert response.status_code in [200, 500] + assert "X-Frigate-Offline" not in response.headers diff --git a/frigate/test/test_config.py b/frigate/test/test_config.py index afe577f2f..e903c2ac3 100644 --- a/frigate/test/test_config.py +++ b/frigate/test/test_config.py @@ -151,6 +151,22 @@ class TestConfig(unittest.TestCase): frigate_config = FrigateConfig(**config) assert "dog" in frigate_config.cameras["back"].objects.track + def test_deep_merge_override_replaces_list_values(self): + base = {"objects": {"track": ["person", "face"]}} + update = {"objects": {"track": ["person"]}} + + merged = deep_merge(base, update, override=True) + + assert merged["objects"]["track"] == ["person"] + + def test_deep_merge_merge_lists_still_appends(self): + base = {"track": ["person"]} + update = {"track": ["face"]} + + merged = deep_merge(base, update, override=True, merge_lists=True) + + assert merged["track"] == ["person", "face"] + def test_override_birdseye(self): config = { "mqtt": {"host": "mqtt"}, @@ -343,8 +359,24 @@ class TestConfig(unittest.TestCase): "fps": 5, }, "objects": { - "mask": "0,0,1,1,0,1", - "filters": {"dog": {"mask": "1,1,1,1,1,1"}}, + "mask": { + "global_mask_1": { + "friendly_name": "Global Mask 1", + "enabled": True, + "coordinates": "0,0,1,1,0,1", + } + }, + "filters": { + "dog": { + "mask": { + "dog_mask_1": { + "friendly_name": "Dog Mask 1", + "enabled": True, + "coordinates": "1,1,1,1,1,1", + } + } + } + }, }, } }, @@ -353,8 +385,10 @@ class TestConfig(unittest.TestCase): frigate_config = FrigateConfig(**config) back_camera = frigate_config.cameras["back"] assert "dog" in back_camera.objects.filters - assert len(back_camera.objects.filters["dog"].raw_mask) == 2 - assert len(back_camera.objects.filters["person"].raw_mask) == 1 + # dog filter has its own mask + global mask merged + assert len(back_camera.objects.filters["dog"].mask) == 2 + # person filter only has the global mask + assert len(back_camera.objects.filters["person"].mask) == 1 def 
test_motion_mask_relative_matches_explicit(self): config = { @@ -373,9 +407,13 @@ class TestConfig(unittest.TestCase): "fps": 5, }, "motion": { - "mask": [ - "0,0,200,100,600,300,800,400", - ] + "mask": { + "explicit_mask": { + "friendly_name": "Explicit Mask", + "enabled": True, + "coordinates": "0,0,200,100,600,300,800,400", + } + } }, }, "relative": { @@ -390,9 +428,13 @@ class TestConfig(unittest.TestCase): "fps": 5, }, "motion": { - "mask": [ - "0.0,0.0,0.25,0.25,0.75,0.75,1.0,1.0", - ] + "mask": { + "relative_mask": { + "friendly_name": "Relative Mask", + "enabled": True, + "coordinates": "0.0,0.0,0.25,0.25,0.75,0.75,1.0,1.0", + } + } }, }, }, @@ -400,8 +442,8 @@ class TestConfig(unittest.TestCase): frigate_config = FrigateConfig(**config) assert np.array_equal( - frigate_config.cameras["explicit"].motion.mask, - frigate_config.cameras["relative"].motion.mask, + frigate_config.cameras["explicit"].motion.rasterized_mask, + frigate_config.cameras["relative"].motion.rasterized_mask, ) def test_default_input_args(self): diff --git a/frigate/test/test_motion_detector.py b/frigate/test/test_motion_detector.py new file mode 100644 index 000000000..cdf4210a5 --- /dev/null +++ b/frigate/test/test_motion_detector.py @@ -0,0 +1,91 @@ +import unittest + +import numpy as np + +from frigate.config.camera.motion import MotionConfig +from frigate.motion.improved_motion import ImprovedMotionDetector + + +class TestImprovedMotionDetector(unittest.TestCase): + def setUp(self): + # small frame for testing; actual frames are grayscale + self.frame_shape = (100, 100) # height, width + self.config = MotionConfig() + # motion detector assumes a rasterized_mask attribute exists on config + # when update_mask() is called; add one manually by bypassing pydantic. 
+ object.__setattr__( + self.config, + "rasterized_mask", + np.ones((self.frame_shape[0], self.frame_shape[1]), dtype=np.uint8), + ) + + # create minimal PTZ metrics stub to satisfy detector checks + class _Stub: + def __init__(self, value=False): + self.value = value + + def is_set(self): + return bool(self.value) + + class DummyPTZ: + def __init__(self): + self.autotracker_enabled = _Stub(False) + self.motor_stopped = _Stub(False) + self.stop_time = _Stub(0) + + self.detector = ImprovedMotionDetector( + self.frame_shape, self.config, fps=30, ptz_metrics=DummyPTZ() + ) + + # establish a baseline frame (all zeros) + base_frame = np.zeros( + (self.frame_shape[0], self.frame_shape[1]), dtype=np.uint8 + ) + self.detector.detect(base_frame) + + def _half_change_frame(self) -> np.ndarray: + """Produce a frame where roughly half of the pixels are different.""" + frame = np.zeros((self.frame_shape[0], self.frame_shape[1]), dtype=np.uint8) + # flip the top half to white + frame[: self.frame_shape[0] // 2, :] = 255 + return frame + + def test_skip_motion_threshold_default(self): + """With the default (None) setting, motion should always be reported.""" + frame = self._half_change_frame() + boxes = self.detector.detect(frame) + self.assertTrue( + boxes, "Expected motion boxes when skip threshold is unset (disabled)" + ) + + def test_skip_motion_threshold_applied(self): + """Setting a low skip threshold should prevent any boxes from being returned.""" + # change the config and update the detector reference + self.config.skip_motion_threshold = 0.4 + self.detector.config = self.config + self.detector.update_mask() + + frame = self._half_change_frame() + boxes = self.detector.detect(frame) + self.assertEqual( + boxes, + [], + "Motion boxes should be empty when scene change exceeds skip threshold", + ) + + def test_skip_motion_threshold_does_not_affect_calibration(self): + """Even when skipping, the detector should go into calibrating state.""" + 
self.config.skip_motion_threshold = 0.4 + self.detector.config = self.config + self.detector.update_mask() + + frame = self._half_change_frame() + _ = self.detector.detect(frame) + self.assertTrue( + self.detector.calibrating, + "Detector should be in calibrating state after skip event", + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/frigate/test/test_preview_loader.py b/frigate/test/test_preview_loader.py new file mode 100644 index 000000000..e2062fce1 --- /dev/null +++ b/frigate/test/test_preview_loader.py @@ -0,0 +1,80 @@ +import os +import shutil +import unittest + +from frigate.output.preview import ( + PREVIEW_CACHE_DIR, + PREVIEW_FRAME_TYPE, + get_most_recent_preview_frame, +) + + +class TestPreviewLoader(unittest.TestCase): + def setUp(self): + if os.path.exists(PREVIEW_CACHE_DIR): + shutil.rmtree(PREVIEW_CACHE_DIR) + os.makedirs(PREVIEW_CACHE_DIR) + + def tearDown(self): + if os.path.exists(PREVIEW_CACHE_DIR): + shutil.rmtree(PREVIEW_CACHE_DIR) + + def test_get_most_recent_preview_frame_missing(self): + self.assertIsNone(get_most_recent_preview_frame("test_camera")) + + def test_get_most_recent_preview_frame_exists(self): + camera = "test_camera" + # create dummy preview files + for ts in ["1000.0", "2000.0", "1500.0"]: + with open( + os.path.join( + PREVIEW_CACHE_DIR, f"preview_{camera}-{ts}.{PREVIEW_FRAME_TYPE}" + ), + "w", + ) as f: + f.write(f"test_{ts}") + + expected_path = os.path.join( + PREVIEW_CACHE_DIR, f"preview_{camera}-2000.0.{PREVIEW_FRAME_TYPE}" + ) + self.assertEqual(get_most_recent_preview_frame(camera), expected_path) + + def test_get_most_recent_preview_frame_before(self): + camera = "test_camera" + # create dummy preview files + for ts in ["1000.0", "2000.0"]: + with open( + os.path.join( + PREVIEW_CACHE_DIR, f"preview_{camera}-{ts}.{PREVIEW_FRAME_TYPE}" + ), + "w", + ) as f: + f.write(f"test_{ts}") + + # Test finding frame before or at 1500 + expected_path = os.path.join( + PREVIEW_CACHE_DIR, 
f"preview_{camera}-1000.0.{PREVIEW_FRAME_TYPE}" + ) + self.assertEqual( + get_most_recent_preview_frame(camera, before=1500.0), expected_path + ) + + # Test finding frame before or at 999 + self.assertIsNone(get_most_recent_preview_frame(camera, before=999.0)) + + def test_get_most_recent_preview_frame_other_camera(self): + camera = "test_camera" + other_camera = "other_camera" + with open( + os.path.join( + PREVIEW_CACHE_DIR, f"preview_{other_camera}-3000.0.{PREVIEW_FRAME_TYPE}" + ), + "w", + ) as f: + f.write("test") + + self.assertIsNone(get_most_recent_preview_frame(camera)) + + def test_get_most_recent_preview_frame_no_directory(self): + shutil.rmtree(PREVIEW_CACHE_DIR) + self.assertIsNone(get_most_recent_preview_frame("test_camera")) diff --git a/frigate/timeline.py b/frigate/timeline.py index cf2f5e8c7..3ec866176 100644 --- a/frigate/timeline.py +++ b/frigate/timeline.py @@ -86,7 +86,9 @@ class TimelineProcessor(threading.Thread): event_data: dict[Any, Any], ) -> bool: """Handle object detection.""" - camera_config = self.config.cameras[camera] + camera_config = self.config.cameras.get(camera) + if camera_config is None: + return False event_id = event_data["id"] # Base timeline entry data that all entries will share diff --git a/frigate/track/object_processing.py b/frigate/track/object_processing.py index e0ee74228..a699fab23 100644 --- a/frigate/track/object_processing.py +++ b/frigate/track/object_processing.py @@ -185,7 +185,7 @@ class TrackedObjectProcessor(threading.Thread): def snapshot(camera: str, obj: TrackedObject) -> bool: mqtt_config: CameraMqttConfig = self.config.cameras[camera].mqtt if mqtt_config.enabled and self.should_mqtt_snapshot(camera, obj): - jpg_bytes = obj.get_img_bytes( + jpg_bytes, _ = obj.get_img_bytes( ext="jpg", timestamp=mqtt_config.timestamp, bounding_box=mqtt_config.bounding_box, @@ -515,6 +515,7 @@ class TrackedObjectProcessor(threading.Thread): duration, source_type, draw, + pre_capture, ) = payload # save the snapshot 
image @@ -522,6 +523,11 @@ class TrackedObjectProcessor(threading.Thread): None, event_id, label, draw ) end_time = frame_time + duration if duration is not None else None + start_time = ( + frame_time - self.config.cameras[camera_name].record.event_pre_capture + if pre_capture is None + else frame_time - pre_capture + ) # send event to event maintainer self.event_sender.publish( @@ -536,8 +542,7 @@ class TrackedObjectProcessor(threading.Thread): "sub_label": sub_label, "score": score, "camera": camera_name, - "start_time": frame_time - - self.config.cameras[camera_name].record.event_pre_capture, + "start_time": start_time, "end_time": end_time, "has_clip": self.config.cameras[camera_name].record.enabled and include_recording, @@ -685,9 +690,13 @@ class TrackedObjectProcessor(threading.Thread): self.create_camera_state(camera) elif "remove" in updated_topics: for camera in updated_topics["remove"]: - camera_state = self.camera_states[camera] - camera_state.shutdown() + removed_camera_state = self.camera_states[camera] + removed_camera_state.shutdown() self.camera_states.pop(camera) + self.camera_activity.pop(camera, None) + self.last_motion_detected.pop(camera, None) + + self.requestor.send_data(UPDATE_CAMERA_ACTIVITY, self.camera_activity) # manage camera disabled state for camera, config in self.config.cameras.items(): @@ -695,6 +704,10 @@ class TrackedObjectProcessor(threading.Thread): continue current_enabled = config.enabled + camera_state = self.camera_states.get(camera) + if camera_state is None: + continue + camera_state = self.camera_states[camera] if camera_state.prev_enabled and not current_enabled: @@ -747,7 +760,11 @@ class TrackedObjectProcessor(threading.Thread): except queue.Empty: continue - if not self.config.cameras[camera].enabled: + camera_config = self.config.cameras.get(camera) + if camera_config is None: + continue + + if not camera_config.enabled: logger.debug(f"Camera {camera} disabled, skipping update") continue diff --git 
a/frigate/track/tracked_object.py b/frigate/track/tracked_object.py index a95221bbd..4eb600fb8 100644 --- a/frigate/track/tracked_object.py +++ b/frigate/track/tracked_object.py @@ -16,7 +16,7 @@ from frigate.config import ( SnapshotsConfig, UIConfig, ) -from frigate.const import CLIPS_DIR, THUMB_DIR +from frigate.const import CLIPS_DIR, REPLAY_CAMERA_PREFIX, THUMB_DIR from frigate.detectors.detector_config import ModelConfig from frigate.review.types import SeverityEnum from frigate.util.builtin import sanitize_float @@ -188,6 +188,10 @@ class TrackedObject: # check each zone for name, zone in self.camera_config.zones.items(): + # skip disabled zones + if not zone.enabled: + continue + # if the zone is not for this object type, skip if len(zone.objects) > 0 and obj_data["label"] not in zone.objects: continue @@ -434,7 +438,7 @@ class TrackedObject: return count > (self.camera_config.detect.stationary.threshold or 50) def get_thumbnail(self, ext: str) -> bytes | None: - img_bytes = self.get_img_bytes( + img_bytes, _ = self.get_img_bytes( ext, timestamp=False, bounding_box=False, crop=True, height=175 ) @@ -475,20 +479,21 @@ class TrackedObject: crop: bool = False, height: int | None = None, quality: int | None = None, - ) -> bytes | None: + ) -> tuple[bytes | None, float | None]: if self.thumbnail_data is None: - return None + return None, None try: + frame_time = self.thumbnail_data["frame_time"] best_frame = cv2.cvtColor( - self.frame_cache[self.thumbnail_data["frame_time"]]["frame"], + self.frame_cache[frame_time]["frame"], cv2.COLOR_YUV2BGR_I420, ) except KeyError: logger.warning( - f"Unable to create jpg because frame {self.thumbnail_data['frame_time']} is not in the cache" + f"Unable to create jpg because frame {frame_time} is not in the cache" ) - return None + return None, None if bounding_box: thickness = 2 @@ -570,13 +575,13 @@ class TrackedObject: ret, jpg = cv2.imencode(f".{ext}", best_frame, quality_params) if ret: - return jpg.tobytes() + return 
jpg.tobytes(), frame_time else: - return None + return None, None def write_snapshot_to_disk(self) -> None: snapshot_config: SnapshotsConfig = self.camera_config.snapshots - jpg_bytes = self.get_img_bytes( + jpg_bytes, _ = self.get_img_bytes( ext="jpg", timestamp=snapshot_config.timestamp, bounding_box=snapshot_config.bounding_box, @@ -616,6 +621,9 @@ class TrackedObject: if not self.camera_config.name: return + if self.camera_config.name.startswith(REPLAY_CAMERA_PREFIX): + return + directory = os.path.join(THUMB_DIR, self.camera_config.name) if not os.path.exists(directory): diff --git a/frigate/types.py b/frigate/types.py index 6c5135616..77bb50845 100644 --- a/frigate/types.py +++ b/frigate/types.py @@ -26,6 +26,15 @@ class ModelStatusTypesEnum(str, Enum): failed = "failed" +class JobStatusTypesEnum(str, Enum): + pending = "pending" + queued = "queued" + running = "running" + success = "success" + failed = "failed" + cancelled = "cancelled" + + class TrackedObjectUpdateTypesEnum(str, Enum): description = "description" face = "face" diff --git a/frigate/util/builtin.py b/frigate/util/builtin.py index 867d2533d..aa2417a5c 100644 --- a/frigate/util/builtin.py +++ b/frigate/util/builtin.py @@ -84,7 +84,8 @@ def deep_merge(dct1: dict, dct2: dict, override=False, merge_lists=False) -> dic """ :param dct1: First dict to merge :param dct2: Second dict to merge - :param override: if same key exists in both dictionaries, should override? otherwise ignore. (default=True) + :param override: if same key exists in both dictionaries, should override? otherwise ignore. + :param merge_lists: if True, lists will be merged. 
:return: The merge dictionary """ merged = copy.deepcopy(dct1) @@ -96,6 +97,8 @@ def deep_merge(dct1: dict, dct2: dict, override=False, merge_lists=False) -> dic elif isinstance(v1, list) and isinstance(v2, list): if merge_lists: merged[k] = v1 + v2 + elif override: + merged[k] = copy.deepcopy(v2) else: if override: merged[k] = copy.deepcopy(v2) @@ -195,7 +198,8 @@ def flatten_config_data( ) -> Dict[str, Any]: items = [] for key, value in config_data.items(): - new_key = f"{parent_key}.{key}" if parent_key else key + escaped_key = escape_config_key_segment(str(key)) + new_key = f"{parent_key}.{escaped_key}" if parent_key else escaped_key if isinstance(value, dict): items.extend(flatten_config_data(value, new_key).items()) else: @@ -203,6 +207,41 @@ def flatten_config_data( return dict(items) +def escape_config_key_segment(segment: str) -> str: + """Escape dots and backslashes so they can be treated as literal key chars.""" + return segment.replace("\\", "\\\\").replace(".", "\\.") + + +def split_config_key_path(key_path_str: str) -> list[str]: + """Split a dotted config path, honoring \\. 
as a literal dot in a key.""" + parts: list[str] = [] + current: list[str] = [] + escaped = False + + for char in key_path_str: + if escaped: + current.append(char) + escaped = False + continue + + if char == "\\": + escaped = True + continue + + if char == ".": + parts.append("".join(current)) + current = [] + continue + + current.append(char) + + if escaped: + current.append("\\") + + parts.append("".join(current)) + return parts + + def update_yaml_file_bulk(file_path: str, updates: Dict[str, Any]): yaml = YAML() yaml.indent(mapping=2, sequence=4, offset=2) @@ -218,7 +257,7 @@ def update_yaml_file_bulk(file_path: str, updates: Dict[str, Any]): # Apply all updates for key_path_str, new_value in updates.items(): - key_path = key_path_str.split(".") + key_path = split_config_key_path(key_path_str) for i in range(len(key_path)): try: index = int(key_path[i]) diff --git a/frigate/util/config.py b/frigate/util/config.py index c3d796397..238671563 100644 --- a/frigate/util/config.py +++ b/frigate/util/config.py @@ -9,11 +9,12 @@ from typing import Any, Optional, Union from ruamel.yaml import YAML from frigate.const import CONFIG_DIR, EXPORT_DIR +from frigate.util.builtin import deep_merge from frigate.util.services import get_video_properties logger = logging.getLogger(__name__) -CURRENT_CONFIG_VERSION = "0.17-0" +CURRENT_CONFIG_VERSION = "0.18-0" DEFAULT_CONFIG_FILE = os.path.join(CONFIG_DIR, "config.yml") @@ -98,6 +99,13 @@ def migrate_frigate_config(config_file: str): yaml.dump(new_config, f) previous_version = "0.17-0" + if previous_version < "0.18-0": + logger.info(f"Migrating frigate config from {previous_version} to 0.18-0...") + new_config = migrate_018_0(config) + with open(config_file, "w") as f: + yaml.dump(new_config, f) + previous_version = "0.18-0" + logger.info("Finished frigate config migration...") @@ -427,6 +435,161 @@ def migrate_017_0(config: dict[str, dict[str, Any]]) -> dict[str, dict[str, Any] return new_config +def _convert_legacy_mask_to_dict( 
+ mask: Optional[Union[str, list]], mask_type: str = "motion_mask", label: str = "" +) -> dict[str, dict[str, Any]]: + """Convert legacy mask format (str or list[str]) to new dict format. + + Args: + mask: Legacy mask format (string or list of strings) + mask_type: Type of mask for naming ("motion_mask" or "object_mask") + label: Optional label for object masks (e.g., "person") + + Returns: + Dictionary with mask_id as key and mask config as value + """ + if not mask: + return {} + + result = {} + + if isinstance(mask, str): + if mask: + mask_id = f"{mask_type}_1" + friendly_name = ( + f"Object Mask 1 ({label})" + if label + else f"{mask_type.replace('_', ' ').title()} 1" + ) + result[mask_id] = { + "friendly_name": friendly_name, + "enabled": True, + "coordinates": mask, + } + elif isinstance(mask, list): + for i, coords in enumerate(mask): + if coords: + mask_id = f"{mask_type}_{i + 1}" + friendly_name = ( + f"Object Mask {i + 1} ({label})" + if label + else f"{mask_type.replace('_', ' ').title()} {i + 1}" + ) + result[mask_id] = { + "friendly_name": friendly_name, + "enabled": True, + "coordinates": coords, + } + + return result + + +def migrate_018_0(config: dict[str, dict[str, Any]]) -> dict[str, dict[str, Any]]: + """Handle migrating frigate config to 0.18-0""" + new_config = config.copy() + + # Migrate GenAI to new format + genai = new_config.get("genai") + + if genai and genai.get("provider"): + genai["roles"] = ["embeddings", "vision", "tools"] + new_config["genai"] = {"default": genai} + + # Remove deprecated sync_recordings from global record config + if new_config.get("record", {}).get("sync_recordings") is not None: + del new_config["record"]["sync_recordings"] + + # Remove deprecated timelapse_args from global record export config + if new_config.get("record", {}).get("export", {}).get("timelapse_args") is not None: + del new_config["record"]["export"]["timelapse_args"] + # Remove export section if empty + if not new_config.get("record", 
{}).get("export"): + del new_config["record"]["export"] + # Remove record section if empty + if not new_config.get("record"): + del new_config["record"] + + # Migrate global motion masks + global_motion = new_config.get("motion", {}) + if global_motion and "mask" in global_motion: + mask = global_motion.get("mask") + if mask is not None and not isinstance(mask, dict): + new_config["motion"]["mask"] = _convert_legacy_mask_to_dict( + mask, "motion_mask" + ) + + # Migrate global object masks + global_objects = new_config.get("objects", {}) + if global_objects and "mask" in global_objects: + mask = global_objects.get("mask") + if mask is not None and not isinstance(mask, dict): + new_config["objects"]["mask"] = _convert_legacy_mask_to_dict( + mask, "object_mask" + ) + + # Migrate global object filters masks + if global_objects and "filters" in global_objects: + for obj_name, filter_config in global_objects.get("filters", {}).items(): + if isinstance(filter_config, dict) and "mask" in filter_config: + mask = filter_config.get("mask") + if mask is not None and not isinstance(mask, dict): + new_config["objects"]["filters"][obj_name]["mask"] = ( + _convert_legacy_mask_to_dict(mask, "object_mask", obj_name) + ) + + # Remove deprecated sync_recordings and migrate masks for camera-specific configs + for name, camera in config.get("cameras", {}).items(): + camera_config: dict[str, dict[str, Any]] = camera.copy() + + if camera_config.get("record", {}).get("sync_recordings") is not None: + del camera_config["record"]["sync_recordings"] + + if ( + camera_config.get("record", {}).get("export", {}).get("timelapse_args") + is not None + ): + del camera_config["record"]["export"]["timelapse_args"] + # Remove export section if empty + if not camera_config.get("record", {}).get("export"): + del camera_config["record"]["export"] + # Remove record section if empty + if not camera_config.get("record"): + del camera_config["record"] + + # Migrate camera motion masks + camera_motion = 
camera_config.get("motion", {}) + if camera_motion and "mask" in camera_motion: + mask = camera_motion.get("mask") + if mask is not None and not isinstance(mask, dict): + camera_config["motion"]["mask"] = _convert_legacy_mask_to_dict( + mask, "motion_mask" + ) + + # Migrate camera global object masks + camera_objects = camera_config.get("objects", {}) + if camera_objects and "mask" in camera_objects: + mask = camera_objects.get("mask") + if mask is not None and not isinstance(mask, dict): + camera_config["objects"]["mask"] = _convert_legacy_mask_to_dict( + mask, "object_mask" + ) + + # Migrate camera object filter masks + if camera_objects and "filters" in camera_objects: + for obj_name, filter_config in camera_objects.get("filters", {}).items(): + if isinstance(filter_config, dict) and "mask" in filter_config: + mask = filter_config.get("mask") + if mask is not None and not isinstance(mask, dict): + camera_config["objects"]["filters"][obj_name]["mask"] = ( + _convert_legacy_mask_to_dict(mask, "object_mask", obj_name) + ) + + new_config["cameras"][name] = camera_config + + new_config["version"] = "0.18-0" + return new_config + + def get_relative_coordinates( mask: Optional[Union[str, list]], frame_shape: tuple[int, int] ) -> Union[str, list]: @@ -526,3 +689,78 @@ class StreamInfoRetriever: info = asyncio.run(get_video_properties(ffmpeg, path)) self.stream_cache[path] = info return info + + +def apply_section_update(camera_config, section: str, update: dict) -> Optional[str]: + """Merge an update dict into a camera config section and rebuild runtime variants. + + For motion and object filter sections, the plain Pydantic models are rebuilt + as RuntimeMotionConfig / RuntimeFilterConfig so that rasterized numpy masks + are recomputed. This mirrors the logic in FrigateConfig.post_validation. + + Args: + camera_config: The CameraConfig instance to update. + section: Config section name (e.g. "motion", "objects"). + update: Nested dict of field updates to merge. 
+ + Returns: + None on success, or an error message string on failure. + """ + from frigate.config.config import RuntimeFilterConfig, RuntimeMotionConfig + + current = getattr(camera_config, section, None) + if current is None: + return f"Section '{section}' not found on camera '{camera_config.name}'" + + try: + frame_shape = camera_config.frame_shape + + if section == "motion": + merged = deep_merge( + current.model_dump(exclude_unset=True, exclude={"rasterized_mask"}), + update, + override=True, + ) + camera_config.motion = RuntimeMotionConfig( + frame_shape=frame_shape, **merged + ) + + elif section == "objects": + merged = deep_merge( + current.model_dump( + exclude={"filters": {"__all__": {"rasterized_mask"}}} + ), + update, + override=True, + ) + new_objects = current.__class__.model_validate(merged) + + # Preserve private _all_objects from original config + try: + new_objects._all_objects = current._all_objects + except AttributeError: + pass + + # Rebuild RuntimeFilterConfig with merged global + per-object masks + for obj_name, filt in new_objects.filters.items(): + merged_mask = dict(filt.mask) + if new_objects.mask: + for gid, gmask in new_objects.mask.items(): + merged_mask[f"global_{gid}"] = gmask + + new_objects.filters[obj_name] = RuntimeFilterConfig( + frame_shape=frame_shape, + mask=merged_mask, + **filt.model_dump(exclude_unset=True, exclude={"mask", "raw_mask"}), + ) + camera_config.objects = new_objects + + else: + merged = deep_merge(current.model_dump(), update, override=True) + setattr(camera_config, section, current.__class__.model_validate(merged)) + + except Exception: + logger.exception("Config validation error") + return "Validation error. Check logs for details." 
+ + return None diff --git a/frigate/util/media.py b/frigate/util/media.py new file mode 100644 index 000000000..c7de85c9f --- /dev/null +++ b/frigate/util/media.py @@ -0,0 +1,808 @@ +"""Recordings Utilities.""" + +import datetime +import errno +import logging +import os +from dataclasses import dataclass, field +from pathlib import Path +from typing import Iterable + +from peewee import DatabaseError, chunked + +from frigate.const import CLIPS_DIR, EXPORT_DIR, RECORD_DIR, THUMB_DIR +from frigate.models import ( + Event, + Export, + Previews, + Recordings, + RecordingsToDelete, + ReviewSegment, +) + +logger = logging.getLogger(__name__) + + +# Safety threshold - abort if more than 50% of files would be deleted +SAFETY_THRESHOLD = 0.5 + + +@dataclass +class SyncResult: + """Result of a sync operation.""" + + media_type: str + files_checked: int = 0 + orphans_found: int = 0 + orphans_deleted: int = 0 + orphan_paths: list[str] = field(default_factory=list) + aborted: bool = False + error: str | None = None + + def to_dict(self) -> dict: + return { + "media_type": self.media_type, + "files_checked": self.files_checked, + "orphans_found": self.orphans_found, + "orphans_deleted": self.orphans_deleted, + "aborted": self.aborted, + "error": self.error, + } + + +def remove_empty_directories(root: Path, paths: Iterable[Path]) -> None: + """ + Remove directories if they exist and are empty. + Silently ignores non-existent and non-empty directories. + Attempts to remove parent directories as well, stopping at the given root. 
+ """ + count = 0 + while True: + parents = set() + for path in paths: + if path == root: + continue + + try: + path.rmdir() + count += 1 + except FileNotFoundError: + pass + except OSError as e: + if e.errno == errno.ENOTEMPTY: + continue + raise + + parents.add(path.parent) + + if not parents: + break + + paths = parents + + logger.debug("Removed {count} empty directories") + + +def sync_recordings( + limited: bool = False, dry_run: bool = False, force: bool = False +) -> SyncResult: + """Sync recordings between the database and disk using the SyncResult format.""" + + result = SyncResult(media_type="recordings") + + try: + logger.debug("Start sync recordings.") + + # start checking on the hour 36 hours ago + check_point = datetime.datetime.now().replace( + minute=0, second=0, microsecond=0 + ).astimezone(datetime.timezone.utc) - datetime.timedelta(hours=36) + + # Gather DB recordings to inspect + if limited: + recordings_query = Recordings.select(Recordings.id, Recordings.path).where( + Recordings.start_time >= check_point.timestamp() + ) + else: + recordings_query = Recordings.select(Recordings.id, Recordings.path) + + recordings_count = recordings_query.count() + page_size = 1000 + num_pages = (recordings_count + page_size - 1) // page_size + recordings_to_delete: list[dict] = [] + + for page in range(num_pages): + for recording in recordings_query.paginate(page, page_size): + if not os.path.exists(recording.path): + recordings_to_delete.append( + {"id": recording.id, "path": recording.path} + ) + + result.orphans_found += len(recordings_to_delete) + result.orphan_paths.extend( + [ + recording["path"] + for recording in recordings_to_delete + if recording.get("path") + ] + ) + + if ( + recordings_count + and len(recordings_to_delete) / recordings_count > SAFETY_THRESHOLD + ): + if force: + logger.warning( + f"Deleting {(len(recordings_to_delete) / max(1, recordings_count) * 100):.2f}% of recordings DB entries (force=True, bypassing safety threshold)" + ) + 
else: + logger.warning( + f"Deleting {(len(recordings_to_delete) / max(1, recordings_count) * 100):.2f}% of recordings DB entries, could be due to configuration error. Aborting..." + ) + result.aborted = True + return result + + if recordings_to_delete and not dry_run: + logger.info( + f"Deleting {len(recordings_to_delete)} recording DB entries with missing files" + ) + + RecordingsToDelete.create_table(temporary=True) + + max_inserts = 1000 + for batch in chunked(recordings_to_delete, max_inserts): + RecordingsToDelete.insert_many(batch).execute() + + try: + deleted = ( + Recordings.delete() + .where( + Recordings.id.in_( + RecordingsToDelete.select(RecordingsToDelete.id) + ) + ) + .execute() + ) + result.orphans_deleted += int(deleted) + except DatabaseError as e: + logger.error(f"Database error during recordings db cleanup: {e}") + result.error = str(e) + result.aborted = True + return result + + if result.aborted: + logger.warning("Recording DB sync aborted; skipping file cleanup.") + return result + + # Only try to cleanup files if db cleanup was successful or dry_run + if limited: + # get recording files from last 36 hours + hour_check = f"{RECORD_DIR}/{check_point.strftime('%Y-%m-%d/%H')}" + files_on_disk = { + os.path.join(root, file) + for root, _, files in os.walk(RECORD_DIR) + for file in files + if root > hour_check + } + else: + # get all recordings files on disk and put them in a set + files_on_disk = { + os.path.join(root, file) + for root, _, files in os.walk(RECORD_DIR) + for file in files + } + + result.files_checked = len(files_on_disk) + + files_to_delete: list[str] = [] + for file in files_on_disk: + if not Recordings.select().where(Recordings.path == file).exists(): + files_to_delete.append(file) + + result.orphans_found += len(files_to_delete) + result.orphan_paths.extend(files_to_delete) + + if ( + files_on_disk + and len(files_to_delete) / len(files_on_disk) > SAFETY_THRESHOLD + ): + if force: + logger.warning( + f"Deleting 
{(len(files_to_delete) / max(1, len(files_on_disk)) * 100):.2f}% of recordings files (force=True, bypassing safety threshold)" + ) + else: + logger.warning( + f"Deleting {(len(files_to_delete) / max(1, len(files_on_disk)) * 100):.2f}% of recordings files, could be due to configuration error. Aborting..." + ) + result.aborted = True + return result + + if dry_run: + logger.info( + f"Recordings sync (dry run): Found {len(files_to_delete)} orphaned files" + ) + return result + + # Delete orphans + logger.info(f"Deleting {len(files_to_delete)} orphaned recordings files") + for file in files_to_delete: + try: + os.unlink(file) + result.orphans_deleted += 1 + except OSError as e: + logger.error(f"Failed to delete {file}: {e}") + + logger.debug("End sync recordings.") + + except Exception as e: + logger.error(f"Error syncing recordings: {e}") + result.error = str(e) + + return result + + +def sync_event_snapshots(dry_run: bool = False, force: bool = False) -> SyncResult: + """Sync event snapshots - delete files not referenced by any event. 
+ + Event snapshots are stored at: CLIPS_DIR/{camera}-{event_id}.jpg + Also checks for clean variants: {camera}-{event_id}-clean.webp and -clean.png + """ + result = SyncResult(media_type="event_snapshots") + + try: + # Get all event IDs with snapshots from DB + events_with_snapshots = set( + f"{e.camera}-{e.id}" + for e in Event.select(Event.id, Event.camera).where( + Event.has_snapshot == True + ) + ) + + # Find snapshot files on disk (directly in CLIPS_DIR, not subdirectories) + snapshot_files: list[tuple[str, str]] = [] # (full_path, base_name) + if os.path.isdir(CLIPS_DIR): + for file in os.listdir(CLIPS_DIR): + file_path = os.path.join(CLIPS_DIR, file) + if os.path.isfile(file_path) and file.endswith( + (".jpg", "-clean.webp", "-clean.png") + ): + # Extract base name (camera-event_id) from filename + base_name = file + for suffix in ["-clean.webp", "-clean.png", ".jpg"]: + if file.endswith(suffix): + base_name = file[: -len(suffix)] + break + snapshot_files.append((file_path, base_name)) + + result.files_checked = len(snapshot_files) + + # Find orphans + orphans: list[str] = [] + for file_path, base_name in snapshot_files: + if base_name not in events_with_snapshots: + orphans.append(file_path) + + result.orphans_found = len(orphans) + result.orphan_paths = orphans + + if len(orphans) == 0: + return result + + # Safety check + if ( + result.files_checked > 0 + and len(orphans) / result.files_checked > SAFETY_THRESHOLD + ): + if force: + logger.warning( + f"Event snapshots sync: Would delete {len(orphans)}/{result.files_checked} " + f"({len(orphans) / result.files_checked * 100:.2f}%) files (force=True, bypassing safety threshold)." + ) + else: + logger.warning( + f"Event snapshots sync: Would delete {len(orphans)}/{result.files_checked} " + f"({len(orphans) / result.files_checked * 100:.2f}%) files. " + "Aborting due to safety threshold." 
+ ) + result.aborted = True + return result + + if dry_run: + logger.info( + f"Event snapshots sync (dry run): Found {len(orphans)} orphaned files" + ) + return result + + # Delete orphans + logger.info(f"Deleting {len(orphans)} orphaned event snapshot files") + for file_path in orphans: + try: + os.unlink(file_path) + result.orphans_deleted += 1 + except OSError as e: + logger.error(f"Failed to delete {file_path}: {e}") + + except Exception as e: + logger.error(f"Error syncing event snapshots: {e}") + result.error = str(e) + + return result + + +def sync_event_thumbnails(dry_run: bool = False, force: bool = False) -> SyncResult: + """Sync event thumbnails - delete files not referenced by any event. + + Event thumbnails are stored at: THUMB_DIR/{camera}/{event_id}.webp + Only events without inline thumbnail (thumbnail field is None/empty) use files. + """ + result = SyncResult(media_type="event_thumbnails") + + try: + # Get all events that use file-based thumbnails + # Events with thumbnail field populated don't need files + events_with_file_thumbs = set( + (e.camera, e.id) + for e in Event.select(Event.id, Event.camera, Event.thumbnail).where( + (Event.thumbnail.is_null(True)) | (Event.thumbnail == "") + ) + ) + + # Find thumbnail files on disk + thumbnail_files: list[ + tuple[str, str, str] + ] = [] # (full_path, camera, event_id) + if os.path.isdir(THUMB_DIR): + for camera_dir in os.listdir(THUMB_DIR): + camera_path = os.path.join(THUMB_DIR, camera_dir) + if not os.path.isdir(camera_path): + continue + for file in os.listdir(camera_path): + if file.endswith(".webp"): + event_id = file[:-5] # Remove .webp + file_path = os.path.join(camera_path, file) + thumbnail_files.append((file_path, camera_dir, event_id)) + + result.files_checked = len(thumbnail_files) + + # Find orphans - files where event doesn't exist or event has inline thumbnail + orphans: list[str] = [] + for file_path, camera, event_id in thumbnail_files: + if (camera, event_id) not in 
events_with_file_thumbs: + # Check if event exists with inline thumbnail + event_exists = Event.select().where(Event.id == event_id).exists() + if not event_exists: + orphans.append(file_path) + # If event exists with inline thumbnail, the file is also orphaned + elif event_exists: + event = Event.get_or_none(Event.id == event_id) + if event and event.thumbnail: + orphans.append(file_path) + + result.orphans_found = len(orphans) + result.orphan_paths = orphans + + if len(orphans) == 0: + return result + + # Safety check + if ( + result.files_checked > 0 + and len(orphans) / result.files_checked > SAFETY_THRESHOLD + ): + if force: + logger.warning( + f"Event thumbnails sync: Would delete {len(orphans)}/{result.files_checked} " + f"({len(orphans) / result.files_checked * 100:.2f}%) files (force=True, bypassing safety threshold)." + ) + else: + logger.warning( + f"Event thumbnails sync: Would delete {len(orphans)}/{result.files_checked} " + f"({len(orphans) / result.files_checked * 100:.2f}%) files. " + "Aborting due to safety threshold." + ) + result.aborted = True + return result + + if dry_run: + logger.info( + f"Event thumbnails sync (dry run): Found {len(orphans)} orphaned files" + ) + return result + + # Delete orphans + logger.info(f"Deleting {len(orphans)} orphaned event thumbnail files") + for file_path in orphans: + try: + os.unlink(file_path) + result.orphans_deleted += 1 + except OSError as e: + logger.error(f"Failed to delete {file_path}: {e}") + + except Exception as e: + logger.error(f"Error syncing event thumbnails: {e}") + result.error = str(e) + + return result + + +def sync_review_thumbnails(dry_run: bool = False, force: bool = False) -> SyncResult: + """Sync review segment thumbnails - delete files not referenced by any review segment. 
+ + Review thumbnails are stored at: CLIPS_DIR/review/thumb-{camera}-{review_id}.webp + The full path is stored in ReviewSegment.thumb_path + """ + result = SyncResult(media_type="review_thumbnails") + + try: + # Get all thumb paths from DB + review_thumb_paths = set( + r.thumb_path + for r in ReviewSegment.select(ReviewSegment.thumb_path) + if r.thumb_path + ) + + # Find review thumbnail files on disk + review_dir = os.path.join(CLIPS_DIR, "review") + thumbnail_files: list[str] = [] + if os.path.isdir(review_dir): + for file in os.listdir(review_dir): + if file.startswith("thumb-") and file.endswith(".webp"): + file_path = os.path.join(review_dir, file) + thumbnail_files.append(file_path) + + result.files_checked = len(thumbnail_files) + + # Find orphans + orphans: list[str] = [] + for file_path in thumbnail_files: + if file_path not in review_thumb_paths: + orphans.append(file_path) + + result.orphans_found = len(orphans) + result.orphan_paths = orphans + + if len(orphans) == 0: + return result + + # Safety check + if ( + result.files_checked > 0 + and len(orphans) / result.files_checked > SAFETY_THRESHOLD + ): + if force: + logger.warning( + f"Review thumbnails sync: Would delete {len(orphans)}/{result.files_checked} " + f"({len(orphans) / result.files_checked * 100:.2f}%) files (force=True, bypassing safety threshold)." + ) + else: + logger.warning( + f"Review thumbnails sync: Would delete {len(orphans)}/{result.files_checked} " + f"({len(orphans) / result.files_checked * 100:.2f}%) files. " + "Aborting due to safety threshold." 
+ ) + result.aborted = True + return result + + if dry_run: + logger.info( + f"Review thumbnails sync (dry run): Found {len(orphans)} orphaned files" + ) + return result + + # Delete orphans + logger.info(f"Deleting {len(orphans)} orphaned review thumbnail files") + for file_path in orphans: + try: + os.unlink(file_path) + result.orphans_deleted += 1 + except OSError as e: + logger.error(f"Failed to delete {file_path}: {e}") + + except Exception as e: + logger.error(f"Error syncing review thumbnails: {e}") + result.error = str(e) + + return result + + +def sync_previews(dry_run: bool = False, force: bool = False) -> SyncResult: + """Sync preview files - delete files not referenced by any preview record. + + Previews are stored at: CLIPS_DIR/previews/{camera}/*.mp4 + The full path is stored in Previews.path + """ + result = SyncResult(media_type="previews") + + try: + # Get all preview paths from DB + preview_paths = set(p.path for p in Previews.select(Previews.path) if p.path) + + # Find preview files on disk + previews_dir = os.path.join(CLIPS_DIR, "previews") + preview_files: list[str] = [] + if os.path.isdir(previews_dir): + for camera_dir in os.listdir(previews_dir): + camera_path = os.path.join(previews_dir, camera_dir) + if not os.path.isdir(camera_path): + continue + for file in os.listdir(camera_path): + if file.endswith(".mp4"): + file_path = os.path.join(camera_path, file) + preview_files.append(file_path) + + result.files_checked = len(preview_files) + + # Find orphans + orphans: list[str] = [] + for file_path in preview_files: + if file_path not in preview_paths: + orphans.append(file_path) + + result.orphans_found = len(orphans) + result.orphan_paths = orphans + + if len(orphans) == 0: + return result + + # Safety check + if ( + result.files_checked > 0 + and len(orphans) / result.files_checked > SAFETY_THRESHOLD + ): + if force: + logger.warning( + f"Previews sync: Would delete {len(orphans)}/{result.files_checked} " + f"({len(orphans) / 
result.files_checked * 100:.2f}%) files (force=True, bypassing safety threshold)." + ) + else: + logger.warning( + f"Previews sync: Would delete {len(orphans)}/{result.files_checked} " + f"({len(orphans) / result.files_checked * 100:.2f}%) files. " + "Aborting due to safety threshold." + ) + result.aborted = True + return result + + if dry_run: + logger.info(f"Previews sync (dry run): Found {len(orphans)} orphaned files") + return result + + # Delete orphans + logger.info(f"Deleting {len(orphans)} orphaned preview files") + for file_path in orphans: + try: + os.unlink(file_path) + result.orphans_deleted += 1 + except OSError as e: + logger.error(f"Failed to delete {file_path}: {e}") + + except Exception as e: + logger.error(f"Error syncing previews: {e}") + result.error = str(e) + + return result + + +def sync_exports(dry_run: bool = False, force: bool = False) -> SyncResult: + """Sync export files - delete files not referenced by any export record. + + Export videos are stored at: EXPORT_DIR/*.mp4 + Export thumbnails are stored at: CLIPS_DIR/export/*.jpg + The paths are stored in Export.video_path and Export.thumb_path + """ + result = SyncResult(media_type="exports") + + try: + # Get all export paths from DB + export_video_paths = set() + export_thumb_paths = set() + for e in Export.select(Export.video_path, Export.thumb_path): + if e.video_path: + export_video_paths.add(e.video_path) + if e.thumb_path: + export_thumb_paths.add(e.thumb_path) + + # Find export video files on disk + export_files: list[str] = [] + if os.path.isdir(EXPORT_DIR): + for file in os.listdir(EXPORT_DIR): + if file.endswith(".mp4"): + file_path = os.path.join(EXPORT_DIR, file) + export_files.append(file_path) + + # Find export thumbnail files on disk + export_thumb_dir = os.path.join(CLIPS_DIR, "export") + thumb_files: list[str] = [] + if os.path.isdir(export_thumb_dir): + for file in os.listdir(export_thumb_dir): + if file.endswith(".jpg"): + file_path = os.path.join(export_thumb_dir, 
file) + thumb_files.append(file_path) + + result.files_checked = len(export_files) + len(thumb_files) + + # Find orphans + orphans: list[str] = [] + for file_path in export_files: + if file_path not in export_video_paths: + orphans.append(file_path) + for file_path in thumb_files: + if file_path not in export_thumb_paths: + orphans.append(file_path) + + result.orphans_found = len(orphans) + result.orphan_paths = orphans + + if len(orphans) == 0: + return result + + # Safety check + if ( + result.files_checked > 0 + and len(orphans) / result.files_checked > SAFETY_THRESHOLD + ): + if force: + logger.warning( + f"Exports sync: Would delete {len(orphans)}/{result.files_checked} " + f"({len(orphans) / result.files_checked * 100:.2f}%) files (force=True, bypassing safety threshold)." + ) + else: + logger.warning( + f"Exports sync: Would delete {len(orphans)}/{result.files_checked} " + f"({len(orphans) / result.files_checked * 100:.2f}%) files. " + "Aborting due to safety threshold." + ) + result.aborted = True + return result + + if dry_run: + logger.info(f"Exports sync (dry run): Found {len(orphans)} orphaned files") + return result + + # Delete orphans + logger.info(f"Deleting {len(orphans)} orphaned export files") + for file_path in orphans: + try: + os.unlink(file_path) + result.orphans_deleted += 1 + except OSError as e: + logger.error(f"Failed to delete {file_path}: {e}") + + except Exception as e: + logger.error(f"Error syncing exports: {e}") + result.error = str(e) + + return result + + +@dataclass +class MediaSyncResults: + """Combined results from all media sync operations.""" + + event_snapshots: SyncResult | None = None + event_thumbnails: SyncResult | None = None + review_thumbnails: SyncResult | None = None + previews: SyncResult | None = None + exports: SyncResult | None = None + recordings: SyncResult | None = None + + @property + def total_files_checked(self) -> int: + total = 0 + for result in [ + self.event_snapshots, + self.event_thumbnails, + 
self.review_thumbnails, + self.previews, + self.exports, + self.recordings, + ]: + if result: + total += result.files_checked + return total + + @property + def total_orphans_found(self) -> int: + total = 0 + for result in [ + self.event_snapshots, + self.event_thumbnails, + self.review_thumbnails, + self.previews, + self.exports, + self.recordings, + ]: + if result: + total += result.orphans_found + return total + + @property + def total_orphans_deleted(self) -> int: + total = 0 + for result in [ + self.event_snapshots, + self.event_thumbnails, + self.review_thumbnails, + self.previews, + self.exports, + self.recordings, + ]: + if result: + total += result.orphans_deleted + return total + + def to_dict(self) -> dict: + """Convert results to dictionary for API response.""" + results = {} + for name, result in [ + ("event_snapshots", self.event_snapshots), + ("event_thumbnails", self.event_thumbnails), + ("review_thumbnails", self.review_thumbnails), + ("previews", self.previews), + ("exports", self.exports), + ("recordings", self.recordings), + ]: + if result: + results[name] = { + "files_checked": result.files_checked, + "orphans_found": result.orphans_found, + "orphans_deleted": result.orphans_deleted, + "aborted": result.aborted, + "error": result.error, + } + results["totals"] = { + "files_checked": self.total_files_checked, + "orphans_found": self.total_orphans_found, + "orphans_deleted": self.total_orphans_deleted, + } + return results + + +def sync_all_media( + dry_run: bool = False, media_types: list[str] = ["all"], force: bool = False +) -> MediaSyncResults: + """Sync specified media types with the database. + + Args: + dry_run: If True, only report orphans without deleting them. + media_types: List of media types to sync. Can include: 'all', 'event_snapshots', + 'event_thumbnails', 'review_thumbnails', 'previews', 'exports', 'recordings' + force: If True, bypass safety threshold checks. + + Returns: + MediaSyncResults with details of each sync operation. 
+ """ + logger.debug( + f"Starting media sync (dry_run={dry_run}, media_types={media_types}, force={force})" + ) + + results = MediaSyncResults() + + # Determine which media types to sync + sync_all = "all" in media_types + + if sync_all or "event_snapshots" in media_types: + results.event_snapshots = sync_event_snapshots(dry_run=dry_run, force=force) + + if sync_all or "event_thumbnails" in media_types: + results.event_thumbnails = sync_event_thumbnails(dry_run=dry_run, force=force) + + if sync_all or "review_thumbnails" in media_types: + results.review_thumbnails = sync_review_thumbnails(dry_run=dry_run, force=force) + + if sync_all or "previews" in media_types: + results.previews = sync_previews(dry_run=dry_run, force=force) + + if sync_all or "exports" in media_types: + results.exports = sync_exports(dry_run=dry_run, force=force) + + if sync_all or "recordings" in media_types: + results.recordings = sync_recordings(dry_run=dry_run, force=force) + + logger.info( + f"Media sync complete: checked {results.total_files_checked} files, " + f"found {results.total_orphans_found} orphans, " + f"deleted {results.total_orphans_deleted}" + ) + + return results diff --git a/frigate/util/object.py b/frigate/util/object.py index 905745da6..021150132 100644 --- a/frigate/util/object.py +++ b/frigate/util/object.py @@ -248,20 +248,20 @@ def is_object_filtered(obj, objects_to_track, object_filters): if obj_settings.max_ratio < object_ratio: return True - if obj_settings.mask is not None: + if obj_settings.rasterized_mask is not None: # compute the coordinates of the object and make sure # the location isn't outside the bounds of the image (can happen from rounding) object_xmin = object_box[0] object_xmax = object_box[2] object_ymax = object_box[3] - y_location = min(int(object_ymax), len(obj_settings.mask) - 1) + y_location = min(int(object_ymax), len(obj_settings.rasterized_mask) - 1) x_location = min( int((object_xmax + object_xmin) / 2.0), - len(obj_settings.mask[0]) - 1, + 
len(obj_settings.rasterized_mask[0]) - 1, ) # if the object is in a masked location, don't add it to detected objects - if obj_settings.mask[y_location][x_location] == 0: + if obj_settings.rasterized_mask[y_location][x_location] == 0: return True return False diff --git a/frigate/util/rknn_converter.py b/frigate/util/rknn_converter.py index f7ebbf5e6..5660c7601 100644 --- a/frigate/util/rknn_converter.py +++ b/frigate/util/rknn_converter.py @@ -110,6 +110,7 @@ def ensure_torch_dependencies() -> bool: "pip", "install", "--break-system-packages", + "setuptools<81", "torch", "torchvision", ], diff --git a/frigate/util/schema.py b/frigate/util/schema.py new file mode 100644 index 000000000..5ba1bc061 --- /dev/null +++ b/frigate/util/schema.py @@ -0,0 +1,46 @@ +"""JSON schema utilities for Frigate.""" + +from typing import Any, Dict, Type + +from pydantic import BaseModel, TypeAdapter + + +def get_config_schema(config_class: Type[BaseModel]) -> Dict[str, Any]: + """ + Returns the JSON schema for FrigateConfig with polymorphic detectors. + + This utility patches the FrigateConfig schema to include the full polymorphic + definitions for detectors. By default, Pydantic's schema for Dict[str, BaseDetectorConfig] + only includes the base class fields. This function replaces it with a reference + to the DetectorConfig union, which includes all available detector subclasses. 
+ """ + # Import here to ensure all detector plugins are loaded through the detectors module + from frigate.detectors import DetectorConfig + + # Get the base schema for FrigateConfig + schema = config_class.model_json_schema() + + # Get the schema for the polymorphic DetectorConfig union + detector_adapter: TypeAdapter = TypeAdapter(DetectorConfig) + detector_schema = detector_adapter.json_schema() + + # Ensure $defs exists in FrigateConfig schema + if "$defs" not in schema: + schema["$defs"] = {} + + # Merge $defs from DetectorConfig into FrigateConfig schema + # This includes the specific schemas for each detector plugin (OvDetectorConfig, etc.) + if "$defs" in detector_schema: + schema["$defs"].update(detector_schema["$defs"]) + + # Extract the union schema (oneOf/discriminator) and add it as a definition + detector_union_schema = {k: v for k, v in detector_schema.items() if k != "$defs"} + schema["$defs"]["DetectorConfig"] = detector_union_schema + + # Update the 'detectors' property to use the polymorphic DetectorConfig definition + if "detectors" in schema.get("properties", {}): + schema["properties"]["detectors"]["additionalProperties"] = { + "$ref": "#/$defs/DetectorConfig" + } + + return schema diff --git a/frigate/util/services.py b/frigate/util/services.py index 64d83833d..f1eedb01e 100644 --- a/frigate/util/services.py +++ b/frigate/util/services.py @@ -121,7 +121,7 @@ def get_cpu_stats() -> dict[str, dict]: pid = str(process.info["pid"]) try: cpu_percent = process.info["cpu_percent"] - cmdline = process.info["cmdline"] + cmdline = " ".join(process.info["cmdline"]).rstrip() with open(f"/proc/{pid}/stat", "r") as f: stats = f.readline().split() @@ -155,7 +155,7 @@ def get_cpu_stats() -> dict[str, dict]: "cpu": str(cpu_percent), "cpu_average": str(round(cpu_average_usage, 2)), "mem": f"{mem_pct}", - "cmdline": clean_camera_user_pass(" ".join(cmdline)), + "cmdline": clean_camera_user_pass(cmdline), } except Exception: continue @@ -417,12 +417,12 @@ def 
get_openvino_npu_stats() -> Optional[dict[str, str]]: else: usage = 0.0 - return {"npu": f"{round(usage, 2)}", "mem": "-"} + return {"npu": f"{round(usage, 2)}", "mem": "-%"} except (FileNotFoundError, PermissionError, ValueError): return None -def get_rockchip_gpu_stats() -> Optional[dict[str, str]]: +def get_rockchip_gpu_stats() -> Optional[dict[str, str | float]]: """Get GPU stats using rk.""" try: with open("/sys/kernel/debug/rkrga/load", "r") as f: @@ -440,7 +440,16 @@ def get_rockchip_gpu_stats() -> Optional[dict[str, str]]: return None average_load = f"{round(sum(load_values) / len(load_values), 2)}%" - return {"gpu": average_load, "mem": "-"} + stats: dict[str, str | float] = {"gpu": average_load, "mem": "-%"} + + try: + with open("/sys/class/thermal/thermal_zone5/temp", "r") as f: + line = f.readline().strip() + stats["temp"] = round(int(line) / 1000, 1) + except (FileNotFoundError, OSError, ValueError): + pass + + return stats def get_rockchip_npu_stats() -> Optional[dict[str, float | str]]: @@ -463,13 +472,25 @@ def get_rockchip_npu_stats() -> Optional[dict[str, float | str]]: percentages = [int(load) for load in core_loads] mean = round(sum(percentages) / len(percentages), 2) - return {"npu": mean, "mem": "-"} + stats: dict[str, float | str] = {"npu": mean, "mem": "-%"} + + try: + with open("/sys/class/thermal/thermal_zone6/temp", "r") as f: + line = f.readline().strip() + stats["temp"] = round(int(line) / 1000, 1) + except (FileNotFoundError, OSError, ValueError): + pass + + return stats -def try_get_info(f, h, default="N/A"): +def try_get_info(f, h, default="N/A", sensor=None): try: if h: - v = f(h) + if sensor is not None: + v = f(h, sensor) + else: + v = f(h) else: v = f() except nvml.NVMLError_NotSupported: @@ -498,6 +519,9 @@ def get_nvidia_gpu_stats() -> dict[int, dict]: util = try_get_info(nvml.nvmlDeviceGetUtilizationRates, handle) enc = try_get_info(nvml.nvmlDeviceGetEncoderUtilization, handle) dec = 
try_get_info(nvml.nvmlDeviceGetDecoderUtilization, handle) + temp = try_get_info( + nvml.nvmlDeviceGetTemperature, handle, default=None, sensor=0 + ) pstate = try_get_info(nvml.nvmlDeviceGetPowerState, handle, default=None) if util != "N/A": @@ -510,6 +534,11 @@ def get_nvidia_gpu_stats() -> dict[int, dict]: else: gpu_mem_util = -1 + if temp != "N/A" and temp is not None: + temp = float(temp) + else: + temp = None + if enc != "N/A": enc_util = enc[0] else: @@ -527,6 +556,7 @@ def get_nvidia_gpu_stats() -> dict[int, dict]: "enc": enc_util, "dec": dec_util, "pstate": pstate or "unknown", + "temp": temp, } except Exception: pass @@ -556,6 +586,53 @@ def get_jetson_stats() -> Optional[dict[int, dict]]: return results +def get_hailo_temps() -> dict[str, float]: + """Get temperatures for Hailo devices.""" + try: + from hailo_platform import Device + except ModuleNotFoundError: + return {} + + temps = {} + + try: + device_ids = Device.scan() + for i, device_id in enumerate(device_ids): + try: + with Device(device_id) as device: + temp_info = device.control.get_chip_temperature() + + # Get board name and normalise it + identity = device.control.identify() + board_name = None + for line in str(identity).split("\n"): + if line.startswith("Board Name:"): + board_name = ( + line.split(":", 1)[1].strip().lower().replace("-", "") + ) + break + + if not board_name: + board_name = f"hailo{i}" + + # Use indexed name if multiple devices, otherwise just the board name + device_name = ( + f"{board_name}-{i}" if len(device_ids) > 1 else board_name + ) + + # ts1_temperature is also available, but appeared to be the same as ts0 in testing. 
+ temps[device_name] = round(temp_info.ts0_temperature, 1) + except Exception as e: + logger.debug( + f"Failed to get temperature for Hailo device {device_id}: {e}" + ) + continue + except Exception as e: + logger.debug(f"Failed to scan for Hailo devices: {e}") + + return temps + + def ffprobe_stream(ffmpeg, path: str, detailed: bool = False) -> sp.CompletedProcess: """Run ffprobe on stream.""" clean_path = escape_special_characters(path) @@ -591,12 +668,17 @@ def ffprobe_stream(ffmpeg, path: str, detailed: bool = False) -> sp.CompletedPro def vainfo_hwaccel(device_name: Optional[str] = None) -> sp.CompletedProcess: """Run vainfo.""" - ffprobe_cmd = ( - ["vainfo"] - if not device_name - else ["vainfo", "--display", "drm", "--device", f"/dev/dri/{device_name}"] - ) - return sp.run(ffprobe_cmd, capture_output=True) + if not device_name: + cmd = ["vainfo"] + else: + if os.path.isabs(device_name) and device_name.startswith("/dev/dri/"): + device_path = device_name + else: + device_path = f"/dev/dri/{device_name}" + + cmd = ["vainfo", "--display", "drm", "--device", device_path] + + return sp.run(cmd, capture_output=True) def get_nvidia_driver_info() -> dict[str, Any]: diff --git a/frigate/video.py b/frigate/video.py index 112844543..5e42619dd 100755 --- a/frigate/video.py +++ b/frigate/video.py @@ -3,6 +3,7 @@ import queue import subprocess as sp import threading import time +from collections import deque from datetime import datetime, timedelta, timezone from multiprocessing import Queue, Value from multiprocessing.synchronize import Event as MpEvent @@ -117,6 +118,7 @@ def capture_frames( frame_rate.start() skipped_eps = EventsPerSecond() skipped_eps.start() + config_subscriber = CameraConfigUpdateSubscriber( None, {config.name: config}, [CameraConfigUpdateEnum.enabled] ) @@ -181,6 +183,9 @@ class CameraWatchdog(threading.Thread): camera_fps, skipped_fps, ffmpeg_pid, + stalls, + reconnects, + detection_frame, stop_event, ): threading.Thread.__init__(self) @@ -201,6 
+206,10 @@ class CameraWatchdog(threading.Thread): self.frame_index = 0 self.stop_event = stop_event self.sleeptime = self.config.ffmpeg.retry_interval + self.reconnect_timestamps = deque() + self.stalls = stalls + self.reconnects = reconnects + self.detection_frame = detection_frame self.config_subscriber = CameraConfigUpdateSubscriber( None, @@ -216,6 +225,35 @@ class CameraWatchdog(threading.Thread): self.latest_cache_segment_time: float = 0 self.record_enable_time: datetime | None = None + # Stall tracking (based on last processed frame) + self._stall_timestamps: deque[float] = deque() + self._stall_active: bool = False + + # Status caching to reduce message volume + self._last_detect_status: str | None = None + self._last_record_status: str | None = None + self._last_status_update_time: float = 0.0 + + def _send_detect_status(self, status: str, now: float) -> None: + """Send detect status only if changed or retry_interval has elapsed.""" + if ( + status != self._last_detect_status + or (now - self._last_status_update_time) >= self.sleeptime + ): + self.requestor.send_data(f"{self.config.name}/status/detect", status) + self._last_detect_status = status + self._last_status_update_time = now + + def _send_record_status(self, status: str, now: float) -> None: + """Send record status only if changed or retry_interval has elapsed.""" + if ( + status != self._last_record_status + or (now - self._last_status_update_time) >= self.sleeptime + ): + self.requestor.send_data(f"{self.config.name}/status/record", status) + self._last_record_status = status + self._last_status_update_time = now + def _update_enabled_state(self) -> bool: """Fetch the latest config and update enabled state.""" self.config_subscriber.check_for_updates() @@ -242,6 +280,14 @@ class CameraWatchdog(threading.Thread): else: self.ffmpeg_detect_process.wait() + # Update reconnects + now = datetime.now().timestamp() + self.reconnect_timestamps.append(now) + while self.reconnect_timestamps and 
self.reconnect_timestamps[0] < now - 3600: + self.reconnect_timestamps.popleft() + if self.reconnects: + self.reconnects.value = len(self.reconnect_timestamps) + # Wait for old capture thread to fully exit before starting a new one if self.capture_thread is not None and self.capture_thread.is_alive(): self.logger.info("Waiting for capture thread to exit...") @@ -267,7 +313,10 @@ class CameraWatchdog(threading.Thread): self.record_enable_time = datetime.now().astimezone(timezone.utc) time.sleep(self.sleeptime) - while not self.stop_event.wait(self.sleeptime): + last_restart_time = datetime.now().timestamp() + + # 1 second watchdog loop + while not self.stop_event.wait(1): enabled = self._update_enabled_state() if enabled != self.was_enabled: if enabled: @@ -285,12 +334,9 @@ class CameraWatchdog(threading.Thread): self.record_enable_time = None # update camera status - self.requestor.send_data( - f"{self.config.name}/status/detect", "disabled" - ) - self.requestor.send_data( - f"{self.config.name}/status/record", "disabled" - ) + now = datetime.now().timestamp() + self._send_detect_status("disabled", now) + self._send_record_status("disabled", now) self.was_enabled = enabled continue @@ -329,36 +375,44 @@ class CameraWatchdog(threading.Thread): now = datetime.now().timestamp() + # Check if enough time has passed to allow ffmpeg restart (backoff pacing) + time_since_last_restart = now - last_restart_time + can_restart = time_since_last_restart >= self.sleeptime + if not self.capture_thread.is_alive(): - self.requestor.send_data(f"{self.config.name}/status/detect", "offline") + self._send_detect_status("offline", now) self.camera_fps.value = 0 self.logger.error( f"Ffmpeg process crashed unexpectedly for {self.config.name}." 
) - self.reset_capture_thread(terminate=False) + if can_restart: + self.reset_capture_thread(terminate=False) + last_restart_time = now elif self.camera_fps.value >= (self.config.detect.fps + 10): self.fps_overflow_count += 1 if self.fps_overflow_count == 3: - self.requestor.send_data( - f"{self.config.name}/status/detect", "offline" - ) + self._send_detect_status("offline", now) self.fps_overflow_count = 0 self.camera_fps.value = 0 self.logger.info( f"{self.config.name} exceeded fps limit. Exiting ffmpeg..." ) - self.reset_capture_thread(drain_output=False) + if can_restart: + self.reset_capture_thread(drain_output=False) + last_restart_time = now elif now - self.capture_thread.current_frame.value > 20: - self.requestor.send_data(f"{self.config.name}/status/detect", "offline") + self._send_detect_status("offline", now) self.camera_fps.value = 0 self.logger.info( f"No frames received from {self.config.name} in 20 seconds. Exiting ffmpeg..." ) - self.reset_capture_thread() + if can_restart: + self.reset_capture_thread() + last_restart_time = now else: # process is running normally - self.requestor.send_data(f"{self.config.name}/status/detect", "online") + self._send_detect_status("online", now) self.fps_overflow_count = 0 for p in self.ffmpeg_other_processes: @@ -441,9 +495,7 @@ class CameraWatchdog(threading.Thread): continue else: - self.requestor.send_data( - f"{self.config.name}/status/record", "online" - ) + self._send_record_status("online", now) p["latest_segment_time"] = self.latest_cache_segment_time if poll is None: @@ -459,6 +511,34 @@ class CameraWatchdog(threading.Thread): p["cmd"], self.logger, p["logpipe"], ffmpeg_process=p["process"] ) + # Update stall metrics based on last processed frame timestamp + now = datetime.now().timestamp() + processed_ts = ( + float(self.detection_frame.value) if self.detection_frame else 0.0 + ) + if processed_ts > 0: + delta = now - processed_ts + observed_fps = ( + self.camera_fps.value + if self.camera_fps.value > 0 + 
else self.config.detect.fps + ) + interval = 1.0 / max(observed_fps, 0.1) + stall_threshold = max(2.0 * interval, 2.0) + + if delta > stall_threshold: + if not self._stall_active: + self._stall_timestamps.append(now) + self._stall_active = True + else: + self._stall_active = False + + while self._stall_timestamps and self._stall_timestamps[0] < now - 3600: + self._stall_timestamps.popleft() + + if self.stalls: + self.stalls.value = len(self._stall_timestamps) + self.stop_all_ffmpeg() self.logpipe.close() self.config_subscriber.stop() @@ -596,6 +676,9 @@ class CameraCapture(FrigateProcess): self.camera_metrics.camera_fps, self.camera_metrics.skipped_fps, self.camera_metrics.ffmpeg_pid, + self.camera_metrics.stalls_last_hour, + self.camera_metrics.reconnects_last_hour, + self.camera_metrics.detection_frame, self.stop_event, ) camera_watchdog.start() diff --git a/generate_config_translations.py b/generate_config_translations.py index c19578f1a..f41957561 100644 --- a/generate_config_translations.py +++ b/generate_config_translations.py @@ -8,20 +8,18 @@ and generates JSON translation files with titles and descriptions for the web UI import json import logging -import shutil +import sys from pathlib import Path -from typing import Any, Dict, Optional, get_args, get_origin - -from pydantic import BaseModel -from pydantic.fields import FieldInfo +from typing import Any, Dict, get_args, get_origin from frigate.config.config import FrigateConfig +from frigate.util.schema import get_config_schema logging.basicConfig(level=logging.INFO) logger = logging.getLogger(__name__) -def get_field_translations(field_info: FieldInfo) -> Dict[str, str]: +def get_field_translations(field_info) -> Dict[str, str]: """Extract title and description from a Pydantic field.""" translations = {} @@ -34,50 +32,147 @@ def get_field_translations(field_info: FieldInfo) -> Dict[str, str]: return translations -def process_model_fields(model: type[BaseModel]) -> Dict[str, Any]: +def 
extract_translations_from_schema( + schema: Dict[str, Any], defs: Dict[str, Any] = None +) -> Dict[str, Any]: """ - Recursively process a Pydantic model to extract translations. + Recursively extract translations (titles and descriptions) from a JSON schema. - Returns a nested dictionary structure matching the config schema, - with title and description for each field. + Returns a dictionary structure with label and description for each field, + and nested fields directly under their parent keys. """ + if defs is None: + defs = schema.get("$defs", {}) + translations = {} - model_fields = model.model_fields + # Add top-level title and description if present + if "title" in schema: + translations["label"] = schema["title"] + if "description" in schema: + translations["description"] = schema["description"] - for field_name, field_info in model_fields.items(): - field_translations = get_field_translations(field_info) + # Process nested properties + properties = schema.get("properties", {}) + for field_name, field_schema in properties.items(): + field_translations = {} - # Get the field's type annotation - field_type = field_info.annotation + # Handle $ref references + if "$ref" in field_schema: + ref_path = field_schema["$ref"] + if ref_path.startswith("#/$defs/"): + ref_name = ref_path.split("/")[-1] + if ref_name in defs: + ref_schema = defs[ref_name] + # Extract from the referenced schema + ref_translations = extract_translations_from_schema( + ref_schema, defs=defs + ) + # Use the $ref field's own title/description if present + if "title" in field_schema: + field_translations["label"] = field_schema["title"] + elif "label" in ref_translations: + field_translations["label"] = ref_translations["label"] + if "description" in field_schema: + field_translations["description"] = field_schema["description"] + elif "description" in ref_translations: + field_translations["description"] = ref_translations[ + "description" + ] + # Add nested properties from referenced schema 
+ nested_without_root = { + k: v + for k, v in ref_translations.items() + if k not in ("label", "description") + } + field_translations.update(nested_without_root) + # Handle additionalProperties with $ref (for dict types) + elif "additionalProperties" in field_schema: + additional_props = field_schema["additionalProperties"] + # Extract title and description from the field itself + if "title" in field_schema: + field_translations["label"] = field_schema["title"] + if "description" in field_schema: + field_translations["description"] = field_schema["description"] - # Handle Optional types - origin = get_origin(field_type) + # If additionalProperties contains a $ref, extract nested translations + if "$ref" in additional_props: + ref_path = additional_props["$ref"] + if ref_path.startswith("#/$defs/"): + ref_name = ref_path.split("/")[-1] + if ref_name in defs: + ref_schema = defs[ref_name] + nested = extract_translations_from_schema(ref_schema, defs=defs) + nested_without_root = { + k: v + for k, v in nested.items() + if k not in ("label", "description") + } + field_translations.update(nested_without_root) + # Handle items with $ref (for array types) + elif "items" in field_schema: + items = field_schema["items"] + # Extract title and description from the field itself + if "title" in field_schema: + field_translations["label"] = field_schema["title"] + if "description" in field_schema: + field_translations["description"] = field_schema["description"] - if origin is Optional or ( - hasattr(origin, "__name__") and origin.__name__ == "UnionType" - ): - args = get_args(field_type) - field_type = next( - (arg for arg in args if arg is not type(None)), field_type - ) + # If items contains a $ref, extract nested translations + if "$ref" in items: + ref_path = items["$ref"] + if ref_path.startswith("#/$defs/"): + ref_name = ref_path.split("/")[-1] + if ref_name in defs: + ref_schema = defs[ref_name] + nested = extract_translations_from_schema(ref_schema, defs=defs) + 
nested_without_root = { + k: v + for k, v in nested.items() + if k not in ("label", "description") + } + field_translations.update(nested_without_root) + else: + # Extract title and description + if "title" in field_schema: + field_translations["label"] = field_schema["title"] + if "description" in field_schema: + field_translations["description"] = field_schema["description"] - # Handle Dict types (like Dict[str, CameraConfig]) - if get_origin(field_type) is dict: - dict_args = get_args(field_type) - - if len(dict_args) >= 2: - value_type = dict_args[1] - - if isinstance(value_type, type) and issubclass(value_type, BaseModel): - nested_translations = process_model_fields(value_type) - - if nested_translations: - field_translations["properties"] = nested_translations - elif isinstance(field_type, type) and issubclass(field_type, BaseModel): - nested_translations = process_model_fields(field_type) - if nested_translations: - field_translations["properties"] = nested_translations + # Recursively process nested properties + if "properties" in field_schema: + nested = extract_translations_from_schema(field_schema, defs=defs) + # Merge nested translations + nested_without_root = { + k: v for k, v in nested.items() if k not in ("label", "description") + } + field_translations.update(nested_without_root) + # Handle anyOf cases + elif "anyOf" in field_schema: + for item in field_schema["anyOf"]: + if "properties" in item: + nested = extract_translations_from_schema(item, defs=defs) + nested_without_root = { + k: v + for k, v in nested.items() + if k not in ("label", "description") + } + field_translations.update(nested_without_root) + elif "$ref" in item: + ref_path = item["$ref"] + if ref_path.startswith("#/$defs/"): + ref_name = ref_path.split("/")[-1] + if ref_name in defs: + ref_schema = defs[ref_name] + nested = extract_translations_from_schema( + ref_schema, defs=defs + ) + nested_without_root = { + k: v + for k, v in nested.items() + if k not in ("label", 
"description") + } + field_translations.update(nested_without_root) if field_translations: translations[field_name] = field_translations @@ -85,76 +180,350 @@ def process_model_fields(model: type[BaseModel]) -> Dict[str, Any]: return translations -def generate_section_translation( - section_name: str, field_info: FieldInfo -) -> Dict[str, Any]: +def generate_section_translation(config_class: type) -> Dict[str, Any]: """ - Generate translation structure for a top-level config section. + Generate translation structure for a config section using its JSON schema. """ - section_translations = get_field_translations(field_info) - field_type = field_info.annotation - origin = get_origin(field_type) + schema = config_class.model_json_schema() + return extract_translations_from_schema(schema) - if origin is Optional or ( - hasattr(origin, "__name__") and origin.__name__ == "UnionType" - ): - args = get_args(field_type) - field_type = next((arg for arg in args if arg is not type(None)), field_type) - # Handle Dict types (like detectors, cameras, camera_groups) - if get_origin(field_type) is dict: - dict_args = get_args(field_type) - if len(dict_args) >= 2: - value_type = dict_args[1] - if isinstance(value_type, type) and issubclass(value_type, BaseModel): - nested = process_model_fields(value_type) - if nested: - section_translations["properties"] = nested +def get_detector_translations( + config_schema: Dict[str, Any], +) -> tuple[Dict[str, Any], set[str]]: + """Build detector type translations with nested fields based on schema definitions.""" + defs = config_schema.get("$defs", {}) + detector_schema = defs.get("DetectorConfig", {}) + discriminator = detector_schema.get("discriminator", {}) + mapping = discriminator.get("mapping", {}) - # If the field itself is a BaseModel, process it - elif isinstance(field_type, type) and issubclass(field_type, BaseModel): - nested = process_model_fields(field_type) - if nested: - section_translations["properties"] = nested + 
type_translations: Dict[str, Any] = {} + nested_field_keys: set[str] = set() + for detector_type, ref in mapping.items(): + if not isinstance(ref, str): + continue - return section_translations + if not ref.startswith("#/$defs/"): + continue + + ref_name = ref.split("/")[-1] + ref_schema = defs.get(ref_name, {}) + if not ref_schema: + continue + + type_entry: Dict[str, str] = {} + title = ref_schema.get("title") + description = ref_schema.get("description") + if title: + type_entry["label"] = title + if description: + type_entry["description"] = description + + nested = extract_translations_from_schema(ref_schema, defs=defs) + nested_without_root = { + k: v for k, v in nested.items() if k not in ("label", "description") + } + if nested_without_root: + type_entry.update(nested_without_root) + nested_field_keys.update(nested_without_root.keys()) + + if type_entry: + type_translations[detector_type] = type_entry + + return type_translations, nested_field_keys def main(): """Main function to generate config translations.""" # Define output directory - output_dir = Path(__file__).parent / "web" / "public" / "locales" / "en" / "config" + if len(sys.argv) > 1: + output_dir = Path(sys.argv[1]) + else: + output_dir = ( + Path(__file__).parent / "web" / "public" / "locales" / "en" / "config" + ) logger.info(f"Output directory: {output_dir}") - # Clean and recreate the output directory - if output_dir.exists(): - logger.info(f"Removing existing directory: {output_dir}") - shutil.rmtree(output_dir) - - logger.info(f"Creating directory: {output_dir}") + # Ensure the output directory exists; do not delete existing files. 
output_dir.mkdir(parents=True, exist_ok=True) + logger.info( + f"Using output directory (existing files will be overwritten): {output_dir}" + ) config_fields = FrigateConfig.model_fields + config_schema = get_config_schema(FrigateConfig) logger.info(f"Found {len(config_fields)} top-level config sections") + global_translations = {} + for field_name, field_info in config_fields.items(): if field_name.startswith("_"): continue logger.info(f"Processing section: {field_name}") - section_data = generate_section_translation(field_name, field_info) + + # Get the field's type + field_type = field_info.annotation + from typing import Optional, Union + + origin = get_origin(field_type) + if ( + origin is Optional + or origin is Union + or ( + hasattr(origin, "__name__") + and origin.__name__ in ("UnionType", "Union") + ) + ): + args = get_args(field_type) + field_type = next( + (arg for arg in args if arg is not type(None)), field_type + ) + + # Handle Dict[str, SomeModel] - extract the value type + if origin is dict: + args = get_args(field_type) + if args and len(args) > 1: + field_type = args[1] # Get value type from Dict[key, value] + + # Start with field's top-level metadata (label, description) + section_data = get_field_translations(field_info) + + # Generate nested translations from the field type's schema + if hasattr(field_type, "model_json_schema"): + schema = field_type.model_json_schema() + # Extract nested properties from schema + nested = extract_translations_from_schema(schema) + # Remove top-level label/description from nested since we got those from field_info + nested_without_root = { + k: v for k, v in nested.items() if k not in ("label", "description") + } + section_data.update(nested_without_root) + + if field_name == "detectors": + detector_types, detector_field_keys = get_detector_translations( + config_schema + ) + section_data.update(detector_types) + for key in detector_field_keys: + if key == "type": + continue + section_data.pop(key, None) if not 
section_data: logger.warning(f"No translations found for section: {field_name}") continue - output_file = output_dir / f"{field_name}.json" - with open(output_file, "w", encoding="utf-8") as f: - json.dump(section_data, f, indent=2, ensure_ascii=False) + # Add camera-level fields to global config documentation if applicable + CAMERA_LEVEL_FIELDS = { + "birdseye": ( + "frigate.config.camera.birdseye", + "BirdseyeCameraConfig", + ["order"], + ), + "ffmpeg": ( + "frigate.config.camera.ffmpeg", + "CameraFfmpegConfig", + ["inputs"], + ), + "lpr": ( + "frigate.config.classification", + "CameraLicensePlateRecognitionConfig", + ["expire_time"], + ), + "semantic_search": ( + "frigate.config.classification", + "CameraSemanticSearchConfig", + ["triggers"], + ), + } - logger.info(f"Generated: {output_file}") + if field_name in CAMERA_LEVEL_FIELDS: + module_path, class_name, field_names = CAMERA_LEVEL_FIELDS[field_name] + try: + import importlib + + module = importlib.import_module(module_path) + camera_class = getattr(module, class_name) + schema = camera_class.model_json_schema() + camera_fields = schema.get("properties", {}) + defs = schema.get("$defs", {}) + + for fname in field_names: + if fname in camera_fields: + field_schema = camera_fields[fname] + field_trans = {} + if "title" in field_schema: + field_trans["label"] = field_schema["title"] + if "description" in field_schema: + field_trans["description"] = field_schema["description"] + + # Extract nested properties based on schema type + nested_to_extract = None + + # Handle direct $ref + if "$ref" in field_schema: + ref_path = field_schema["$ref"] + if ref_path.startswith("#/$defs/"): + ref_name = ref_path.split("/")[-1] + if ref_name in defs: + nested_to_extract = defs[ref_name] + + # Handle additionalProperties with $ref (for dict types) + elif "additionalProperties" in field_schema: + additional_props = field_schema["additionalProperties"] + if "$ref" in additional_props: + ref_path = additional_props["$ref"] + if 
ref_path.startswith("#/$defs/"): + ref_name = ref_path.split("/")[-1] + if ref_name in defs: + nested_to_extract = defs[ref_name] + + # Handle items with $ref (for array types) + elif "items" in field_schema: + items = field_schema["items"] + if "$ref" in items: + ref_path = items["$ref"] + if ref_path.startswith("#/$defs/"): + ref_name = ref_path.split("/")[-1] + if ref_name in defs: + nested_to_extract = defs[ref_name] + + # Extract nested properties if we found a schema to use + if nested_to_extract: + nested = extract_translations_from_schema( + nested_to_extract, defs=defs + ) + nested_without_root = { + k: v + for k, v in nested.items() + if k not in ("label", "description") + } + field_trans.update(nested_without_root) + + if field_trans: + section_data[fname] = field_trans + except Exception as e: + logger.warning( + f"Could not add camera-level fields for {field_name}: {e}" + ) + + # Add to global translations instead of writing separate files + global_translations[field_name] = section_data + + logger.info(f"Added section to global translations: {field_name}") + + # Handle camera-level configs that aren't top-level FrigateConfig fields + # These are defined as fields in CameraConfig, so we extract title/description from there + camera_level_configs = { + "camera_mqtt": ("frigate.config.camera.mqtt", "CameraMqttConfig", "mqtt"), + "camera_ui": ("frigate.config.camera.ui", "CameraUiConfig", "ui"), + "onvif": ("frigate.config.camera.onvif", "OnvifConfig", "onvif"), + } + + # Import CameraConfig to extract field metadata + from frigate.config.camera.camera import CameraConfig + + camera_config_schema = CameraConfig.model_json_schema() + camera_properties = camera_config_schema.get("properties", {}) + + for config_name, ( + module_path, + class_name, + camera_field_name, + ) in camera_level_configs.items(): + try: + logger.info(f"Processing camera-level section: {config_name}") + import importlib + + module = importlib.import_module(module_path) + config_class 
= getattr(module, class_name) + + section_data = {} + + # Extract top-level label and description from CameraConfig field definition + if camera_field_name in camera_properties: + field_schema = camera_properties[camera_field_name] + if "title" in field_schema: + section_data["label"] = field_schema["title"] + if "description" in field_schema: + section_data["description"] = field_schema["description"] + + # Process model fields from schema + schema = config_class.model_json_schema() + nested = extract_translations_from_schema(schema) + # Remove top-level label/description since we got those from CameraConfig + nested_without_root = { + k: v for k, v in nested.items() if k not in ("label", "description") + } + section_data.update(nested_without_root) + + # Add camera-level section into global translations (do not write separate file) + global_translations[config_name] = section_data + logger.info( + f"Added camera-level section to global translations: {config_name}" + ) + except Exception as e: + logger.error(f"Failed to generate {config_name}: {e}") + + # Remove top-level 'cameras' field if present so it remains a separate file + if "cameras" in global_translations: + logger.info( + "Removing top-level 'cameras' from global translations to keep it as a separate cameras.json" + ) + del global_translations["cameras"] + + # Write consolidated global.json with per-section keys + global_file = output_dir / "global.json" + with open(global_file, "w", encoding="utf-8") as f: + json.dump(global_translations, f, indent=2, ensure_ascii=False) + f.write("\n") + + logger.info(f"Generated consolidated translations: {global_file}") + + if not global_translations: + logger.warning("No global translations were generated!") + else: + logger.info(f"Global contains {len(global_translations)} sections") + + # Generate cameras.json from CameraConfig schema + cameras_file = output_dir / "cameras.json" + logger.info(f"Generating cameras.json: {cameras_file}") + try: + if 
"camera_config_schema" in locals(): + camera_schema = camera_config_schema + else: + from frigate.config.camera.camera import CameraConfig + + camera_schema = CameraConfig.model_json_schema() + + camera_translations = extract_translations_from_schema(camera_schema) + + # Change descriptions to use 'for this camera' for fields that are global + def sanitize_camera_descriptions(obj): + if isinstance(obj, dict): + for k, v in list(obj.items()): + if k == "description" and isinstance(v, str): + obj[k] = v.replace( + "for all cameras; can be overridden per-camera", + "for this camera", + ) + else: + sanitize_camera_descriptions(v) + elif isinstance(obj, list): + for item in obj: + sanitize_camera_descriptions(item) + + sanitize_camera_descriptions(camera_translations) + + with open(cameras_file, "w", encoding="utf-8") as f: + json.dump(camera_translations, f, indent=2, ensure_ascii=False) + f.write("\n") + logger.info(f"Generated cameras.json: {cameras_file}") + except Exception as e: + logger.error(f"Failed to generate cameras.json: {e}") logger.info("Translation generation complete!") diff --git a/migrations/033_create_export_case_table.py b/migrations/033_create_export_case_table.py new file mode 100644 index 000000000..08edcbc32 --- /dev/null +++ b/migrations/033_create_export_case_table.py @@ -0,0 +1,50 @@ +"""Peewee migrations -- 033_create_export_case_table.py. 
+ +Some examples (model - class or model name):: + + > Model = migrator.orm['model_name'] # Return model in current state by name + + > migrator.sql(sql) # Run custom SQL + > migrator.python(func, *args, **kwargs) # Run python code + > migrator.create_model(Model) # Create a model (could be used as decorator) + > migrator.remove_model(model, cascade=True) # Remove a model + > migrator.add_fields(model, **fields) # Add fields to a model + > migrator.change_fields(model, **fields) # Change fields + > migrator.remove_fields(model, *field_names, cascade=True) + > migrator.rename_field(model, old_field_name, new_field_name) + > migrator.rename_table(model, new_table_name) + > migrator.add_index(model, *col_names, unique=False) + > migrator.drop_index(model, *col_names) + > migrator.add_not_null(model, *field_names) + > migrator.drop_not_null(model, *field_names) + > migrator.add_default(model, field_name, default) + +""" + +import peewee as pw + +SQL = pw.SQL + + +def migrate(migrator, database, fake=False, **kwargs): + migrator.sql( + """ + CREATE TABLE IF NOT EXISTS "exportcase" ( + "id" VARCHAR(30) NOT NULL PRIMARY KEY, + "name" VARCHAR(100) NOT NULL, + "description" TEXT NULL, + "created_at" DATETIME NOT NULL, + "updated_at" DATETIME NOT NULL + ) + """ + ) + migrator.sql( + 'CREATE INDEX IF NOT EXISTS "exportcase_name" ON "exportcase" ("name")' + ) + migrator.sql( + 'CREATE INDEX IF NOT EXISTS "exportcase_created_at" ON "exportcase" ("created_at")' + ) + + +def rollback(migrator, database, fake=False, **kwargs): + pass diff --git a/migrations/034_add_export_case_to_exports.py b/migrations/034_add_export_case_to_exports.py new file mode 100644 index 000000000..da9e1d4ac --- /dev/null +++ b/migrations/034_add_export_case_to_exports.py @@ -0,0 +1,40 @@ +"""Peewee migrations -- 034_add_export_case_to_exports.py. 
+ +Some examples (model - class or model name):: + + > Model = migrator.orm['model_name'] # Return model in current state by name + + > migrator.sql(sql) # Run custom SQL + > migrator.python(func, *args, **kwargs) # Run python code + > migrator.create_model(Model) # Create a model (could be used as decorator) + > migrator.remove_model(model, cascade=True) # Remove a model + > migrator.add_fields(model, **fields) # Add fields to a model + > migrator.change_fields(model, **fields) # Change fields + > migrator.remove_fields(model, *field_names, cascade=True) + > migrator.rename_field(model, old_field_name, new_field_name) + > migrator.rename_table(model, new_table_name) + > migrator.add_index(model, *col_names, unique=False) + > migrator.drop_index(model, *col_names) + > migrator.add_not_null(model, *field_names) + > migrator.drop_not_null(model, *field_names) + > migrator.add_default(model, field_name, default) + +""" + +import peewee as pw + +SQL = pw.SQL + + +def migrate(migrator, database, fake=False, **kwargs): + # Add nullable export_case_id column to export table + migrator.sql('ALTER TABLE "export" ADD COLUMN "export_case_id" VARCHAR(30) NULL') + + # Index for faster case-based queries + migrator.sql( + 'CREATE INDEX IF NOT EXISTS "export_export_case_id" ON "export" ("export_case_id")' + ) + + +def rollback(migrator, database, fake=False, **kwargs): + pass diff --git a/migrations/035_add_motion_heatmap.py b/migrations/035_add_motion_heatmap.py new file mode 100644 index 000000000..b6962083e --- /dev/null +++ b/migrations/035_add_motion_heatmap.py @@ -0,0 +1,34 @@ +"""Peewee migrations -- 035_add_motion_heatmap.py. 
+ +Some examples (model - class or model name):: + + > Model = migrator.orm['model_name'] # Return model in current state by name + + > migrator.sql(sql) # Run custom SQL + > migrator.python(func, *args, **kwargs) # Run python code + > migrator.create_model(Model) # Create a model (could be used as decorator) + > migrator.remove_model(model, cascade=True) # Remove a model + > migrator.add_fields(model, **fields) # Add fields to a model + > migrator.change_fields(model, **fields) # Change fields + > migrator.remove_fields(model, *field_names, cascade=True) + > migrator.rename_field(model, old_field_name, new_field_name) + > migrator.rename_table(model, new_table_name) + > migrator.add_index(model, *col_names, unique=False) + > migrator.drop_index(model, *col_names) + > migrator.add_not_null(model, *field_names) + > migrator.drop_not_null(model, *field_names) + > migrator.add_default(model, field_name, default) + +""" + +import peewee as pw + +SQL = pw.SQL + + +def migrate(migrator, database, fake=False, **kwargs): + migrator.sql('ALTER TABLE "recordings" ADD COLUMN "motion_heatmap" TEXT NULL') + + +def rollback(migrator, database, fake=False, **kwargs): + pass diff --git a/web/package-lock.json b/web/package-lock.json index cfd5aa2c6..a3135a345 100644 --- a/web/package-lock.json +++ b/web/package-lock.json @@ -7,6 +7,7 @@ "": { "name": "web-new", "version": "0.0.0", + "hasInstallScript": true, "dependencies": { "@cycjimmy/jsmpeg-player": "^6.1.2", "@hookform/resolvers": "^3.9.0", @@ -21,17 +22,22 @@ "@radix-ui/react-hover-card": "^1.1.6", "@radix-ui/react-label": "^2.1.2", "@radix-ui/react-popover": "^1.1.6", + "@radix-ui/react-progress": "^1.1.8", "@radix-ui/react-radio-group": "^1.2.3", "@radix-ui/react-scroll-area": "^1.2.3", "@radix-ui/react-select": "^2.1.6", "@radix-ui/react-separator": "^1.1.7", "@radix-ui/react-slider": "^1.2.3", - "@radix-ui/react-slot": "^1.2.3", + "@radix-ui/react-slot": "1.2.4", "@radix-ui/react-switch": "^1.1.3", "@radix-ui/react-tabs": 
"^1.1.3", "@radix-ui/react-toggle": "^1.1.2", "@radix-ui/react-toggle-group": "^1.1.2", "@radix-ui/react-tooltip": "^1.2.8", + "@rjsf/core": "^6.3.1", + "@rjsf/shadcn": "^6.3.1", + "@rjsf/utils": "^6.3.1", + "@rjsf/validator-ajv8": "^6.3.1", "apexcharts": "^3.52.0", "axios": "^1.7.7", "class-variance-authority": "^0.7.1", @@ -40,8 +46,7 @@ "copy-to-clipboard": "^3.3.3", "date-fns": "^3.6.0", "date-fns-tz": "^3.2.0", - "embla-carousel-react": "^8.2.0", - "framer-motion": "^11.5.4", + "framer-motion": "^12.35.0", "hls.js": "^1.5.20", "i18next": "^24.2.0", "i18next-http-backend": "^3.0.1", @@ -51,28 +56,28 @@ "lodash": "^4.17.23", "lucide-react": "^0.477.0", "monaco-yaml": "^5.3.1", - "next-themes": "^0.3.0", + "next-themes": "^0.4.6", "nosleep.js": "^0.12.0", - "react": "^18.3.1", + "react": "^19.2.4", "react-apexcharts": "^1.4.1", "react-day-picker": "^9.7.0", "react-device-detect": "^2.2.3", - "react-dom": "^18.3.1", + "react-dom": "^19.2.4", "react-dropzone": "^14.3.8", - "react-grid-layout": "^1.5.0", + "react-grid-layout": "^2.2.2", "react-hook-form": "^7.52.1", "react-i18next": "^15.2.0", "react-icons": "^5.5.0", - "react-konva": "^18.2.10", + "react-konva": "^19.2.3", + "react-markdown": "^9.0.1", "react-router-dom": "^6.30.3", "react-swipeable": "^7.0.2", "react-tracked": "^2.0.1", - "react-transition-group": "^4.4.5", "react-use-websocket": "^4.8.1", - "react-zoom-pan-pinch": "3.4.4", - "recoil": "^0.7.7", + "react-zoom-pan-pinch": "^3.7.0", + "remark-gfm": "^4.0.0", "scroll-into-view-if-needed": "^3.1.0", - "sonner": "^1.5.0", + "sonner": "^2.0.7", "sort-by": "^1.2.0", "strftime": "^0.10.3", "swr": "^2.3.2", @@ -80,7 +85,7 @@ "tailwind-scrollbar": "^3.1.0", "tailwindcss-animate": "^1.0.7", "use-long-press": "^3.2.0", - "vaul": "^0.9.1", + "vaul": "^1.1.2", "vite-plugin-monaco-editor": "^1.1.0", "zod": "^3.23.8" }, @@ -89,11 +94,8 @@ "@testing-library/jest-dom": "^6.6.2", "@types/lodash": "^4.17.12", "@types/node": "^20.14.10", - "@types/react": "^18.3.2", - 
"@types/react-dom": "^18.3.0", - "@types/react-grid-layout": "^1.3.5", - "@types/react-icons": "^3.0.0", - "@types/react-transition-group": "^4.4.10", + "@types/react": "^19.2.14", + "@types/react-dom": "^19.2.3", "@types/strftime": "^0.9.8", "@typescript-eslint/eslint-plugin": "^7.5.0", "@typescript-eslint/parser": "^7.5.0", @@ -104,18 +106,20 @@ "eslint-config-prettier": "^9.1.0", "eslint-plugin-jest": "^28.2.0", "eslint-plugin-prettier": "^5.0.1", - "eslint-plugin-react-hooks": "^4.6.0", + "eslint-plugin-react-hooks": "^5.2.0", "eslint-plugin-react-refresh": "^0.4.8", "eslint-plugin-vitest-globals": "^1.5.0", "fake-indexeddb": "^6.0.0", "jest-websocket-mock": "^2.5.0", "jsdom": "^24.1.1", + "monaco-editor": "^0.52.0", "msw": "^2.3.5", + "patch-package": "^8.0.1", "postcss": "^8.4.47", "prettier": "^3.3.3", "prettier-plugin-tailwindcss": "^0.6.5", "tailwindcss": "^3.4.9", - "typescript": "^5.8.2", + "typescript": "^5.9.3", "vite": "^6.4.1", "vitest": "^3.0.7" } @@ -759,31 +763,31 @@ } }, "node_modules/@floating-ui/core": { - "version": "1.6.9", - "resolved": "https://registry.npmjs.org/@floating-ui/core/-/core-1.6.9.tgz", - "integrity": "sha512-uMXCuQ3BItDUbAMhIXw7UPXRfAlOAvZzdK9BWpE60MCn+Svt3aLn9jsPTi/WNGlRUu2uI0v5S7JiIUsbsvh3fw==", + "version": "1.7.5", + "resolved": "https://registry.npmjs.org/@floating-ui/core/-/core-1.7.5.tgz", + "integrity": "sha512-1Ih4WTWyw0+lKyFMcBHGbb5U5FtuHJuujoyyr5zTaWS5EYMeT6Jb2AuDeftsCsEuchO+mM2ij5+q9crhydzLhQ==", "license": "MIT", "dependencies": { - "@floating-ui/utils": "^0.2.9" + "@floating-ui/utils": "^0.2.11" } }, "node_modules/@floating-ui/dom": { - "version": "1.6.13", - "resolved": "https://registry.npmjs.org/@floating-ui/dom/-/dom-1.6.13.tgz", - "integrity": "sha512-umqzocjDgNRGTuO7Q8CU32dkHkECqI8ZdMZ5Swb6QAM0t5rnlrN3lGo1hdpscRd3WS8T6DKYK4ephgIH9iRh3w==", + "version": "1.7.6", + "resolved": "https://registry.npmjs.org/@floating-ui/dom/-/dom-1.7.6.tgz", + "integrity": 
"sha512-9gZSAI5XM36880PPMm//9dfiEngYoC6Am2izES1FF406YFsjvyBMmeJ2g4SAju3xWwtuynNRFL2s9hgxpLI5SQ==", "license": "MIT", "dependencies": { - "@floating-ui/core": "^1.6.0", - "@floating-ui/utils": "^0.2.9" + "@floating-ui/core": "^1.7.5", + "@floating-ui/utils": "^0.2.11" } }, "node_modules/@floating-ui/react-dom": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/@floating-ui/react-dom/-/react-dom-2.1.2.tgz", - "integrity": "sha512-06okr5cgPzMNBy+Ycse2A6udMi4bqwW/zgBF/rwjcNqWkyr82Mcg8b0vjX8OJpZFy/FKjJmw6wV7t44kK6kW7A==", + "version": "2.1.8", + "resolved": "https://registry.npmjs.org/@floating-ui/react-dom/-/react-dom-2.1.8.tgz", + "integrity": "sha512-cC52bHwM/n/CxS87FH0yWdngEZrjdtLW/qVruo68qg+prK7ZQ4YGdut2GyDVpoGeAYe/h899rVeOVm6Oi40k2A==", "license": "MIT", "dependencies": { - "@floating-ui/dom": "^1.0.0" + "@floating-ui/dom": "^1.7.6" }, "peerDependencies": { "react": ">=16.8.0", @@ -791,9 +795,9 @@ } }, "node_modules/@floating-ui/utils": { - "version": "0.2.9", - "resolved": "https://registry.npmjs.org/@floating-ui/utils/-/utils-0.2.9.tgz", - "integrity": "sha512-MDWhGtE+eHw5JW7lq4qhc5yRLS11ERl1c7Z6Xd0a58DozHES6EnNNwUWbMiG4J9Cgj053Bhk8zvlhFYKVhULwg==", + "version": "0.2.11", + "resolved": "https://registry.npmjs.org/@floating-ui/utils/-/utils-0.2.11.tgz", + "integrity": "sha512-RiB/yIh78pcIxl6lLMG0CgBXAZ2Y0eVHqMPYugu+9U0AeT6YBeiJpf7lbdJNIugFP5SIjwNRgo4DhR1Qxi26Gg==", "license": "MIT" }, "node_modules/@hookform/resolvers": { @@ -1287,13 +1291,89 @@ } } }, - "node_modules/@radix-ui/react-alert-dialog/node_modules/@radix-ui/react-slot": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.1.2.tgz", - "integrity": "sha512-YAKxaiGsSQJ38VzKH86/BPRC4rh+b1Jpa+JneA5LRE7skmLPNAyeG8kPJj/oo4STLvlrs8vkf/iYyc3A5stYCQ==", + "node_modules/@radix-ui/react-alert-dialog/node_modules/@radix-ui/react-dismissable-layer": { + "version": "1.1.5", + "resolved": 
"https://registry.npmjs.org/@radix-ui/react-dismissable-layer/-/react-dismissable-layer-1.1.5.tgz", + "integrity": "sha512-E4TywXY6UsXNRhFrECa5HAvE5/4BFcGyfTyK36gP+pAW1ed7UTK4vKwdr53gAJYwqbfCWC6ATvJa3J3R/9+Qrg==", "license": "MIT", "dependencies": { - "@radix-ui/react-compose-refs": "1.1.1" + "@radix-ui/primitive": "1.1.1", + "@radix-ui/react-compose-refs": "1.1.1", + "@radix-ui/react-primitive": "2.0.2", + "@radix-ui/react-use-callback-ref": "1.1.0", + "@radix-ui/react-use-escape-keydown": "1.1.0" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-alert-dialog/node_modules/@radix-ui/react-focus-scope": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-focus-scope/-/react-focus-scope-1.1.2.tgz", + "integrity": "sha512-zxwE80FCU7lcXUGWkdt6XpTTCKPitG1XKOwViTxHVKIJhZl9MvIl2dVHeZENCWD9+EdWv05wlaEkRXUykU27RA==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.1", + "@radix-ui/react-primitive": "2.0.2", + "@radix-ui/react-use-callback-ref": "1.1.0" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-alert-dialog/node_modules/@radix-ui/react-portal": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/@radix-ui/react-portal/-/react-portal-1.1.4.tgz", + "integrity": "sha512-sn2O9k1rPFYVyKd5LAJfo96JlSGVFpa1fS6UuBJfrZadudiw5tAmru+n1x7aMRQ84qDM71Zh1+SzK5QwU0tJfA==", + 
"license": "MIT", + "dependencies": { + "@radix-ui/react-primitive": "2.0.2", + "@radix-ui/react-use-layout-effect": "1.1.0" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-alert-dialog/node_modules/@radix-ui/react-use-escape-keydown": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-escape-keydown/-/react-use-escape-keydown-1.1.0.tgz", + "integrity": "sha512-L7vwWlR1kTTQ3oh7g1O0CBF3YCyyTj8NmhLR+phShpyA50HCfBFKVJTpshm9PzLiKmehsrQzTYTpX9HvmC9rhw==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-use-callback-ref": "1.1.0" }, "peerDependencies": { "@types/react": "*", @@ -1306,12 +1386,35 @@ } }, "node_modules/@radix-ui/react-arrow": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@radix-ui/react-arrow/-/react-arrow-1.1.2.tgz", - "integrity": "sha512-G+KcpzXHq24iH0uGG/pF8LyzpFJYGD4RfLjCIBfGdSLXvjLHST31RUiRVrupIBMvIppMgSzQ6l66iAxl03tdlg==", + "version": "1.1.7", + "resolved": "https://registry.npmjs.org/@radix-ui/react-arrow/-/react-arrow-1.1.7.tgz", + "integrity": "sha512-F+M1tLhO+mlQaOWspE8Wstg+z6PwxwRd8oQ8IXceWz92kfAmalTRf0EjrouQeo7QssEPfCn05B4Ihs1K9WQ/7w==", "license": "MIT", "dependencies": { - "@radix-ui/react-primitive": "2.0.2" + "@radix-ui/react-primitive": "2.1.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-arrow/node_modules/@radix-ui/react-primitive": { + 
"version": "2.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz", + "integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-slot": "1.2.3" }, "peerDependencies": { "@types/react": "*", @@ -1352,19 +1455,19 @@ } }, "node_modules/@radix-ui/react-checkbox": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/@radix-ui/react-checkbox/-/react-checkbox-1.1.4.tgz", - "integrity": "sha512-wP0CPAHq+P5I4INKe3hJrIa1WoNqqrejzW+zoU0rOvo1b9gDEJJFl2rYfO1PYJUQCc2H1WZxIJmyv9BS8i5fLw==", + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-checkbox/-/react-checkbox-1.3.3.tgz", + "integrity": "sha512-wBbpv+NQftHDdG86Qc0pIyXk5IR3tM8Vd0nWLKDcX8nNn4nXFOFwsKuqw2okA/1D/mpaAkmuyndrPJTYDNZtFw==", "license": "MIT", "dependencies": { - "@radix-ui/primitive": "1.1.1", - "@radix-ui/react-compose-refs": "1.1.1", - "@radix-ui/react-context": "1.1.1", - "@radix-ui/react-presence": "1.1.2", - "@radix-ui/react-primitive": "2.0.2", - "@radix-ui/react-use-controllable-state": "1.1.0", - "@radix-ui/react-use-previous": "1.1.0", - "@radix-ui/react-use-size": "1.1.0" + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-presence": "1.1.5", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-controllable-state": "1.2.2", + "@radix-ui/react-use-previous": "1.1.1", + "@radix-ui/react-use-size": "1.1.1" }, "peerDependencies": { "@types/react": "*", @@ -1381,6 +1484,141 @@ } } }, + "node_modules/@radix-ui/react-checkbox/node_modules/@radix-ui/primitive": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/primitive/-/primitive-1.1.3.tgz", + "integrity": "sha512-JTF99U/6XIjCBo0wqkU5sK10glYe27MRRsfwoiq5zzOEZLHU3A3KCMa5X/azekYRCJ0HlwI0crAXS/5dEHTzDg==", + "license": "MIT" + }, + 
"node_modules/@radix-ui/react-checkbox/node_modules/@radix-ui/react-context": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-context/-/react-context-1.1.2.tgz", + "integrity": "sha512-jCi/QKUM2r1Ju5a3J64TH2A5SpKAgh0LpknyqdQ4m6DCV0xJ2HG1xARRwNGPQfi1SLdLWZ1OJz6F4OMBBNiGJA==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-checkbox/node_modules/@radix-ui/react-presence": { + "version": "1.1.5", + "resolved": "https://registry.npmjs.org/@radix-ui/react-presence/-/react-presence-1.1.5.tgz", + "integrity": "sha512-/jfEwNDdQVBCNvjkGit4h6pMOzq8bHkopq458dPt2lMjx+eBQUohZNG9A7DtO/O5ukSbxuaNGXMjHicgwy6rQQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-use-layout-effect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-checkbox/node_modules/@radix-ui/react-primitive": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz", + "integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-slot": "1.2.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": 
{ + "optional": true + } + } + }, + "node_modules/@radix-ui/react-checkbox/node_modules/@radix-ui/react-use-controllable-state": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-controllable-state/-/react-use-controllable-state-1.2.2.tgz", + "integrity": "sha512-BjasUjixPFdS+NKkypcyyN5Pmg83Olst0+c6vGov0diwTEo6mgdqVR6hxcEgFuh4QrAs7Rc+9KuGJ9TVCj0Zzg==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-use-effect-event": "0.0.2", + "@radix-ui/react-use-layout-effect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-checkbox/node_modules/@radix-ui/react-use-layout-effect": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-layout-effect/-/react-use-layout-effect-1.1.1.tgz", + "integrity": "sha512-RbJRS4UWQFkzHTTwVymMTUv8EqYhOp8dOOviLj2ugtTiXRaRQS7GLGxZTLL1jWhMeoSCf5zmcZkqTl9IiYfXcQ==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-checkbox/node_modules/@radix-ui/react-use-previous": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-previous/-/react-use-previous-1.1.1.tgz", + "integrity": "sha512-2dHfToCj/pzca2Ck724OZ5L0EVrr3eHRNsG/b3xQJLA2hZpVCS99bLAX+hm1IHXDEnzU6by5z/5MIY794/a8NQ==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-checkbox/node_modules/@radix-ui/react-use-size": { + "version": "1.1.1", + "resolved": 
"https://registry.npmjs.org/@radix-ui/react-use-size/-/react-use-size-1.1.1.tgz", + "integrity": "sha512-ewrXRDTAqAXlkl6t/fkXWNAhFX9I+CkKlw6zjEwk86RSPKwZr3xpBRso655aqYafwtnbpHLj6toFzmd6xdVptQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-use-layout-effect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, "node_modules/@radix-ui/react-collapsible": { "version": "1.1.12", "resolved": "https://registry.npmjs.org/@radix-ui/react-collapsible/-/react-collapsible-1.1.12.tgz", @@ -1417,21 +1655,6 @@ "integrity": "sha512-JTF99U/6XIjCBo0wqkU5sK10glYe27MRRsfwoiq5zzOEZLHU3A3KCMa5X/azekYRCJ0HlwI0crAXS/5dEHTzDg==", "license": "MIT" }, - "node_modules/@radix-ui/react-collapsible/node_modules/@radix-ui/react-compose-refs": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@radix-ui/react-compose-refs/-/react-compose-refs-1.1.2.tgz", - "integrity": "sha512-z4eqJvfiNnFMHIIvXP3CY57y2WJs5g2v3X0zm9mEJkrkNv4rDxu+sg9Jh8EkXyeqBkB7SOcboo9dMVqhyrACIg==", - "license": "MIT", - "peerDependencies": { - "@types/react": "*", - "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" - }, - "peerDependenciesMeta": { - "@types/react": { - "optional": true - } - } - }, "node_modules/@radix-ui/react-collapsible/node_modules/@radix-ui/react-context": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/@radix-ui/react-context/-/react-context-1.1.2.tgz", @@ -1572,28 +1795,10 @@ } } }, - "node_modules/@radix-ui/react-collection/node_modules/@radix-ui/react-slot": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.1.2.tgz", - "integrity": "sha512-YAKxaiGsSQJ38VzKH86/BPRC4rh+b1Jpa+JneA5LRE7skmLPNAyeG8kPJj/oo4STLvlrs8vkf/iYyc3A5stYCQ==", - "license": "MIT", - "dependencies": { - "@radix-ui/react-compose-refs": "1.1.1" - }, - "peerDependencies": { - 
"@types/react": "*", - "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" - }, - "peerDependenciesMeta": { - "@types/react": { - "optional": true - } - } - }, "node_modules/@radix-ui/react-compose-refs": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/@radix-ui/react-compose-refs/-/react-compose-refs-1.1.1.tgz", - "integrity": "sha512-Y9VzoRDSJtgFMUCoiZBDVo084VQ5hfpXxVE+NgkdNsjiDBByiImMZKKhxMwCbdHvhlENG6a833CbFkOQvTricw==", + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-compose-refs/-/react-compose-refs-1.1.2.tgz", + "integrity": "sha512-z4eqJvfiNnFMHIIvXP3CY57y2WJs5g2v3X0zm9mEJkrkNv4rDxu+sg9Jh8EkXyeqBkB7SOcboo9dMVqhyrACIg==", "license": "MIT", "peerDependencies": { "@types/react": "*", @@ -1690,21 +1895,6 @@ "integrity": "sha512-JTF99U/6XIjCBo0wqkU5sK10glYe27MRRsfwoiq5zzOEZLHU3A3KCMa5X/azekYRCJ0HlwI0crAXS/5dEHTzDg==", "license": "MIT" }, - "node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-compose-refs": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@radix-ui/react-compose-refs/-/react-compose-refs-1.1.2.tgz", - "integrity": "sha512-z4eqJvfiNnFMHIIvXP3CY57y2WJs5g2v3X0zm9mEJkrkNv4rDxu+sg9Jh8EkXyeqBkB7SOcboo9dMVqhyrACIg==", - "license": "MIT", - "peerDependencies": { - "@types/react": "*", - "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" - }, - "peerDependenciesMeta": { - "@types/react": { - "optional": true - } - } - }, "node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-context": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/@radix-ui/react-context/-/react-context-1.1.2.tgz", @@ -1720,33 +1910,6 @@ } } }, - "node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-dismissable-layer": { - "version": "1.1.11", - "resolved": "https://registry.npmjs.org/@radix-ui/react-dismissable-layer/-/react-dismissable-layer-1.1.11.tgz", - "integrity": 
"sha512-Nqcp+t5cTB8BinFkZgXiMJniQH0PsUt2k51FUhbdfeKvc4ACcG2uQniY/8+h1Yv6Kza4Q7lD7PQV0z0oicE0Mg==", - "license": "MIT", - "dependencies": { - "@radix-ui/primitive": "1.1.3", - "@radix-ui/react-compose-refs": "1.1.2", - "@radix-ui/react-primitive": "2.1.3", - "@radix-ui/react-use-callback-ref": "1.1.1", - "@radix-ui/react-use-escape-keydown": "1.1.1" - }, - "peerDependencies": { - "@types/react": "*", - "@types/react-dom": "*", - "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", - "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" - }, - "peerDependenciesMeta": { - "@types/react": { - "optional": true - }, - "@types/react-dom": { - "optional": true - } - } - }, "node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-focus-guards": { "version": "1.1.3", "resolved": "https://registry.npmjs.org/@radix-ui/react-focus-guards/-/react-focus-guards-1.1.3.tgz", @@ -1762,31 +1925,6 @@ } } }, - "node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-focus-scope": { - "version": "1.1.7", - "resolved": "https://registry.npmjs.org/@radix-ui/react-focus-scope/-/react-focus-scope-1.1.7.tgz", - "integrity": "sha512-t2ODlkXBQyn7jkl6TNaw/MtVEVvIGelJDCG41Okq/KwUsJBwQ4XVZsHAVUkK4mBv3ewiAS3PGuUWuY2BoK4ZUw==", - "license": "MIT", - "dependencies": { - "@radix-ui/react-compose-refs": "1.1.2", - "@radix-ui/react-primitive": "2.1.3", - "@radix-ui/react-use-callback-ref": "1.1.1" - }, - "peerDependencies": { - "@types/react": "*", - "@types/react-dom": "*", - "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", - "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" - }, - "peerDependenciesMeta": { - "@types/react": { - "optional": true - }, - "@types/react-dom": { - "optional": true - } - } - }, "node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-id": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/@radix-ui/react-id/-/react-id-1.1.1.tgz", @@ -1805,30 +1943,6 @@ } } }, - 
"node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-portal": { - "version": "1.1.9", - "resolved": "https://registry.npmjs.org/@radix-ui/react-portal/-/react-portal-1.1.9.tgz", - "integrity": "sha512-bpIxvq03if6UNwXZ+HTK71JLh4APvnXntDc6XOX8UVq4XQOVl7lwok0AvIl+b8zgCw3fSaVTZMpAPPagXbKmHQ==", - "license": "MIT", - "dependencies": { - "@radix-ui/react-primitive": "2.1.3", - "@radix-ui/react-use-layout-effect": "1.1.1" - }, - "peerDependencies": { - "@types/react": "*", - "@types/react-dom": "*", - "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", - "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" - }, - "peerDependenciesMeta": { - "@types/react": { - "optional": true - }, - "@types/react-dom": { - "optional": true - } - } - }, "node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-presence": { "version": "1.1.5", "resolved": "https://registry.npmjs.org/@radix-ui/react-presence/-/react-presence-1.1.5.tgz", @@ -1876,21 +1990,6 @@ } } }, - "node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-use-callback-ref": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/@radix-ui/react-use-callback-ref/-/react-use-callback-ref-1.1.1.tgz", - "integrity": "sha512-FkBMwD+qbGQeMu1cOHnuGB6x4yzPjho8ap5WtbEJ26umhgqVXbhekKUQO+hZEL1vU92a3wHwdp0HAcqAUF5iDg==", - "license": "MIT", - "peerDependencies": { - "@types/react": "*", - "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" - }, - "peerDependenciesMeta": { - "@types/react": { - "optional": true - } - } - }, "node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-use-controllable-state": { "version": "1.2.2", "resolved": "https://registry.npmjs.org/@radix-ui/react-use-controllable-state/-/react-use-controllable-state-1.2.2.tgz", @@ -1910,24 +2009,6 @@ } } }, - "node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-use-escape-keydown": { - "version": "1.1.1", - "resolved": 
"https://registry.npmjs.org/@radix-ui/react-use-escape-keydown/-/react-use-escape-keydown-1.1.1.tgz", - "integrity": "sha512-Il0+boE7w/XebUHyBjroE+DbByORGR9KKmITzbR7MyQ4akpORYP/ZmbhAr0DG7RmmBqoOnZdy2QlvajJ2QA59g==", - "license": "MIT", - "dependencies": { - "@radix-ui/react-use-callback-ref": "1.1.1" - }, - "peerDependencies": { - "@types/react": "*", - "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" - }, - "peerDependenciesMeta": { - "@types/react": { - "optional": true - } - } - }, "node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-use-layout-effect": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/@radix-ui/react-use-layout-effect/-/react-use-layout-effect-1.1.1.tgz", @@ -1959,16 +2040,16 @@ } }, "node_modules/@radix-ui/react-dismissable-layer": { - "version": "1.1.5", - "resolved": "https://registry.npmjs.org/@radix-ui/react-dismissable-layer/-/react-dismissable-layer-1.1.5.tgz", - "integrity": "sha512-E4TywXY6UsXNRhFrECa5HAvE5/4BFcGyfTyK36gP+pAW1ed7UTK4vKwdr53gAJYwqbfCWC6ATvJa3J3R/9+Qrg==", + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/@radix-ui/react-dismissable-layer/-/react-dismissable-layer-1.1.11.tgz", + "integrity": "sha512-Nqcp+t5cTB8BinFkZgXiMJniQH0PsUt2k51FUhbdfeKvc4ACcG2uQniY/8+h1Yv6Kza4Q7lD7PQV0z0oicE0Mg==", "license": "MIT", "dependencies": { - "@radix-ui/primitive": "1.1.1", - "@radix-ui/react-compose-refs": "1.1.1", - "@radix-ui/react-primitive": "2.0.2", - "@radix-ui/react-use-callback-ref": "1.1.0", - "@radix-ui/react-use-escape-keydown": "1.1.0" + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-callback-ref": "1.1.1", + "@radix-ui/react-use-escape-keydown": "1.1.1" }, "peerDependencies": { "@types/react": "*", @@ -1985,6 +2066,50 @@ } } }, + "node_modules/@radix-ui/react-dismissable-layer/node_modules/@radix-ui/primitive": { + "version": "1.1.3", + "resolved": 
"https://registry.npmjs.org/@radix-ui/primitive/-/primitive-1.1.3.tgz", + "integrity": "sha512-JTF99U/6XIjCBo0wqkU5sK10glYe27MRRsfwoiq5zzOEZLHU3A3KCMa5X/azekYRCJ0HlwI0crAXS/5dEHTzDg==", + "license": "MIT" + }, + "node_modules/@radix-ui/react-dismissable-layer/node_modules/@radix-ui/react-primitive": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz", + "integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-slot": "1.2.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-dismissable-layer/node_modules/@radix-ui/react-use-callback-ref": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-callback-ref/-/react-use-callback-ref-1.1.1.tgz", + "integrity": "sha512-FkBMwD+qbGQeMu1cOHnuGB6x4yzPjho8ap5WtbEJ26umhgqVXbhekKUQO+hZEL1vU92a3wHwdp0HAcqAUF5iDg==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, "node_modules/@radix-ui/react-dropdown-menu": { "version": "2.1.6", "resolved": "https://registry.npmjs.org/@radix-ui/react-dropdown-menu/-/react-dropdown-menu-2.1.6.tgz", @@ -2030,14 +2155,14 @@ } }, "node_modules/@radix-ui/react-focus-scope": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@radix-ui/react-focus-scope/-/react-focus-scope-1.1.2.tgz", - "integrity": "sha512-zxwE80FCU7lcXUGWkdt6XpTTCKPitG1XKOwViTxHVKIJhZl9MvIl2dVHeZENCWD9+EdWv05wlaEkRXUykU27RA==", + 
"version": "1.1.7", + "resolved": "https://registry.npmjs.org/@radix-ui/react-focus-scope/-/react-focus-scope-1.1.7.tgz", + "integrity": "sha512-t2ODlkXBQyn7jkl6TNaw/MtVEVvIGelJDCG41Okq/KwUsJBwQ4XVZsHAVUkK4mBv3ewiAS3PGuUWuY2BoK4ZUw==", "license": "MIT", "dependencies": { - "@radix-ui/react-compose-refs": "1.1.1", - "@radix-ui/react-primitive": "2.0.2", - "@radix-ui/react-use-callback-ref": "1.1.0" + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-callback-ref": "1.1.1" }, "peerDependencies": { "@types/react": "*", @@ -2054,6 +2179,44 @@ } } }, + "node_modules/@radix-ui/react-focus-scope/node_modules/@radix-ui/react-primitive": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz", + "integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-slot": "1.2.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-focus-scope/node_modules/@radix-ui/react-use-callback-ref": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-callback-ref/-/react-use-callback-ref-1.1.1.tgz", + "integrity": "sha512-FkBMwD+qbGQeMu1cOHnuGB6x4yzPjho8ap5WtbEJ26umhgqVXbhekKUQO+hZEL1vU92a3wHwdp0HAcqAUF5iDg==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, "node_modules/@radix-ui/react-hover-card": { "version": "1.1.6", "resolved": 
"https://registry.npmjs.org/@radix-ui/react-hover-card/-/react-hover-card-1.1.6.tgz", @@ -2085,6 +2248,84 @@ } } }, + "node_modules/@radix-ui/react-hover-card/node_modules/@radix-ui/react-dismissable-layer": { + "version": "1.1.5", + "resolved": "https://registry.npmjs.org/@radix-ui/react-dismissable-layer/-/react-dismissable-layer-1.1.5.tgz", + "integrity": "sha512-E4TywXY6UsXNRhFrECa5HAvE5/4BFcGyfTyK36gP+pAW1ed7UTK4vKwdr53gAJYwqbfCWC6ATvJa3J3R/9+Qrg==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.1", + "@radix-ui/react-compose-refs": "1.1.1", + "@radix-ui/react-primitive": "2.0.2", + "@radix-ui/react-use-callback-ref": "1.1.0", + "@radix-ui/react-use-escape-keydown": "1.1.0" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-hover-card/node_modules/@radix-ui/react-portal": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/@radix-ui/react-portal/-/react-portal-1.1.4.tgz", + "integrity": "sha512-sn2O9k1rPFYVyKd5LAJfo96JlSGVFpa1fS6UuBJfrZadudiw5tAmru+n1x7aMRQ84qDM71Zh1+SzK5QwU0tJfA==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-primitive": "2.0.2", + "@radix-ui/react-use-layout-effect": "1.1.0" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-hover-card/node_modules/@radix-ui/react-use-escape-keydown": { + "version": "1.1.0", + "resolved": 
"https://registry.npmjs.org/@radix-ui/react-use-escape-keydown/-/react-use-escape-keydown-1.1.0.tgz", + "integrity": "sha512-L7vwWlR1kTTQ3oh7g1O0CBF3YCyyTj8NmhLR+phShpyA50HCfBFKVJTpshm9PzLiKmehsrQzTYTpX9HvmC9rhw==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-use-callback-ref": "1.1.0" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-icons": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-icons/-/react-icons-1.3.2.tgz", + "integrity": "sha512-fyQIhGDhzfc9pK2kH6Pl9c4BDJGfMkPqkyIgYDthyNYoNg3wVhoJMMh19WS4Up/1KMPFVpNsT2q3WmXn2N1m6g==", + "license": "MIT", + "peerDependencies": { + "react": "^16.x || ^17.x || ^18.x || ^19.0.0 || ^19.0.0-rc" + } + }, "node_modules/@radix-ui/react-id": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/@radix-ui/react-id/-/react-id-1.1.0.tgz", @@ -2104,12 +2345,35 @@ } }, "node_modules/@radix-ui/react-label": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/@radix-ui/react-label/-/react-label-2.1.2.tgz", - "integrity": "sha512-zo1uGMTaNlHehDyFQcDZXRJhUPDuukcnHz0/jnrup0JA6qL+AFpAnty+7VKa9esuU5xTblAZzTGYJKSKaBxBhw==", + "version": "2.1.8", + "resolved": "https://registry.npmjs.org/@radix-ui/react-label/-/react-label-2.1.8.tgz", + "integrity": "sha512-FmXs37I6hSBVDlO4y764TNz1rLgKwjJMQ0EGte6F3Cb3f4bIuHB/iLa/8I9VKkmOy+gNHq8rql3j686ACVV21A==", "license": "MIT", "dependencies": { - "@radix-ui/react-primitive": "2.0.2" + "@radix-ui/react-primitive": "2.1.4" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + 
"node_modules/@radix-ui/react-label/node_modules/@radix-ui/react-primitive": { + "version": "2.1.4", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.4.tgz", + "integrity": "sha512-9hQc4+GNVtJAIEPEqlYqW5RiYdrr8ea5XQ0ZOnD6fgru+83kqT15mq2OCcbe8KnjRZl5vF3ks69AKz3kh1jrhg==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-slot": "1.2.4" }, "peerDependencies": { "@types/react": "*", @@ -2166,95 +2430,17 @@ } } }, - "node_modules/@radix-ui/react-menu/node_modules/@radix-ui/react-slot": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.1.2.tgz", - "integrity": "sha512-YAKxaiGsSQJ38VzKH86/BPRC4rh+b1Jpa+JneA5LRE7skmLPNAyeG8kPJj/oo4STLvlrs8vkf/iYyc3A5stYCQ==", - "license": "MIT", - "dependencies": { - "@radix-ui/react-compose-refs": "1.1.1" - }, - "peerDependencies": { - "@types/react": "*", - "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" - }, - "peerDependenciesMeta": { - "@types/react": { - "optional": true - } - } - }, - "node_modules/@radix-ui/react-popover": { - "version": "1.1.6", - "resolved": "https://registry.npmjs.org/@radix-ui/react-popover/-/react-popover-1.1.6.tgz", - "integrity": "sha512-NQouW0x4/GnkFJ/pRqsIS3rM/k97VzKnVb2jB7Gq7VEGPy5g7uNV1ykySFt7eWSp3i2uSGFwaJcvIRJBAHmmFg==", + "node_modules/@radix-ui/react-menu/node_modules/@radix-ui/react-dismissable-layer": { + "version": "1.1.5", + "resolved": "https://registry.npmjs.org/@radix-ui/react-dismissable-layer/-/react-dismissable-layer-1.1.5.tgz", + "integrity": "sha512-E4TywXY6UsXNRhFrECa5HAvE5/4BFcGyfTyK36gP+pAW1ed7UTK4vKwdr53gAJYwqbfCWC6ATvJa3J3R/9+Qrg==", "license": "MIT", "dependencies": { "@radix-ui/primitive": "1.1.1", "@radix-ui/react-compose-refs": "1.1.1", - "@radix-ui/react-context": "1.1.1", - "@radix-ui/react-dismissable-layer": "1.1.5", - "@radix-ui/react-focus-guards": "1.1.1", - "@radix-ui/react-focus-scope": "1.1.2", - "@radix-ui/react-id": "1.1.0", - "@radix-ui/react-popper": 
"1.2.2", - "@radix-ui/react-portal": "1.1.4", - "@radix-ui/react-presence": "1.1.2", - "@radix-ui/react-primitive": "2.0.2", - "@radix-ui/react-slot": "1.1.2", - "@radix-ui/react-use-controllable-state": "1.1.0", - "aria-hidden": "^1.2.4", - "react-remove-scroll": "^2.6.3" - }, - "peerDependencies": { - "@types/react": "*", - "@types/react-dom": "*", - "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", - "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" - }, - "peerDependenciesMeta": { - "@types/react": { - "optional": true - }, - "@types/react-dom": { - "optional": true - } - } - }, - "node_modules/@radix-ui/react-popover/node_modules/@radix-ui/react-slot": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.1.2.tgz", - "integrity": "sha512-YAKxaiGsSQJ38VzKH86/BPRC4rh+b1Jpa+JneA5LRE7skmLPNAyeG8kPJj/oo4STLvlrs8vkf/iYyc3A5stYCQ==", - "license": "MIT", - "dependencies": { - "@radix-ui/react-compose-refs": "1.1.1" - }, - "peerDependencies": { - "@types/react": "*", - "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" - }, - "peerDependenciesMeta": { - "@types/react": { - "optional": true - } - } - }, - "node_modules/@radix-ui/react-popper": { - "version": "1.2.2", - "resolved": "https://registry.npmjs.org/@radix-ui/react-popper/-/react-popper-1.2.2.tgz", - "integrity": "sha512-Rvqc3nOpwseCyj/rgjlJDYAgyfw7OC1tTkKn2ivhaMGcYt8FSBlahHOZak2i3QwkRXUXgGgzeEe2RuqeEHuHgA==", - "license": "MIT", - "dependencies": { - "@floating-ui/react-dom": "^2.0.0", - "@radix-ui/react-arrow": "1.1.2", - "@radix-ui/react-compose-refs": "1.1.1", - "@radix-ui/react-context": "1.1.1", "@radix-ui/react-primitive": "2.0.2", "@radix-ui/react-use-callback-ref": "1.1.0", - "@radix-ui/react-use-layout-effect": "1.1.0", - "@radix-ui/react-use-rect": "1.1.0", - "@radix-ui/react-use-size": "1.1.0", - "@radix-ui/rect": "1.1.0" + "@radix-ui/react-use-escape-keydown": "1.1.0" }, "peerDependencies": { "@types/react": "*", @@ -2271,7 
+2457,32 @@ } } }, - "node_modules/@radix-ui/react-portal": { + "node_modules/@radix-ui/react-menu/node_modules/@radix-ui/react-focus-scope": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-focus-scope/-/react-focus-scope-1.1.2.tgz", + "integrity": "sha512-zxwE80FCU7lcXUGWkdt6XpTTCKPitG1XKOwViTxHVKIJhZl9MvIl2dVHeZENCWD9+EdWv05wlaEkRXUykU27RA==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.1", + "@radix-ui/react-primitive": "2.0.2", + "@radix-ui/react-use-callback-ref": "1.1.0" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-menu/node_modules/@radix-ui/react-portal": { "version": "1.1.4", "resolved": "https://registry.npmjs.org/@radix-ui/react-portal/-/react-portal-1.1.4.tgz", "integrity": "sha512-sn2O9k1rPFYVyKd5LAJfo96JlSGVFpa1fS6UuBJfrZadudiw5tAmru+n1x7aMRQ84qDM71Zh1+SzK5QwU0tJfA==", @@ -2295,6 +2506,376 @@ } } }, + "node_modules/@radix-ui/react-menu/node_modules/@radix-ui/react-use-escape-keydown": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-escape-keydown/-/react-use-escape-keydown-1.1.0.tgz", + "integrity": "sha512-L7vwWlR1kTTQ3oh7g1O0CBF3YCyyTj8NmhLR+phShpyA50HCfBFKVJTpshm9PzLiKmehsrQzTYTpX9HvmC9rhw==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-use-callback-ref": "1.1.0" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-popover": { + "version": "1.1.15", + "resolved": 
"https://registry.npmjs.org/@radix-ui/react-popover/-/react-popover-1.1.15.tgz", + "integrity": "sha512-kr0X2+6Yy/vJzLYJUPCZEc8SfQcf+1COFoAqauJm74umQhta9M7lNJHP7QQS3vkvcGLQUbWpMzwrXYwrYztHKA==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-dismissable-layer": "1.1.11", + "@radix-ui/react-focus-guards": "1.1.3", + "@radix-ui/react-focus-scope": "1.1.7", + "@radix-ui/react-id": "1.1.1", + "@radix-ui/react-popper": "1.2.8", + "@radix-ui/react-portal": "1.1.9", + "@radix-ui/react-presence": "1.1.5", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-slot": "1.2.3", + "@radix-ui/react-use-controllable-state": "1.2.2", + "aria-hidden": "^1.2.4", + "react-remove-scroll": "^2.6.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-popover/node_modules/@radix-ui/primitive": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/primitive/-/primitive-1.1.3.tgz", + "integrity": "sha512-JTF99U/6XIjCBo0wqkU5sK10glYe27MRRsfwoiq5zzOEZLHU3A3KCMa5X/azekYRCJ0HlwI0crAXS/5dEHTzDg==", + "license": "MIT" + }, + "node_modules/@radix-ui/react-popover/node_modules/@radix-ui/react-context": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-context/-/react-context-1.1.2.tgz", + "integrity": "sha512-jCi/QKUM2r1Ju5a3J64TH2A5SpKAgh0LpknyqdQ4m6DCV0xJ2HG1xARRwNGPQfi1SLdLWZ1OJz6F4OMBBNiGJA==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + 
}, + "node_modules/@radix-ui/react-popover/node_modules/@radix-ui/react-focus-guards": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-focus-guards/-/react-focus-guards-1.1.3.tgz", + "integrity": "sha512-0rFg/Rj2Q62NCm62jZw0QX7a3sz6QCQU0LpZdNrJX8byRGaGVTqbrW9jAoIAHyMQqsNpeZ81YgSizOt5WXq0Pw==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-popover/node_modules/@radix-ui/react-id": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-id/-/react-id-1.1.1.tgz", + "integrity": "sha512-kGkGegYIdQsOb4XjsfM97rXsiHaBwco+hFI66oO4s9LU+PLAC5oJ7khdOVFxkhsmlbpUqDAvXw11CluXP+jkHg==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-use-layout-effect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-popover/node_modules/@radix-ui/react-presence": { + "version": "1.1.5", + "resolved": "https://registry.npmjs.org/@radix-ui/react-presence/-/react-presence-1.1.5.tgz", + "integrity": "sha512-/jfEwNDdQVBCNvjkGit4h6pMOzq8bHkopq458dPt2lMjx+eBQUohZNG9A7DtO/O5ukSbxuaNGXMjHicgwy6rQQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-use-layout-effect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-popover/node_modules/@radix-ui/react-primitive": { + "version": 
"2.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz", + "integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-slot": "1.2.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-popover/node_modules/@radix-ui/react-use-controllable-state": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-controllable-state/-/react-use-controllable-state-1.2.2.tgz", + "integrity": "sha512-BjasUjixPFdS+NKkypcyyN5Pmg83Olst0+c6vGov0diwTEo6mgdqVR6hxcEgFuh4QrAs7Rc+9KuGJ9TVCj0Zzg==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-use-effect-event": "0.0.2", + "@radix-ui/react-use-layout-effect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-popover/node_modules/@radix-ui/react-use-layout-effect": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-layout-effect/-/react-use-layout-effect-1.1.1.tgz", + "integrity": "sha512-RbJRS4UWQFkzHTTwVymMTUv8EqYhOp8dOOviLj2ugtTiXRaRQS7GLGxZTLL1jWhMeoSCf5zmcZkqTl9IiYfXcQ==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-popper": { + "version": "1.2.8", + "resolved": 
"https://registry.npmjs.org/@radix-ui/react-popper/-/react-popper-1.2.8.tgz", + "integrity": "sha512-0NJQ4LFFUuWkE7Oxf0htBKS6zLkkjBH+hM1uk7Ng705ReR8m/uelduy1DBo0PyBXPKVnBA6YBlU94MBGXrSBCw==", + "license": "MIT", + "dependencies": { + "@floating-ui/react-dom": "^2.0.0", + "@radix-ui/react-arrow": "1.1.7", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-callback-ref": "1.1.1", + "@radix-ui/react-use-layout-effect": "1.1.1", + "@radix-ui/react-use-rect": "1.1.1", + "@radix-ui/react-use-size": "1.1.1", + "@radix-ui/rect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-popper/node_modules/@radix-ui/react-context": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-context/-/react-context-1.1.2.tgz", + "integrity": "sha512-jCi/QKUM2r1Ju5a3J64TH2A5SpKAgh0LpknyqdQ4m6DCV0xJ2HG1xARRwNGPQfi1SLdLWZ1OJz6F4OMBBNiGJA==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-popper/node_modules/@radix-ui/react-primitive": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz", + "integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-slot": "1.2.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || 
^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-popper/node_modules/@radix-ui/react-use-callback-ref": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-callback-ref/-/react-use-callback-ref-1.1.1.tgz", + "integrity": "sha512-FkBMwD+qbGQeMu1cOHnuGB6x4yzPjho8ap5WtbEJ26umhgqVXbhekKUQO+hZEL1vU92a3wHwdp0HAcqAUF5iDg==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-popper/node_modules/@radix-ui/react-use-layout-effect": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-layout-effect/-/react-use-layout-effect-1.1.1.tgz", + "integrity": "sha512-RbJRS4UWQFkzHTTwVymMTUv8EqYhOp8dOOviLj2ugtTiXRaRQS7GLGxZTLL1jWhMeoSCf5zmcZkqTl9IiYfXcQ==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-popper/node_modules/@radix-ui/react-use-size": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-size/-/react-use-size-1.1.1.tgz", + "integrity": "sha512-ewrXRDTAqAXlkl6t/fkXWNAhFX9I+CkKlw6zjEwk86RSPKwZr3xpBRso655aqYafwtnbpHLj6toFzmd6xdVptQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-use-layout-effect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-portal": { + "version": "1.1.9", + "resolved": 
"https://registry.npmjs.org/@radix-ui/react-portal/-/react-portal-1.1.9.tgz", + "integrity": "sha512-bpIxvq03if6UNwXZ+HTK71JLh4APvnXntDc6XOX8UVq4XQOVl7lwok0AvIl+b8zgCw3fSaVTZMpAPPagXbKmHQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-layout-effect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-portal/node_modules/@radix-ui/react-primitive": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz", + "integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-slot": "1.2.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-portal/node_modules/@radix-ui/react-use-layout-effect": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-layout-effect/-/react-use-layout-effect-1.1.1.tgz", + "integrity": "sha512-RbJRS4UWQFkzHTTwVymMTUv8EqYhOp8dOOviLj2ugtTiXRaRQS7GLGxZTLL1jWhMeoSCf5zmcZkqTl9IiYfXcQ==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, "node_modules/@radix-ui/react-presence": { "version": "1.1.2", 
"resolved": "https://registry.npmjs.org/@radix-ui/react-presence/-/react-presence-1.1.2.tgz", @@ -2342,40 +2923,14 @@ } } }, - "node_modules/@radix-ui/react-primitive/node_modules/@radix-ui/react-slot": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.1.2.tgz", - "integrity": "sha512-YAKxaiGsSQJ38VzKH86/BPRC4rh+b1Jpa+JneA5LRE7skmLPNAyeG8kPJj/oo4STLvlrs8vkf/iYyc3A5stYCQ==", + "node_modules/@radix-ui/react-progress": { + "version": "1.1.8", + "resolved": "https://registry.npmjs.org/@radix-ui/react-progress/-/react-progress-1.1.8.tgz", + "integrity": "sha512-+gISHcSPUJ7ktBy9RnTqbdKW78bcGke3t6taawyZ71pio1JewwGSJizycs7rLhGTvMJYCQB1DBK4KQsxs7U8dA==", "license": "MIT", "dependencies": { - "@radix-ui/react-compose-refs": "1.1.1" - }, - "peerDependencies": { - "@types/react": "*", - "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" - }, - "peerDependenciesMeta": { - "@types/react": { - "optional": true - } - } - }, - "node_modules/@radix-ui/react-radio-group": { - "version": "1.2.3", - "resolved": "https://registry.npmjs.org/@radix-ui/react-radio-group/-/react-radio-group-1.2.3.tgz", - "integrity": "sha512-xtCsqt8Rp09FK50ItqEqTJ7Sxanz8EM8dnkVIhJrc/wkMMomSmXHvYbhv3E7Zx4oXh98aaLt9W679SUYXg4IDA==", - "license": "MIT", - "dependencies": { - "@radix-ui/primitive": "1.1.1", - "@radix-ui/react-compose-refs": "1.1.1", - "@radix-ui/react-context": "1.1.1", - "@radix-ui/react-direction": "1.1.0", - "@radix-ui/react-presence": "1.1.2", - "@radix-ui/react-primitive": "2.0.2", - "@radix-ui/react-roving-focus": "1.1.2", - "@radix-ui/react-use-controllable-state": "1.1.0", - "@radix-ui/react-use-previous": "1.1.0", - "@radix-ui/react-use-size": "1.1.0" + "@radix-ui/react-context": "1.1.3", + "@radix-ui/react-primitive": "2.1.4" }, "peerDependencies": { "@types/react": "*", @@ -2392,6 +2947,316 @@ } } }, + "node_modules/@radix-ui/react-progress/node_modules/@radix-ui/react-context": { + "version": "1.1.3", + "resolved": 
"https://registry.npmjs.org/@radix-ui/react-context/-/react-context-1.1.3.tgz", + "integrity": "sha512-ieIFACdMpYfMEjF0rEf5KLvfVyIkOz6PDGyNnP+u+4xQ6jny3VCgA4OgXOwNx2aUkxn8zx9fiVcM8CfFYv9Lxw==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-progress/node_modules/@radix-ui/react-primitive": { + "version": "2.1.4", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.4.tgz", + "integrity": "sha512-9hQc4+GNVtJAIEPEqlYqW5RiYdrr8ea5XQ0ZOnD6fgru+83kqT15mq2OCcbe8KnjRZl5vF3ks69AKz3kh1jrhg==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-slot": "1.2.4" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-radio-group": { + "version": "1.3.8", + "resolved": "https://registry.npmjs.org/@radix-ui/react-radio-group/-/react-radio-group-1.3.8.tgz", + "integrity": "sha512-VBKYIYImA5zsxACdisNQ3BjCBfmbGH3kQlnFVqlWU4tXwjy7cGX8ta80BcrO+WJXIn5iBylEH3K6ZTlee//lgQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-direction": "1.1.1", + "@radix-ui/react-presence": "1.1.5", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-roving-focus": "1.1.11", + "@radix-ui/react-use-controllable-state": "1.2.2", + "@radix-ui/react-use-previous": "1.1.1", + "@radix-ui/react-use-size": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || 
^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-radio-group/node_modules/@radix-ui/primitive": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/primitive/-/primitive-1.1.3.tgz", + "integrity": "sha512-JTF99U/6XIjCBo0wqkU5sK10glYe27MRRsfwoiq5zzOEZLHU3A3KCMa5X/azekYRCJ0HlwI0crAXS/5dEHTzDg==", + "license": "MIT" + }, + "node_modules/@radix-ui/react-radio-group/node_modules/@radix-ui/react-collection": { + "version": "1.1.7", + "resolved": "https://registry.npmjs.org/@radix-ui/react-collection/-/react-collection-1.1.7.tgz", + "integrity": "sha512-Fh9rGN0MoI4ZFUNyfFVNU4y9LUz93u9/0K+yLgA2bwRojxM8JU1DyvvMBabnZPBgMWREAJvU2jjVzq+LrFUglw==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-slot": "1.2.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-radio-group/node_modules/@radix-ui/react-context": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-context/-/react-context-1.1.2.tgz", + "integrity": "sha512-jCi/QKUM2r1Ju5a3J64TH2A5SpKAgh0LpknyqdQ4m6DCV0xJ2HG1xARRwNGPQfi1SLdLWZ1OJz6F4OMBBNiGJA==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + 
"node_modules/@radix-ui/react-radio-group/node_modules/@radix-ui/react-direction": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-direction/-/react-direction-1.1.1.tgz", + "integrity": "sha512-1UEWRX6jnOA2y4H5WczZ44gOOjTEmlqv1uNW4GAJEO5+bauCBhv8snY65Iw5/VOS/ghKN9gr2KjnLKxrsvoMVw==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-radio-group/node_modules/@radix-ui/react-id": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-id/-/react-id-1.1.1.tgz", + "integrity": "sha512-kGkGegYIdQsOb4XjsfM97rXsiHaBwco+hFI66oO4s9LU+PLAC5oJ7khdOVFxkhsmlbpUqDAvXw11CluXP+jkHg==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-use-layout-effect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-radio-group/node_modules/@radix-ui/react-presence": { + "version": "1.1.5", + "resolved": "https://registry.npmjs.org/@radix-ui/react-presence/-/react-presence-1.1.5.tgz", + "integrity": "sha512-/jfEwNDdQVBCNvjkGit4h6pMOzq8bHkopq458dPt2lMjx+eBQUohZNG9A7DtO/O5ukSbxuaNGXMjHicgwy6rQQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-use-layout-effect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-radio-group/node_modules/@radix-ui/react-primitive": { + "version": 
"2.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz", + "integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-slot": "1.2.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-radio-group/node_modules/@radix-ui/react-roving-focus": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/@radix-ui/react-roving-focus/-/react-roving-focus-1.1.11.tgz", + "integrity": "sha512-7A6S9jSgm/S+7MdtNDSb+IU859vQqJ/QAtcYQcfFC6W8RS4IxIZDldLR0xqCFZ6DCyrQLjLPsxtTNch5jVA4lA==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-collection": "1.1.7", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-direction": "1.1.1", + "@radix-ui/react-id": "1.1.1", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-callback-ref": "1.1.1", + "@radix-ui/react-use-controllable-state": "1.2.2" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-radio-group/node_modules/@radix-ui/react-use-callback-ref": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-callback-ref/-/react-use-callback-ref-1.1.1.tgz", + "integrity": 
"sha512-FkBMwD+qbGQeMu1cOHnuGB6x4yzPjho8ap5WtbEJ26umhgqVXbhekKUQO+hZEL1vU92a3wHwdp0HAcqAUF5iDg==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-radio-group/node_modules/@radix-ui/react-use-controllable-state": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-controllable-state/-/react-use-controllable-state-1.2.2.tgz", + "integrity": "sha512-BjasUjixPFdS+NKkypcyyN5Pmg83Olst0+c6vGov0diwTEo6mgdqVR6hxcEgFuh4QrAs7Rc+9KuGJ9TVCj0Zzg==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-use-effect-event": "0.0.2", + "@radix-ui/react-use-layout-effect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-radio-group/node_modules/@radix-ui/react-use-layout-effect": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-layout-effect/-/react-use-layout-effect-1.1.1.tgz", + "integrity": "sha512-RbJRS4UWQFkzHTTwVymMTUv8EqYhOp8dOOviLj2ugtTiXRaRQS7GLGxZTLL1jWhMeoSCf5zmcZkqTl9IiYfXcQ==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-radio-group/node_modules/@radix-ui/react-use-previous": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-previous/-/react-use-previous-1.1.1.tgz", + "integrity": "sha512-2dHfToCj/pzca2Ck724OZ5L0EVrr3eHRNsG/b3xQJLA2hZpVCS99bLAX+hm1IHXDEnzU6by5z/5MIY794/a8NQ==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || 
^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-radio-group/node_modules/@radix-ui/react-use-size": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-size/-/react-use-size-1.1.1.tgz", + "integrity": "sha512-ewrXRDTAqAXlkl6t/fkXWNAhFX9I+CkKlw6zjEwk86RSPKwZr3xpBRso655aqYafwtnbpHLj6toFzmd6xdVptQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-use-layout-effect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, "node_modules/@radix-ui/react-roving-focus": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/@radix-ui/react-roving-focus/-/react-roving-focus-1.1.2.tgz", @@ -2455,30 +3320,30 @@ } }, "node_modules/@radix-ui/react-select": { - "version": "2.1.6", - "resolved": "https://registry.npmjs.org/@radix-ui/react-select/-/react-select-2.1.6.tgz", - "integrity": "sha512-T6ajELxRvTuAMWH0YmRJ1qez+x4/7Nq7QIx7zJ0VK3qaEWdnWpNbEDnmWldG1zBDwqrLy5aLMUWcoGirVj5kMg==", + "version": "2.2.6", + "resolved": "https://registry.npmjs.org/@radix-ui/react-select/-/react-select-2.2.6.tgz", + "integrity": "sha512-I30RydO+bnn2PQztvo25tswPH+wFBjehVGtmagkU78yMdwTwVf12wnAOF+AeP8S2N8xD+5UPbGhkUfPyvT+mwQ==", "license": "MIT", "dependencies": { - "@radix-ui/number": "1.1.0", - "@radix-ui/primitive": "1.1.1", - "@radix-ui/react-collection": "1.1.2", - "@radix-ui/react-compose-refs": "1.1.1", - "@radix-ui/react-context": "1.1.1", - "@radix-ui/react-direction": "1.1.0", - "@radix-ui/react-dismissable-layer": "1.1.5", - "@radix-ui/react-focus-guards": "1.1.1", - "@radix-ui/react-focus-scope": "1.1.2", - "@radix-ui/react-id": "1.1.0", - "@radix-ui/react-popper": "1.2.2", - "@radix-ui/react-portal": "1.1.4", - "@radix-ui/react-primitive": "2.0.2", - "@radix-ui/react-slot": "1.1.2", - 
"@radix-ui/react-use-callback-ref": "1.1.0", - "@radix-ui/react-use-controllable-state": "1.1.0", - "@radix-ui/react-use-layout-effect": "1.1.0", - "@radix-ui/react-use-previous": "1.1.0", - "@radix-ui/react-visually-hidden": "1.1.2", + "@radix-ui/number": "1.1.1", + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-collection": "1.1.7", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-direction": "1.1.1", + "@radix-ui/react-dismissable-layer": "1.1.11", + "@radix-ui/react-focus-guards": "1.1.3", + "@radix-ui/react-focus-scope": "1.1.7", + "@radix-ui/react-id": "1.1.1", + "@radix-ui/react-popper": "1.2.8", + "@radix-ui/react-portal": "1.1.9", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-slot": "1.2.3", + "@radix-ui/react-use-callback-ref": "1.1.1", + "@radix-ui/react-use-controllable-state": "1.2.2", + "@radix-ui/react-use-layout-effect": "1.1.1", + "@radix-ui/react-use-previous": "1.1.1", + "@radix-ui/react-visually-hidden": "1.2.3", "aria-hidden": "^1.2.4", "react-remove-scroll": "^2.6.3" }, @@ -2497,31 +3362,28 @@ } } }, - "node_modules/@radix-ui/react-select/node_modules/@radix-ui/react-slot": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.1.2.tgz", - "integrity": "sha512-YAKxaiGsSQJ38VzKH86/BPRC4rh+b1Jpa+JneA5LRE7skmLPNAyeG8kPJj/oo4STLvlrs8vkf/iYyc3A5stYCQ==", - "license": "MIT", - "dependencies": { - "@radix-ui/react-compose-refs": "1.1.1" - }, - "peerDependencies": { - "@types/react": "*", - "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" - }, - "peerDependenciesMeta": { - "@types/react": { - "optional": true - } - } + "node_modules/@radix-ui/react-select/node_modules/@radix-ui/number": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/number/-/number-1.1.1.tgz", + "integrity": "sha512-MkKCwxlXTgz6CFoJx3pCwn07GKp36+aZyu/u2Ln2VrA5DcdyCZkASEDBTd8x5whTQQL5CiYf4prXKLcgQdv29g==", + "license": "MIT" }, - 
"node_modules/@radix-ui/react-separator": { + "node_modules/@radix-ui/react-select/node_modules/@radix-ui/primitive": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/primitive/-/primitive-1.1.3.tgz", + "integrity": "sha512-JTF99U/6XIjCBo0wqkU5sK10glYe27MRRsfwoiq5zzOEZLHU3A3KCMa5X/azekYRCJ0HlwI0crAXS/5dEHTzDg==", + "license": "MIT" + }, + "node_modules/@radix-ui/react-select/node_modules/@radix-ui/react-collection": { "version": "1.1.7", - "resolved": "https://registry.npmjs.org/@radix-ui/react-separator/-/react-separator-1.1.7.tgz", - "integrity": "sha512-0HEb8R9E8A+jZjvmFCy/J4xhbXy3TV+9XSnGJ3KvTtjlIUy/YQ/p6UYZvi7YbeoeXdyU9+Y3scizK6hkY37baA==", + "resolved": "https://registry.npmjs.org/@radix-ui/react-collection/-/react-collection-1.1.7.tgz", + "integrity": "sha512-Fh9rGN0MoI4ZFUNyfFVNU4y9LUz93u9/0K+yLgA2bwRojxM8JU1DyvvMBabnZPBgMWREAJvU2jjVzq+LrFUglw==", "license": "MIT", "dependencies": { - "@radix-ui/react-primitive": "2.1.3" + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-slot": "1.2.3" }, "peerDependencies": { "@types/react": "*", @@ -2538,7 +3400,70 @@ } } }, - "node_modules/@radix-ui/react-separator/node_modules/@radix-ui/react-primitive": { + "node_modules/@radix-ui/react-select/node_modules/@radix-ui/react-context": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-context/-/react-context-1.1.2.tgz", + "integrity": "sha512-jCi/QKUM2r1Ju5a3J64TH2A5SpKAgh0LpknyqdQ4m6DCV0xJ2HG1xARRwNGPQfi1SLdLWZ1OJz6F4OMBBNiGJA==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-select/node_modules/@radix-ui/react-direction": { + "version": "1.1.1", + "resolved": 
"https://registry.npmjs.org/@radix-ui/react-direction/-/react-direction-1.1.1.tgz", + "integrity": "sha512-1UEWRX6jnOA2y4H5WczZ44gOOjTEmlqv1uNW4GAJEO5+bauCBhv8snY65Iw5/VOS/ghKN9gr2KjnLKxrsvoMVw==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-select/node_modules/@radix-ui/react-focus-guards": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-focus-guards/-/react-focus-guards-1.1.3.tgz", + "integrity": "sha512-0rFg/Rj2Q62NCm62jZw0QX7a3sz6QCQU0LpZdNrJX8byRGaGVTqbrW9jAoIAHyMQqsNpeZ81YgSizOt5WXq0Pw==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-select/node_modules/@radix-ui/react-id": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-id/-/react-id-1.1.1.tgz", + "integrity": "sha512-kGkGegYIdQsOb4XjsfM97rXsiHaBwco+hFI66oO4s9LU+PLAC5oJ7khdOVFxkhsmlbpUqDAvXw11CluXP+jkHg==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-use-layout-effect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-select/node_modules/@radix-ui/react-primitive": { "version": "2.1.3", "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz", "integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==", @@ -2561,23 +3486,77 @@ } } }, - "node_modules/@radix-ui/react-slider": { - "version": "1.2.3", - "resolved": 
"https://registry.npmjs.org/@radix-ui/react-slider/-/react-slider-1.2.3.tgz", - "integrity": "sha512-nNrLAWLjGESnhqBqcCNW4w2nn7LxudyMzeB6VgdyAnFLC6kfQgnAjSL2v6UkQTnDctJBlxrmxfplWS4iYjdUTw==", + "node_modules/@radix-ui/react-select/node_modules/@radix-ui/react-use-callback-ref": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-callback-ref/-/react-use-callback-ref-1.1.1.tgz", + "integrity": "sha512-FkBMwD+qbGQeMu1cOHnuGB6x4yzPjho8ap5WtbEJ26umhgqVXbhekKUQO+hZEL1vU92a3wHwdp0HAcqAUF5iDg==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-select/node_modules/@radix-ui/react-use-controllable-state": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-controllable-state/-/react-use-controllable-state-1.2.2.tgz", + "integrity": "sha512-BjasUjixPFdS+NKkypcyyN5Pmg83Olst0+c6vGov0diwTEo6mgdqVR6hxcEgFuh4QrAs7Rc+9KuGJ9TVCj0Zzg==", "license": "MIT", "dependencies": { - "@radix-ui/number": "1.1.0", - "@radix-ui/primitive": "1.1.1", - "@radix-ui/react-collection": "1.1.2", - "@radix-ui/react-compose-refs": "1.1.1", - "@radix-ui/react-context": "1.1.1", - "@radix-ui/react-direction": "1.1.0", - "@radix-ui/react-primitive": "2.0.2", - "@radix-ui/react-use-controllable-state": "1.1.0", - "@radix-ui/react-use-layout-effect": "1.1.0", - "@radix-ui/react-use-previous": "1.1.0", - "@radix-ui/react-use-size": "1.1.0" + "@radix-ui/react-use-effect-event": "0.0.2", + "@radix-ui/react-use-layout-effect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-select/node_modules/@radix-ui/react-use-layout-effect": { + "version": "1.1.1", + "resolved": 
"https://registry.npmjs.org/@radix-ui/react-use-layout-effect/-/react-use-layout-effect-1.1.1.tgz", + "integrity": "sha512-RbJRS4UWQFkzHTTwVymMTUv8EqYhOp8dOOviLj2ugtTiXRaRQS7GLGxZTLL1jWhMeoSCf5zmcZkqTl9IiYfXcQ==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-select/node_modules/@radix-ui/react-use-previous": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-previous/-/react-use-previous-1.1.1.tgz", + "integrity": "sha512-2dHfToCj/pzca2Ck724OZ5L0EVrr3eHRNsG/b3xQJLA2hZpVCS99bLAX+hm1IHXDEnzU6by5z/5MIY794/a8NQ==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-separator": { + "version": "1.1.8", + "resolved": "https://registry.npmjs.org/@radix-ui/react-separator/-/react-separator-1.1.8.tgz", + "integrity": "sha512-sDvqVY4itsKwwSMEe0jtKgfTh+72Sy3gPmQpjqcQneqQ4PFmr/1I0YA+2/puilhggCe2gJcx5EBAYFkWkdpa5g==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-primitive": "2.1.4" }, "peerDependencies": { "@types/react": "*", @@ -2594,13 +3573,161 @@ } } }, - "node_modules/@radix-ui/react-slot": { - "version": "1.2.3", - "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.3.tgz", - "integrity": "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==", + "node_modules/@radix-ui/react-separator/node_modules/@radix-ui/react-primitive": { + "version": "2.1.4", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.4.tgz", + "integrity": "sha512-9hQc4+GNVtJAIEPEqlYqW5RiYdrr8ea5XQ0ZOnD6fgru+83kqT15mq2OCcbe8KnjRZl5vF3ks69AKz3kh1jrhg==", "license": 
"MIT", "dependencies": { - "@radix-ui/react-compose-refs": "1.1.2" + "@radix-ui/react-slot": "1.2.4" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-slider": { + "version": "1.3.6", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slider/-/react-slider-1.3.6.tgz", + "integrity": "sha512-JPYb1GuM1bxfjMRlNLE+BcmBC8onfCi60Blk7OBqi2MLTFdS+8401U4uFjnwkOr49BLmXxLC6JHkvAsx5OJvHw==", + "license": "MIT", + "dependencies": { + "@radix-ui/number": "1.1.1", + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-collection": "1.1.7", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-direction": "1.1.1", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-controllable-state": "1.2.2", + "@radix-ui/react-use-layout-effect": "1.1.1", + "@radix-ui/react-use-previous": "1.1.1", + "@radix-ui/react-use-size": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-slider/node_modules/@radix-ui/number": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/number/-/number-1.1.1.tgz", + "integrity": "sha512-MkKCwxlXTgz6CFoJx3pCwn07GKp36+aZyu/u2Ln2VrA5DcdyCZkASEDBTd8x5whTQQL5CiYf4prXKLcgQdv29g==", + "license": "MIT" + }, + "node_modules/@radix-ui/react-slider/node_modules/@radix-ui/primitive": { + "version": "1.1.3", + "resolved": 
"https://registry.npmjs.org/@radix-ui/primitive/-/primitive-1.1.3.tgz", + "integrity": "sha512-JTF99U/6XIjCBo0wqkU5sK10glYe27MRRsfwoiq5zzOEZLHU3A3KCMa5X/azekYRCJ0HlwI0crAXS/5dEHTzDg==", + "license": "MIT" + }, + "node_modules/@radix-ui/react-slider/node_modules/@radix-ui/react-collection": { + "version": "1.1.7", + "resolved": "https://registry.npmjs.org/@radix-ui/react-collection/-/react-collection-1.1.7.tgz", + "integrity": "sha512-Fh9rGN0MoI4ZFUNyfFVNU4y9LUz93u9/0K+yLgA2bwRojxM8JU1DyvvMBabnZPBgMWREAJvU2jjVzq+LrFUglw==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-slot": "1.2.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-slider/node_modules/@radix-ui/react-context": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-context/-/react-context-1.1.2.tgz", + "integrity": "sha512-jCi/QKUM2r1Ju5a3J64TH2A5SpKAgh0LpknyqdQ4m6DCV0xJ2HG1xARRwNGPQfi1SLdLWZ1OJz6F4OMBBNiGJA==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-slider/node_modules/@radix-ui/react-direction": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-direction/-/react-direction-1.1.1.tgz", + "integrity": "sha512-1UEWRX6jnOA2y4H5WczZ44gOOjTEmlqv1uNW4GAJEO5+bauCBhv8snY65Iw5/VOS/ghKN9gr2KjnLKxrsvoMVw==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || 
^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-slider/node_modules/@radix-ui/react-primitive": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz", + "integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-slot": "1.2.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-slider/node_modules/@radix-ui/react-use-controllable-state": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-controllable-state/-/react-use-controllable-state-1.2.2.tgz", + "integrity": "sha512-BjasUjixPFdS+NKkypcyyN5Pmg83Olst0+c6vGov0diwTEo6mgdqVR6hxcEgFuh4QrAs7Rc+9KuGJ9TVCj0Zzg==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-use-effect-event": "0.0.2", + "@radix-ui/react-use-layout-effect": "1.1.1" }, "peerDependencies": { "@types/react": "*", @@ -2612,10 +3739,10 @@ } } }, - "node_modules/@radix-ui/react-slot/node_modules/@radix-ui/react-compose-refs": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@radix-ui/react-compose-refs/-/react-compose-refs-1.1.2.tgz", - "integrity": "sha512-z4eqJvfiNnFMHIIvXP3CY57y2WJs5g2v3X0zm9mEJkrkNv4rDxu+sg9Jh8EkXyeqBkB7SOcboo9dMVqhyrACIg==", + "node_modules/@radix-ui/react-slider/node_modules/@radix-ui/react-use-layout-effect": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-layout-effect/-/react-use-layout-effect-1.1.1.tgz", + "integrity": 
"sha512-RbJRS4UWQFkzHTTwVymMTUv8EqYhOp8dOOviLj2ugtTiXRaRQS7GLGxZTLL1jWhMeoSCf5zmcZkqTl9IiYfXcQ==", "license": "MIT", "peerDependencies": { "@types/react": "*", @@ -2627,6 +3754,57 @@ } } }, + "node_modules/@radix-ui/react-slider/node_modules/@radix-ui/react-use-previous": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-previous/-/react-use-previous-1.1.1.tgz", + "integrity": "sha512-2dHfToCj/pzca2Ck724OZ5L0EVrr3eHRNsG/b3xQJLA2hZpVCS99bLAX+hm1IHXDEnzU6by5z/5MIY794/a8NQ==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-slider/node_modules/@radix-ui/react-use-size": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-size/-/react-use-size-1.1.1.tgz", + "integrity": "sha512-ewrXRDTAqAXlkl6t/fkXWNAhFX9I+CkKlw6zjEwk86RSPKwZr3xpBRso655aqYafwtnbpHLj6toFzmd6xdVptQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-use-layout-effect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-slot": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.4.tgz", + "integrity": "sha512-Jl+bCv8HxKnlTLVrcDE8zTMJ09R9/ukw4qBs/oZClOfoQk/cOTbDn+NceXfV7j09YPVQUryJPHurafcSg6EVKA==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, "node_modules/@radix-ui/react-switch": { "version": "1.1.3", "resolved": 
"https://registry.npmjs.org/@radix-ui/react-switch/-/react-switch-1.1.3.tgz", @@ -2687,14 +3865,14 @@ } }, "node_modules/@radix-ui/react-toggle": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@radix-ui/react-toggle/-/react-toggle-1.1.2.tgz", - "integrity": "sha512-lntKchNWx3aCHuWKiDY+8WudiegQvBpDRAYL8dKLRvKEH8VOpl0XX6SSU/bUBqIRJbcTy4+MW06Wv8vgp10rzQ==", + "version": "1.1.10", + "resolved": "https://registry.npmjs.org/@radix-ui/react-toggle/-/react-toggle-1.1.10.tgz", + "integrity": "sha512-lS1odchhFTeZv3xwHH31YPObmJn8gOg7Lq12inrr0+BH/l3Tsq32VfjqH1oh80ARM3mlkfMic15n0kg4sD1poQ==", "license": "MIT", "dependencies": { - "@radix-ui/primitive": "1.1.1", - "@radix-ui/react-primitive": "2.0.2", - "@radix-ui/react-use-controllable-state": "1.1.0" + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-controllable-state": "1.2.2" }, "peerDependencies": { "@types/react": "*", @@ -2740,6 +3918,94 @@ } } }, + "node_modules/@radix-ui/react-toggle-group/node_modules/@radix-ui/react-toggle": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-toggle/-/react-toggle-1.1.2.tgz", + "integrity": "sha512-lntKchNWx3aCHuWKiDY+8WudiegQvBpDRAYL8dKLRvKEH8VOpl0XX6SSU/bUBqIRJbcTy4+MW06Wv8vgp10rzQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.1", + "@radix-ui/react-primitive": "2.0.2", + "@radix-ui/react-use-controllable-state": "1.1.0" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-toggle/node_modules/@radix-ui/primitive": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/primitive/-/primitive-1.1.3.tgz", + "integrity": 
"sha512-JTF99U/6XIjCBo0wqkU5sK10glYe27MRRsfwoiq5zzOEZLHU3A3KCMa5X/azekYRCJ0HlwI0crAXS/5dEHTzDg==", + "license": "MIT" + }, + "node_modules/@radix-ui/react-toggle/node_modules/@radix-ui/react-primitive": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz", + "integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-slot": "1.2.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-toggle/node_modules/@radix-ui/react-use-controllable-state": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-controllable-state/-/react-use-controllable-state-1.2.2.tgz", + "integrity": "sha512-BjasUjixPFdS+NKkypcyyN5Pmg83Olst0+c6vGov0diwTEo6mgdqVR6hxcEgFuh4QrAs7Rc+9KuGJ9TVCj0Zzg==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-use-effect-event": "0.0.2", + "@radix-ui/react-use-layout-effect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-toggle/node_modules/@radix-ui/react-use-layout-effect": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-layout-effect/-/react-use-layout-effect-1.1.1.tgz", + "integrity": "sha512-RbJRS4UWQFkzHTTwVymMTUv8EqYhOp8dOOviLj2ugtTiXRaRQS7GLGxZTLL1jWhMeoSCf5zmcZkqTl9IiYfXcQ==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || 
^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, "node_modules/@radix-ui/react-tooltip": { "version": "1.2.8", "resolved": "https://registry.npmjs.org/@radix-ui/react-tooltip/-/react-tooltip-1.2.8.tgz", @@ -2780,44 +4046,6 @@ "integrity": "sha512-JTF99U/6XIjCBo0wqkU5sK10glYe27MRRsfwoiq5zzOEZLHU3A3KCMa5X/azekYRCJ0HlwI0crAXS/5dEHTzDg==", "license": "MIT" }, - "node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-arrow": { - "version": "1.1.7", - "resolved": "https://registry.npmjs.org/@radix-ui/react-arrow/-/react-arrow-1.1.7.tgz", - "integrity": "sha512-F+M1tLhO+mlQaOWspE8Wstg+z6PwxwRd8oQ8IXceWz92kfAmalTRf0EjrouQeo7QssEPfCn05B4Ihs1K9WQ/7w==", - "license": "MIT", - "dependencies": { - "@radix-ui/react-primitive": "2.1.3" - }, - "peerDependencies": { - "@types/react": "*", - "@types/react-dom": "*", - "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", - "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" - }, - "peerDependenciesMeta": { - "@types/react": { - "optional": true - }, - "@types/react-dom": { - "optional": true - } - } - }, - "node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-compose-refs": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@radix-ui/react-compose-refs/-/react-compose-refs-1.1.2.tgz", - "integrity": "sha512-z4eqJvfiNnFMHIIvXP3CY57y2WJs5g2v3X0zm9mEJkrkNv4rDxu+sg9Jh8EkXyeqBkB7SOcboo9dMVqhyrACIg==", - "license": "MIT", - "peerDependencies": { - "@types/react": "*", - "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" - }, - "peerDependenciesMeta": { - "@types/react": { - "optional": true - } - } - }, "node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-context": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/@radix-ui/react-context/-/react-context-1.1.2.tgz", @@ -2833,33 +4061,6 @@ } } }, - "node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-dismissable-layer": { - "version": "1.1.11", 
- "resolved": "https://registry.npmjs.org/@radix-ui/react-dismissable-layer/-/react-dismissable-layer-1.1.11.tgz", - "integrity": "sha512-Nqcp+t5cTB8BinFkZgXiMJniQH0PsUt2k51FUhbdfeKvc4ACcG2uQniY/8+h1Yv6Kza4Q7lD7PQV0z0oicE0Mg==", - "license": "MIT", - "dependencies": { - "@radix-ui/primitive": "1.1.3", - "@radix-ui/react-compose-refs": "1.1.2", - "@radix-ui/react-primitive": "2.1.3", - "@radix-ui/react-use-callback-ref": "1.1.1", - "@radix-ui/react-use-escape-keydown": "1.1.1" - }, - "peerDependencies": { - "@types/react": "*", - "@types/react-dom": "*", - "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", - "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" - }, - "peerDependenciesMeta": { - "@types/react": { - "optional": true - }, - "@types/react-dom": { - "optional": true - } - } - }, "node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-id": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/@radix-ui/react-id/-/react-id-1.1.1.tgz", @@ -2878,62 +4079,6 @@ } } }, - "node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-popper": { - "version": "1.2.8", - "resolved": "https://registry.npmjs.org/@radix-ui/react-popper/-/react-popper-1.2.8.tgz", - "integrity": "sha512-0NJQ4LFFUuWkE7Oxf0htBKS6zLkkjBH+hM1uk7Ng705ReR8m/uelduy1DBo0PyBXPKVnBA6YBlU94MBGXrSBCw==", - "license": "MIT", - "dependencies": { - "@floating-ui/react-dom": "^2.0.0", - "@radix-ui/react-arrow": "1.1.7", - "@radix-ui/react-compose-refs": "1.1.2", - "@radix-ui/react-context": "1.1.2", - "@radix-ui/react-primitive": "2.1.3", - "@radix-ui/react-use-callback-ref": "1.1.1", - "@radix-ui/react-use-layout-effect": "1.1.1", - "@radix-ui/react-use-rect": "1.1.1", - "@radix-ui/react-use-size": "1.1.1", - "@radix-ui/rect": "1.1.1" - }, - "peerDependencies": { - "@types/react": "*", - "@types/react-dom": "*", - "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", - "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" - }, - 
"peerDependenciesMeta": { - "@types/react": { - "optional": true - }, - "@types/react-dom": { - "optional": true - } - } - }, - "node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-portal": { - "version": "1.1.9", - "resolved": "https://registry.npmjs.org/@radix-ui/react-portal/-/react-portal-1.1.9.tgz", - "integrity": "sha512-bpIxvq03if6UNwXZ+HTK71JLh4APvnXntDc6XOX8UVq4XQOVl7lwok0AvIl+b8zgCw3fSaVTZMpAPPagXbKmHQ==", - "license": "MIT", - "dependencies": { - "@radix-ui/react-primitive": "2.1.3", - "@radix-ui/react-use-layout-effect": "1.1.1" - }, - "peerDependencies": { - "@types/react": "*", - "@types/react-dom": "*", - "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", - "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" - }, - "peerDependenciesMeta": { - "@types/react": { - "optional": true - }, - "@types/react-dom": { - "optional": true - } - } - }, "node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-presence": { "version": "1.1.5", "resolved": "https://registry.npmjs.org/@radix-ui/react-presence/-/react-presence-1.1.5.tgz", @@ -2981,21 +4126,6 @@ } } }, - "node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-use-callback-ref": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/@radix-ui/react-use-callback-ref/-/react-use-callback-ref-1.1.1.tgz", - "integrity": "sha512-FkBMwD+qbGQeMu1cOHnuGB6x4yzPjho8ap5WtbEJ26umhgqVXbhekKUQO+hZEL1vU92a3wHwdp0HAcqAUF5iDg==", - "license": "MIT", - "peerDependencies": { - "@types/react": "*", - "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" - }, - "peerDependenciesMeta": { - "@types/react": { - "optional": true - } - } - }, "node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-use-controllable-state": { "version": "1.2.2", "resolved": "https://registry.npmjs.org/@radix-ui/react-use-controllable-state/-/react-use-controllable-state-1.2.2.tgz", @@ -3015,24 +4145,6 @@ } } }, - 
"node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-use-escape-keydown": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/@radix-ui/react-use-escape-keydown/-/react-use-escape-keydown-1.1.1.tgz", - "integrity": "sha512-Il0+boE7w/XebUHyBjroE+DbByORGR9KKmITzbR7MyQ4akpORYP/ZmbhAr0DG7RmmBqoOnZdy2QlvajJ2QA59g==", - "license": "MIT", - "dependencies": { - "@radix-ui/react-use-callback-ref": "1.1.1" - }, - "peerDependencies": { - "@types/react": "*", - "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" - }, - "peerDependenciesMeta": { - "@types/react": { - "optional": true - } - } - }, "node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-use-layout-effect": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/@radix-ui/react-use-layout-effect/-/react-use-layout-effect-1.1.1.tgz", @@ -3048,71 +4160,6 @@ } } }, - "node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-use-rect": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/@radix-ui/react-use-rect/-/react-use-rect-1.1.1.tgz", - "integrity": "sha512-QTYuDesS0VtuHNNvMh+CjlKJ4LJickCMUAqjlE3+j8w+RlRpwyX3apEQKGFzbZGdo7XNG1tXa+bQqIE7HIXT2w==", - "license": "MIT", - "dependencies": { - "@radix-ui/rect": "1.1.1" - }, - "peerDependencies": { - "@types/react": "*", - "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" - }, - "peerDependenciesMeta": { - "@types/react": { - "optional": true - } - } - }, - "node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-use-size": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/@radix-ui/react-use-size/-/react-use-size-1.1.1.tgz", - "integrity": "sha512-ewrXRDTAqAXlkl6t/fkXWNAhFX9I+CkKlw6zjEwk86RSPKwZr3xpBRso655aqYafwtnbpHLj6toFzmd6xdVptQ==", - "license": "MIT", - "dependencies": { - "@radix-ui/react-use-layout-effect": "1.1.1" - }, - "peerDependencies": { - "@types/react": "*", - "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" - }, - "peerDependenciesMeta": 
{ - "@types/react": { - "optional": true - } - } - }, - "node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-visually-hidden": { - "version": "1.2.3", - "resolved": "https://registry.npmjs.org/@radix-ui/react-visually-hidden/-/react-visually-hidden-1.2.3.tgz", - "integrity": "sha512-pzJq12tEaaIhqjbzpCuv/OypJY/BPavOofm+dbab+MHLajy277+1lLm6JFcGgF5eskJ6mquGirhXY2GD/8u8Ug==", - "license": "MIT", - "dependencies": { - "@radix-ui/react-primitive": "2.1.3" - }, - "peerDependencies": { - "@types/react": "*", - "@types/react-dom": "*", - "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", - "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" - }, - "peerDependenciesMeta": { - "@types/react": { - "optional": true - }, - "@types/react-dom": { - "optional": true - } - } - }, - "node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/rect": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/@radix-ui/rect/-/rect-1.1.1.tgz", - "integrity": "sha512-HPwpGIzkl28mWyZqG52jiqDJ12waP11Pa1lGoiyUkIEuMLBP0oeK/C89esbXrxsky5we7dfd8U58nm0SgAWpVw==", - "license": "MIT" - }, "node_modules/@radix-ui/react-use-callback-ref": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/@radix-ui/react-use-callback-ref/-/react-use-callback-ref-1.1.0.tgz", @@ -3180,12 +4227,12 @@ } }, "node_modules/@radix-ui/react-use-escape-keydown": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/@radix-ui/react-use-escape-keydown/-/react-use-escape-keydown-1.1.0.tgz", - "integrity": "sha512-L7vwWlR1kTTQ3oh7g1O0CBF3YCyyTj8NmhLR+phShpyA50HCfBFKVJTpshm9PzLiKmehsrQzTYTpX9HvmC9rhw==", + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-escape-keydown/-/react-use-escape-keydown-1.1.1.tgz", + "integrity": "sha512-Il0+boE7w/XebUHyBjroE+DbByORGR9KKmITzbR7MyQ4akpORYP/ZmbhAr0DG7RmmBqoOnZdy2QlvajJ2QA59g==", "license": "MIT", "dependencies": { - "@radix-ui/react-use-callback-ref": "1.1.0" + "@radix-ui/react-use-callback-ref": 
"1.1.1" }, "peerDependencies": { "@types/react": "*", @@ -3197,6 +4244,21 @@ } } }, + "node_modules/@radix-ui/react-use-escape-keydown/node_modules/@radix-ui/react-use-callback-ref": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-callback-ref/-/react-use-callback-ref-1.1.1.tgz", + "integrity": "sha512-FkBMwD+qbGQeMu1cOHnuGB6x4yzPjho8ap5WtbEJ26umhgqVXbhekKUQO+hZEL1vU92a3wHwdp0HAcqAUF5iDg==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, "node_modules/@radix-ui/react-use-layout-effect": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/@radix-ui/react-use-layout-effect/-/react-use-layout-effect-1.1.0.tgz", @@ -3228,12 +4290,12 @@ } }, "node_modules/@radix-ui/react-use-rect": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/@radix-ui/react-use-rect/-/react-use-rect-1.1.0.tgz", - "integrity": "sha512-0Fmkebhr6PiseyZlYAOtLS+nb7jLmpqTrJyv61Pe68MKYW6OWdRE2kI70TaYY27u7H0lajqM3hSMMLFq18Z7nQ==", + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-rect/-/react-use-rect-1.1.1.tgz", + "integrity": "sha512-QTYuDesS0VtuHNNvMh+CjlKJ4LJickCMUAqjlE3+j8w+RlRpwyX3apEQKGFzbZGdo7XNG1tXa+bQqIE7HIXT2w==", "license": "MIT", "dependencies": { - "@radix-ui/rect": "1.1.0" + "@radix-ui/rect": "1.1.1" }, "peerDependencies": { "@types/react": "*", @@ -3264,12 +4326,35 @@ } }, "node_modules/@radix-ui/react-visually-hidden": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@radix-ui/react-visually-hidden/-/react-visually-hidden-1.1.2.tgz", - "integrity": "sha512-1SzA4ns2M1aRlvxErqhLHsBHoS5eI5UUcI2awAMgGUp4LoaoWOKYmvqDY2s/tltuPkh3Yk77YF/r3IRj+Amx4Q==", + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-visually-hidden/-/react-visually-hidden-1.2.3.tgz", + "integrity": 
"sha512-pzJq12tEaaIhqjbzpCuv/OypJY/BPavOofm+dbab+MHLajy277+1lLm6JFcGgF5eskJ6mquGirhXY2GD/8u8Ug==", "license": "MIT", "dependencies": { - "@radix-ui/react-primitive": "2.0.2" + "@radix-ui/react-primitive": "2.1.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-visually-hidden/node_modules/@radix-ui/react-primitive": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz", + "integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-slot": "1.2.3" }, "peerDependencies": { "@types/react": "*", @@ -3287,11 +4372,20 @@ } }, "node_modules/@radix-ui/rect": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/@radix-ui/rect/-/rect-1.1.0.tgz", - "integrity": "sha512-A9+lCBZoaMJlVKcRBz2YByCG+Cp2t6nAnMnNba+XiWxnj6r4JUFqfsgwocMBZU9LPtdxC6wB56ySYpc7LQIoJg==", + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/rect/-/rect-1.1.1.tgz", + "integrity": "sha512-HPwpGIzkl28mWyZqG52jiqDJ12waP11Pa1lGoiyUkIEuMLBP0oeK/C89esbXrxsky5we7dfd8U58nm0SgAWpVw==", "license": "MIT" }, + "node_modules/@react-icons/all-files": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/@react-icons/all-files/-/all-files-4.1.0.tgz", + "integrity": "sha512-hxBI2UOuVaI3O/BhQfhtb4kcGn9ft12RWAFVMUeNjqqhLsHvFtzIkFaptBJpFDANTKoDfdVoHTKZDlwKCACbMQ==", + "license": "MIT", + "peerDependencies": { + "react": "*" + } + }, "node_modules/@remix-run/router": { "version": "1.23.2", "resolved": "https://registry.npmjs.org/@remix-run/router/-/router-1.23.2.tgz", @@ -3301,6 +4395,142 @@ 
"node": ">=14.0.0" } }, + "node_modules/@rjsf/core": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/@rjsf/core/-/core-6.3.1.tgz", + "integrity": "sha512-LTjFz5Fk3FlbgFPJ+OJi1JdWJyiap9dSpx8W6u7JHNB7K5VbwzJe8gIU45XWLHzWFGDHKPm89VrUzjOs07TPtg==", + "license": "Apache-2.0", + "peer": true, + "dependencies": { + "lodash": "^4.17.23", + "lodash-es": "^4.17.23", + "markdown-to-jsx": "^8.0.0", + "prop-types": "^15.8.1" + }, + "engines": { + "node": ">=20" + }, + "peerDependencies": { + "@rjsf/utils": "^6.3.x", + "react": ">=18" + } + }, + "node_modules/@rjsf/shadcn": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/@rjsf/shadcn/-/shadcn-6.3.1.tgz", + "integrity": "sha512-9v+BZ5ip2fdlYRYMPlkNzrhHhZmyrdConPLbHjLN+wVDTeIPZW8IjeV5C/diNqFpS3wm223vW5zOOE5eWuhi/g==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-checkbox": "^1.3.3", + "@radix-ui/react-dialog": "^1.1.15", + "@radix-ui/react-icons": "^1.3.2", + "@radix-ui/react-label": "^2.1.8", + "@radix-ui/react-popover": "^1.1.15", + "@radix-ui/react-radio-group": "^1.3.8", + "@radix-ui/react-select": "^2.2.6", + "@radix-ui/react-separator": "^1.1.8", + "@radix-ui/react-slider": "^1.3.6", + "@radix-ui/react-slot": "^1.2.0", + "@react-icons/all-files": "^4.1.0", + "class-variance-authority": "^0.7.1", + "clsx": "^2.1.1", + "cmdk": "^1.1.1", + "lodash": "^4.17.23", + "lodash-es": "^4.17.23", + "lucide-react": "^0.548.0", + "tailwind-merge": "^3.4.0", + "tailwindcss-animate": "^1.0.7", + "uuid": "^13.0.0" + }, + "engines": { + "node": ">=20" + }, + "peerDependencies": { + "@rjsf/core": "^6.3.x", + "@rjsf/utils": "^6.3.x", + "react": ">=18" + } + }, + "node_modules/@rjsf/shadcn/node_modules/lucide-react": { + "version": "0.548.0", + "resolved": "https://registry.npmjs.org/lucide-react/-/lucide-react-0.548.0.tgz", + "integrity": "sha512-63b16z63jM9yc1MwxajHeuu0FRZFsDtljtDjYm26Kd86UQ5HQzu9ksEtoUUw4RBuewodw/tGFmvipePvRsKeDA==", + "license": "ISC", + "peerDependencies": { + "react": 
"^16.5.1 || ^17.0.0 || ^18.0.0 || ^19.0.0" + } + }, + "node_modules/@rjsf/shadcn/node_modules/tailwind-merge": { + "version": "3.4.0", + "resolved": "https://registry.npmjs.org/tailwind-merge/-/tailwind-merge-3.4.0.tgz", + "integrity": "sha512-uSaO4gnW+b3Y2aWoWfFpX62vn2sR3skfhbjsEnaBI81WD1wBLlHZe5sWf0AqjksNdYTbGBEd0UasQMT3SNV15g==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/dcastil" + } + }, + "node_modules/@rjsf/utils": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/@rjsf/utils/-/utils-6.3.1.tgz", + "integrity": "sha512-ve2KHl1ITYG8QIonnuK83/T1k/5NuxP4D1egVqP9Hz2ub28kgl0rNMwmRSxXs3WIbCcMW9g3ox+daVrbSNc4Mw==", + "license": "Apache-2.0", + "peer": true, + "dependencies": { + "@x0k/json-schema-merge": "^1.0.2", + "fast-uri": "^3.1.0", + "jsonpointer": "^5.0.1", + "lodash": "^4.17.23", + "lodash-es": "^4.17.23", + "react-is": "^18.3.1" + }, + "engines": { + "node": ">=20" + }, + "peerDependencies": { + "react": ">=18" + } + }, + "node_modules/@rjsf/validator-ajv8": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/@rjsf/validator-ajv8/-/validator-ajv8-6.3.1.tgz", + "integrity": "sha512-2RHDxBc0gBplPniau5UZj7aznpTelSBm1b3DNybok8L0NuIfmndbp9kNXgFuRvlyfsQSyYmZSBjbzeYqr0Hpcw==", + "license": "Apache-2.0", + "dependencies": { + "ajv": "^8.17.1", + "ajv-formats": "^2.1.1", + "lodash": "^4.17.23", + "lodash-es": "^4.17.23" + }, + "engines": { + "node": ">=20" + }, + "peerDependencies": { + "@rjsf/utils": "^6.3.x" + } + }, + "node_modules/@rjsf/validator-ajv8/node_modules/ajv": { + "version": "8.17.1", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.17.1.tgz", + "integrity": "sha512-B/gBuNg5SiMTrPkC+A2+cW0RszwxYmn6VYxB/inlBStS5nx6xHIt/ehKRhIMhqusl7a8LjQoZnjCs5vhwxOQ1g==", + "license": "MIT", + "dependencies": { + "fast-deep-equal": "^3.1.3", + "fast-uri": "^3.0.1", + "json-schema-traverse": "^1.0.0", + "require-from-string": "^2.0.2" + }, + "funding": { + "type": "github", + "url": 
"https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/@rjsf/validator-ajv8/node_modules/json-schema-traverse": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", + "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==", + "license": "MIT" + }, "node_modules/@rollup/rollup-android-arm-eabi": { "version": "4.34.9", "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.34.9.tgz", @@ -3852,11 +5082,43 @@ "integrity": "sha512-4Kh9a6B2bQciAhf7FSuMRRkUWecJgJu9nPnx3yzpsfXX/c50REIqpHY4C82bXP90qrLtXtkDxTZosYO3UpOwlA==", "dev": true }, + "node_modules/@types/debug": { + "version": "4.1.12", + "resolved": "https://registry.npmjs.org/@types/debug/-/debug-4.1.12.tgz", + "integrity": "sha512-vIChWdVG3LG1SMxEvI/AK+FWJthlrqlTu7fbrlywTkkaONwk/UAGaULXRlf8vkzFBLVm0zkMdCquhL5aOjhXPQ==", + "license": "MIT", + "dependencies": { + "@types/ms": "*" + } + }, "node_modules/@types/estree": { "version": "1.0.6", "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.6.tgz", "integrity": "sha512-AYnb1nQyY49te+VRAVgmzfcgjYS91mY5P0TKUDCLEM+gNnA+3T6rWITXRLYCpahpqSQbN5cE+gHpnPyXjHWxcw==", - "dev": true, + "license": "MIT" + }, + "node_modules/@types/estree-jsx": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/@types/estree-jsx/-/estree-jsx-1.0.5.tgz", + "integrity": "sha512-52CcUVNFyfb1A2ALocQw/Dd1BQFNmSdkuC3BkZ6iqhdMfQz7JWOFRuJFloOzjk+6WijU56m9oKXFAXc7o3Towg==", + "license": "MIT", + "dependencies": { + "@types/estree": "*" + } + }, + "node_modules/@types/hast": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@types/hast/-/hast-3.0.4.tgz", + "integrity": "sha512-WPs+bbQw5aCj+x6laNGWLH3wviHtoCv/P3+otBhbOhJgG8qtpdAMlTCxLtsTWA7LH1Oh/bFCHsBn0TPS5m30EQ==", + "license": "MIT", + "dependencies": { + "@types/unist": "*" + } + }, + "node_modules/@types/json-schema": { + 
"version": "7.0.15", + "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.15.tgz", + "integrity": "sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==", "license": "MIT" }, "node_modules/@types/lodash": { @@ -3866,73 +5128,59 @@ "dev": true, "license": "MIT" }, + "node_modules/@types/mdast": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/@types/mdast/-/mdast-4.0.4.tgz", + "integrity": "sha512-kGaNbPh1k7AFzgpud/gMdvIm5xuECykRR+JnWKQno9TAXVa6WIVCGTPvYGekIDL4uwCZQSYbUxNBSb1aUo79oA==", + "license": "MIT", + "dependencies": { + "@types/unist": "*" + } + }, + "node_modules/@types/ms": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/@types/ms/-/ms-2.1.0.tgz", + "integrity": "sha512-GsCCIZDE/p3i96vtEqx+7dBUGXrc7zeSK3wwPHIaRThS+9OhWIXRqzs4d6k1SVU8g91DrNRWxWUGhp5KXQb2VA==", + "license": "MIT" + }, "node_modules/@types/node": { "version": "20.14.10", "resolved": "https://registry.npmjs.org/@types/node/-/node-20.14.10.tgz", "integrity": "sha512-MdiXf+nDuMvY0gJKxyfZ7/6UFsETO7mGKF54MVD/ekJS6HdFtpZFBgrh6Pseu64XTb2MLyFPlbW6hj8HYRQNOQ==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "undici-types": "~5.26.4" } }, - "node_modules/@types/prop-types": { - "version": "15.7.11", - "resolved": "https://registry.npmjs.org/@types/prop-types/-/prop-types-15.7.11.tgz", - "integrity": "sha512-ga8y9v9uyeiLdpKddhxYQkxNDrfvuPrlFb0N1qnZZByvcElJaXthF1UhvCh9TLWJBEHeNtdnbysW7Y6Uq8CVng==" - }, "node_modules/@types/react": { - "version": "18.3.3", - "resolved": "https://registry.npmjs.org/@types/react/-/react-18.3.3.tgz", - "integrity": "sha512-hti/R0pS0q1/xx+TsI73XIqk26eBsISZ2R0wUijXIngRK9R/e7Xw/cXVxQK7R5JjW+SV4zGcn5hXjudkN/pLIw==", + "version": "19.2.14", + "resolved": "https://registry.npmjs.org/@types/react/-/react-19.2.14.tgz", + "integrity": "sha512-ilcTH/UniCkMdtexkoCN0bI7pMcJDvmQFPvuPvmEaYA/NSfFTAgdUSLAoVjaRJm7+6PvcM+q1zYOwS4wTYMF9w==", "license": "MIT", + "peer": 
true, "dependencies": { - "@types/prop-types": "*", - "csstype": "^3.0.2" + "csstype": "^3.2.2" } }, "node_modules/@types/react-dom": { - "version": "18.3.0", - "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-18.3.0.tgz", - "integrity": "sha512-EhwApuTmMBmXuFOikhQLIBUn6uFg81SwLMOAUgodJF14SOBOCMdU04gDoYi0WOJJHD144TL32z4yDqCW3dnkQg==", + "version": "19.2.3", + "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-19.2.3.tgz", + "integrity": "sha512-jp2L/eY6fn+KgVVQAOqYItbF0VY/YApe5Mz2F0aykSO8gx31bYCZyvSeYxCHKvzHG5eZjc+zyaS5BrBWya2+kQ==", "devOptional": true, - "dependencies": { - "@types/react": "*" - } - }, - "node_modules/@types/react-grid-layout": { - "version": "1.3.5", - "resolved": "https://registry.npmjs.org/@types/react-grid-layout/-/react-grid-layout-1.3.5.tgz", - "integrity": "sha512-WH/po1gcEcoR6y857yAnPGug+ZhkF4PaTUxgAbwfeSH/QOgVSakKHBXoPGad/sEznmkiaK3pqHk+etdWisoeBQ==", - "dev": true, - "dependencies": { - "@types/react": "*" - } - }, - "node_modules/@types/react-icons": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/@types/react-icons/-/react-icons-3.0.0.tgz", - "integrity": "sha512-Vefs6LkLqF61vfV7AiAqls+vpR94q67gunhMueDznG+msAkrYgRxl7gYjNem/kZ+as2l2mNChmF1jRZzzQQtMg==", - "deprecated": "This is a stub types definition. 
react-icons provides its own type definitions, so you do not need this installed.", - "dev": true, - "dependencies": { - "react-icons": "*" + "license": "MIT", + "peer": true, + "peerDependencies": { + "@types/react": "^19.2.0" } }, "node_modules/@types/react-reconciler": { - "version": "0.28.8", - "resolved": "https://registry.npmjs.org/@types/react-reconciler/-/react-reconciler-0.28.8.tgz", - "integrity": "sha512-SN9c4kxXZonFhbX4hJrZy37yw9e7EIxcpHCxQv5JUS18wDE5ovkQKlqQEkufdJCCMfuI9BnjUJvhYeJ9x5Ra7g==", - "dependencies": { - "@types/react": "*" - } - }, - "node_modules/@types/react-transition-group": { - "version": "4.4.10", - "resolved": "https://registry.npmjs.org/@types/react-transition-group/-/react-transition-group-4.4.10.tgz", - "integrity": "sha512-hT/+s0VQs2ojCX823m60m5f0sL5idt9SO6Tj6Dg+rdphGPIeJbJ6CxvBYkgkGKrYeDjvIpKTR38UzmtHJOGW3Q==", - "dev": true, - "dependencies": { + "version": "0.33.0", + "resolved": "https://registry.npmjs.org/@types/react-reconciler/-/react-reconciler-0.33.0.tgz", + "integrity": "sha512-HZOXsKT0tGI9LlUw2LuedXsVeB88wFa536vVL0M6vE8zN63nI+sSr1ByxmPToP5K5bukaVscyeCJcF9guVNJ1g==", + "license": "MIT", + "peerDependencies": { "@types/react": "*" } }, @@ -3955,6 +5203,12 @@ "dev": true, "license": "MIT" }, + "node_modules/@types/unist": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.3.tgz", + "integrity": "sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q==", + "license": "MIT" + }, "node_modules/@typescript-eslint/eslint-plugin": { "version": "7.12.0", "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-7.12.0.tgz", @@ -4046,6 +5300,7 @@ "integrity": "sha512-dm/J2UDY3oV3TKius2OUZIFHsomQmpHtsV0FTh1WO8EKgHLQ1QCADUqscPgTpU+ih1e21FQSRjXckHn3txn6kQ==", "dev": true, "license": "BSD-2-Clause", + "peer": true, "dependencies": { "@typescript-eslint/scope-manager": "7.12.0", "@typescript-eslint/types": "7.12.0", @@ -4178,7 
+5433,6 @@ "version": "1.2.0", "resolved": "https://registry.npmjs.org/@ungap/structured-clone/-/structured-clone-1.2.0.tgz", "integrity": "sha512-zuVdFrMJiuCDQUMCzQaD6KL28MjnqqN8XnAqiEq9PNm/hCPTSGfrXCOfwj1ow4LFb/tNymJPwsNbVePc1xFqrQ==", - "dev": true, "license": "ISC" }, "node_modules/@vitejs/plugin-react-swc": { @@ -4313,6 +5567,22 @@ "url": "https://opencollective.com/vitest" } }, + "node_modules/@x0k/json-schema-merge": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/@x0k/json-schema-merge/-/json-schema-merge-1.0.2.tgz", + "integrity": "sha512-1734qiJHNX3+cJGDMMw2yz7R+7kpbAtl5NdPs1c/0gO5kYT6s4dMbLXiIfpZNsOYhGZI3aH7FWrj4Zxz7epXNg==", + "license": "MIT", + "dependencies": { + "@types/json-schema": "^7.0.15" + } + }, + "node_modules/@yarnpkg/lockfile": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@yarnpkg/lockfile/-/lockfile-1.1.0.tgz", + "integrity": "sha512-GpSwvyXOcOOlV70vbnzjj4fW5xW/FdUF6nQEt1ENy7m4ZCczi1+/buVUPAqmGfqznsORNFzUMjctTIp8a9tuCQ==", + "dev": true, + "license": "BSD-2-Clause" + }, "node_modules/@yr/monotone-cubic-spline": { "version": "1.0.3", "resolved": "https://registry.npmjs.org/@yr/monotone-cubic-spline/-/monotone-cubic-spline-1.0.3.tgz", @@ -4323,6 +5593,7 @@ "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.11.3.tgz", "integrity": "sha512-Y9rRfJG5jcKOE0CLisYbojUjIrIEE7AGMzA/Sm4BslANhbS+cDMpgBdcPT91oJ7OuJ9hYJBx59RjbhxVnrF8Xg==", "dev": true, + "peer": true, "bin": { "acorn": "bin/acorn" }, @@ -4370,6 +5641,45 @@ "url": "https://github.com/sponsors/epoberezkin" } }, + "node_modules/ajv-formats": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/ajv-formats/-/ajv-formats-2.1.1.tgz", + "integrity": "sha512-Wx0Kx52hxE7C18hkMEggYlEifqWZtYaRgouJor+WMdPnQyEK13vgEWyVNup7SoeeoLMsr4kf5h6dOW11I15MUA==", + "license": "MIT", + "dependencies": { + "ajv": "^8.0.0" + }, + "peerDependencies": { + "ajv": "^8.0.0" + }, + "peerDependenciesMeta": { + "ajv": { + "optional": true + } + } + }, + 
"node_modules/ajv-formats/node_modules/ajv": { + "version": "8.17.1", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.17.1.tgz", + "integrity": "sha512-B/gBuNg5SiMTrPkC+A2+cW0RszwxYmn6VYxB/inlBStS5nx6xHIt/ehKRhIMhqusl7a8LjQoZnjCs5vhwxOQ1g==", + "license": "MIT", + "dependencies": { + "fast-deep-equal": "^3.1.3", + "fast-uri": "^3.0.1", + "json-schema-traverse": "^1.0.0", + "require-from-string": "^2.0.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/ajv-formats/node_modules/json-schema-traverse": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", + "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==", + "license": "MIT" + }, "node_modules/ansi-escapes": { "version": "4.3.2", "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-4.3.2.tgz", @@ -4445,6 +5755,7 @@ "resolved": "https://registry.npmjs.org/apexcharts/-/apexcharts-3.52.0.tgz", "integrity": "sha512-7dg0ADKs8AA89iYMZMe2sFDG0XK5PfqllKV9N+i3hKHm3vEtdhwz8AlXGm+/b0nJ6jKiaXsqci5LfVxNhtB+dA==", "license": "MIT", + "peer": true, "dependencies": { "@yr/monotone-cubic-spline": "^1.0.3", "svg.draggable.js": "^2.2.2", @@ -4571,6 +5882,16 @@ "proxy-from-env": "^1.1.0" } }, + "node_modules/bail": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/bail/-/bail-2.0.2.tgz", + "integrity": "sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, "node_modules/balanced-match": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", @@ -4645,6 +5966,7 @@ } ], "license": "MIT", + "peer": true, "dependencies": { "caniuse-lite": "^1.0.30001646", "electron-to-chromium": "^1.5.4", @@ -4683,6 +6005,25 @@ 
"node": ">=8" } }, + "node_modules/call-bind": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.8.tgz", + "integrity": "sha512-oKlSFMcMwpUg2ednkhQ454wfWiU/ul3CkJe/PEHcTKuiX6RpbehUiFMXu13HalGZxfUwCQzZG747YXBn1im9ww==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.0", + "es-define-property": "^1.0.0", + "get-intrinsic": "^1.2.4", + "set-function-length": "^1.2.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/call-bind-apply-helpers": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz", @@ -4696,6 +6037,23 @@ "node": ">= 0.4" } }, + "node_modules/call-bound": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/call-bound/-/call-bound-1.0.4.tgz", + "integrity": "sha512-+ys997U96po4Kx/ABpBCqhA9EuxJaQWDQg7295H4hBphv3IZg0boBKuwYpt4YXp6MZ5AmZQnU/tyMTlRpaSejg==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.2", + "get-intrinsic": "^1.3.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/callsites": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", @@ -4735,6 +6093,16 @@ ], "license": "CC-BY-4.0" }, + "node_modules/ccount": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/ccount/-/ccount-2.0.1.tgz", + "integrity": "sha512-eyrF0jiFpY+3drT6383f1qhkbGsLSifNAjA61IUjZjmLCWjItY6LB9ft9YhoDgwfmclB2zhu51Lc7+95b8NRAg==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, "node_modules/chai": { "version": "5.2.0", "resolved": "https://registry.npmjs.org/chai/-/chai-5.2.0.tgz", @@ -4768,6 +6136,46 @@ "url": "https://github.com/chalk/chalk?sponsor=1" } }, + "node_modules/character-entities": { + 
"version": "2.0.2", + "resolved": "https://registry.npmjs.org/character-entities/-/character-entities-2.0.2.tgz", + "integrity": "sha512-shx7oQ0Awen/BRIdkjkvz54PnEEI/EjwXDSIZp86/KKdbafHh1Df/RYGBhn4hbe2+uKC9FnT5UCEdyPz3ai9hQ==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/character-entities-html4": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/character-entities-html4/-/character-entities-html4-2.1.0.tgz", + "integrity": "sha512-1v7fgQRj6hnSwFpq1Eu0ynr/CDEw0rXo2B61qXrLNdHZmPKgb7fqS1a2JwF0rISo9q77jDI8VMEHoApn8qDoZA==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/character-entities-legacy": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/character-entities-legacy/-/character-entities-legacy-3.0.0.tgz", + "integrity": "sha512-RpPp0asT/6ufRm//AJVwpViZbGM/MkjQFxJccQRHmISF/22NBtsHqAWmL+/pmkPWoIUJdWyeVleTl1wydHATVQ==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/character-reference-invalid": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/character-reference-invalid/-/character-reference-invalid-2.0.1.tgz", + "integrity": "sha512-iBZ4F4wRbyORVsu0jPV7gXkOsGYjGHPmAyv+HiHG8gi5PtC9KI2j1+v8/tlibRvjoWX027ypmG/n0HtO5t7unw==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, "node_modules/check-error": { "version": "2.1.1", "resolved": "https://registry.npmjs.org/check-error/-/check-error-2.1.1.tgz", @@ -4815,6 +6223,22 @@ "node": ">= 6" } }, + "node_modules/ci-info": { + "version": "3.9.0", + "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-3.9.0.tgz", + "integrity": "sha512-NIxF55hv4nSqQswkAeiOi1r83xy8JldOFDTWiug55KBu9Jnblncd2U6ViHmYgHf01TPZS77NJBhBMKdWj9HQMQ==", + "dev": true, + "funding": [ + { + "type": 
"github", + "url": "https://github.com/sponsors/sibiraj-s" + } + ], + "license": "MIT", + "engines": { + "node": ">=8" + } + }, "node_modules/class-variance-authority": { "version": "0.7.1", "resolved": "https://registry.npmjs.org/class-variance-authority/-/class-variance-authority-0.7.1.tgz", @@ -4877,381 +6301,19 @@ } }, "node_modules/cmdk": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/cmdk/-/cmdk-1.0.0.tgz", - "integrity": "sha512-gDzVf0a09TvoJ5jnuPvygTB77+XdOSwEmJ88L6XPFPlv7T3RxbP9jgenfylrAMD0+Le1aO0nVjQUzl2g+vjz5Q==", + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/cmdk/-/cmdk-1.1.1.tgz", + "integrity": "sha512-Vsv7kFaXm+ptHDMZ7izaRsP70GgrW9NBNGswt9OZaVBLlE0SNpDq8eu/VGXyF9r7M0azK3Wy7OlYXsuyYLFzHg==", "license": "MIT", "dependencies": { - "@radix-ui/react-dialog": "1.0.5", - "@radix-ui/react-primitive": "1.0.3" + "@radix-ui/react-compose-refs": "^1.1.1", + "@radix-ui/react-dialog": "^1.1.6", + "@radix-ui/react-id": "^1.1.0", + "@radix-ui/react-primitive": "^2.0.2" }, "peerDependencies": { - "react": "^18.0.0", - "react-dom": "^18.0.0" - } - }, - "node_modules/cmdk/node_modules/@radix-ui/primitive": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/@radix-ui/primitive/-/primitive-1.0.1.tgz", - "integrity": "sha512-yQ8oGX2GVsEYMWGxcovu1uGWPCxV5BFfeeYxqPmuAzUyLT9qmaMXSAhXpb0WrspIeqYzdJpkh2vHModJPgRIaw==", - "license": "MIT", - "dependencies": { - "@babel/runtime": "^7.13.10" - } - }, - "node_modules/cmdk/node_modules/@radix-ui/react-compose-refs": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/@radix-ui/react-compose-refs/-/react-compose-refs-1.0.1.tgz", - "integrity": "sha512-fDSBgd44FKHa1FRMU59qBMPFcl2PZE+2nmqunj+BWFyYYjnhIDWL2ItDs3rrbJDQOtzt5nIebLCQc4QRfz6LJw==", - "license": "MIT", - "dependencies": { - "@babel/runtime": "^7.13.10" - }, - "peerDependencies": { - "@types/react": "*", - "react": "^16.8 || ^17.0 || ^18.0" - }, - "peerDependenciesMeta": { - "@types/react": { - "optional": true 
- } - } - }, - "node_modules/cmdk/node_modules/@radix-ui/react-context": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/@radix-ui/react-context/-/react-context-1.0.1.tgz", - "integrity": "sha512-ebbrdFoYTcuZ0v4wG5tedGnp9tzcV8awzsxYph7gXUyvnNLuTIcCk1q17JEbnVhXAKG9oX3KtchwiMIAYp9NLg==", - "license": "MIT", - "dependencies": { - "@babel/runtime": "^7.13.10" - }, - "peerDependencies": { - "@types/react": "*", - "react": "^16.8 || ^17.0 || ^18.0" - }, - "peerDependenciesMeta": { - "@types/react": { - "optional": true - } - } - }, - "node_modules/cmdk/node_modules/@radix-ui/react-dialog": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/@radix-ui/react-dialog/-/react-dialog-1.0.5.tgz", - "integrity": "sha512-GjWJX/AUpB703eEBanuBnIWdIXg6NvJFCXcNlSZk4xdszCdhrJgBoUd1cGk67vFO+WdA2pfI/plOpqz/5GUP6Q==", - "license": "MIT", - "dependencies": { - "@babel/runtime": "^7.13.10", - "@radix-ui/primitive": "1.0.1", - "@radix-ui/react-compose-refs": "1.0.1", - "@radix-ui/react-context": "1.0.1", - "@radix-ui/react-dismissable-layer": "1.0.5", - "@radix-ui/react-focus-guards": "1.0.1", - "@radix-ui/react-focus-scope": "1.0.4", - "@radix-ui/react-id": "1.0.1", - "@radix-ui/react-portal": "1.0.4", - "@radix-ui/react-presence": "1.0.1", - "@radix-ui/react-primitive": "1.0.3", - "@radix-ui/react-slot": "1.0.2", - "@radix-ui/react-use-controllable-state": "1.0.1", - "aria-hidden": "^1.1.1", - "react-remove-scroll": "2.5.5" - }, - "peerDependencies": { - "@types/react": "*", - "@types/react-dom": "*", - "react": "^16.8 || ^17.0 || ^18.0", - "react-dom": "^16.8 || ^17.0 || ^18.0" - }, - "peerDependenciesMeta": { - "@types/react": { - "optional": true - }, - "@types/react-dom": { - "optional": true - } - } - }, - "node_modules/cmdk/node_modules/@radix-ui/react-dismissable-layer": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/@radix-ui/react-dismissable-layer/-/react-dismissable-layer-1.0.5.tgz", - "integrity": 
"sha512-aJeDjQhywg9LBu2t/At58hCvr7pEm0o2Ke1x33B+MhjNmmZ17sy4KImo0KPLgsnc/zN7GPdce8Cnn0SWvwZO7g==", - "license": "MIT", - "dependencies": { - "@babel/runtime": "^7.13.10", - "@radix-ui/primitive": "1.0.1", - "@radix-ui/react-compose-refs": "1.0.1", - "@radix-ui/react-primitive": "1.0.3", - "@radix-ui/react-use-callback-ref": "1.0.1", - "@radix-ui/react-use-escape-keydown": "1.0.3" - }, - "peerDependencies": { - "@types/react": "*", - "@types/react-dom": "*", - "react": "^16.8 || ^17.0 || ^18.0", - "react-dom": "^16.8 || ^17.0 || ^18.0" - }, - "peerDependenciesMeta": { - "@types/react": { - "optional": true - }, - "@types/react-dom": { - "optional": true - } - } - }, - "node_modules/cmdk/node_modules/@radix-ui/react-focus-guards": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/@radix-ui/react-focus-guards/-/react-focus-guards-1.0.1.tgz", - "integrity": "sha512-Rect2dWbQ8waGzhMavsIbmSVCgYxkXLxxR3ZvCX79JOglzdEy4JXMb98lq4hPxUbLr77nP0UOGf4rcMU+s1pUA==", - "license": "MIT", - "dependencies": { - "@babel/runtime": "^7.13.10" - }, - "peerDependencies": { - "@types/react": "*", - "react": "^16.8 || ^17.0 || ^18.0" - }, - "peerDependenciesMeta": { - "@types/react": { - "optional": true - } - } - }, - "node_modules/cmdk/node_modules/@radix-ui/react-focus-scope": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/@radix-ui/react-focus-scope/-/react-focus-scope-1.0.4.tgz", - "integrity": "sha512-sL04Mgvf+FmyvZeYfNu1EPAaaxD+aw7cYeIB9L9Fvq8+urhltTRaEo5ysKOpHuKPclsZcSUMKlN05x4u+CINpA==", - "license": "MIT", - "dependencies": { - "@babel/runtime": "^7.13.10", - "@radix-ui/react-compose-refs": "1.0.1", - "@radix-ui/react-primitive": "1.0.3", - "@radix-ui/react-use-callback-ref": "1.0.1" - }, - "peerDependencies": { - "@types/react": "*", - "@types/react-dom": "*", - "react": "^16.8 || ^17.0 || ^18.0", - "react-dom": "^16.8 || ^17.0 || ^18.0" - }, - "peerDependenciesMeta": { - "@types/react": { - "optional": true - }, - "@types/react-dom": { - 
"optional": true - } - } - }, - "node_modules/cmdk/node_modules/@radix-ui/react-id": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/@radix-ui/react-id/-/react-id-1.0.1.tgz", - "integrity": "sha512-tI7sT/kqYp8p96yGWY1OAnLHrqDgzHefRBKQ2YAkBS5ja7QLcZ9Z/uY7bEjPUatf8RomoXM8/1sMj1IJaE5UzQ==", - "license": "MIT", - "dependencies": { - "@babel/runtime": "^7.13.10", - "@radix-ui/react-use-layout-effect": "1.0.1" - }, - "peerDependencies": { - "@types/react": "*", - "react": "^16.8 || ^17.0 || ^18.0" - }, - "peerDependenciesMeta": { - "@types/react": { - "optional": true - } - } - }, - "node_modules/cmdk/node_modules/@radix-ui/react-portal": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/@radix-ui/react-portal/-/react-portal-1.0.4.tgz", - "integrity": "sha512-Qki+C/EuGUVCQTOTD5vzJzJuMUlewbzuKyUy+/iHM2uwGiru9gZeBJtHAPKAEkB5KWGi9mP/CHKcY0wt1aW45Q==", - "license": "MIT", - "dependencies": { - "@babel/runtime": "^7.13.10", - "@radix-ui/react-primitive": "1.0.3" - }, - "peerDependencies": { - "@types/react": "*", - "@types/react-dom": "*", - "react": "^16.8 || ^17.0 || ^18.0", - "react-dom": "^16.8 || ^17.0 || ^18.0" - }, - "peerDependenciesMeta": { - "@types/react": { - "optional": true - }, - "@types/react-dom": { - "optional": true - } - } - }, - "node_modules/cmdk/node_modules/@radix-ui/react-presence": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/@radix-ui/react-presence/-/react-presence-1.0.1.tgz", - "integrity": "sha512-UXLW4UAbIY5ZjcvzjfRFo5gxva8QirC9hF7wRE4U5gz+TP0DbRk+//qyuAQ1McDxBt1xNMBTaciFGvEmJvAZCg==", - "license": "MIT", - "dependencies": { - "@babel/runtime": "^7.13.10", - "@radix-ui/react-compose-refs": "1.0.1", - "@radix-ui/react-use-layout-effect": "1.0.1" - }, - "peerDependencies": { - "@types/react": "*", - "@types/react-dom": "*", - "react": "^16.8 || ^17.0 || ^18.0", - "react-dom": "^16.8 || ^17.0 || ^18.0" - }, - "peerDependenciesMeta": { - "@types/react": { - "optional": true - }, - 
"@types/react-dom": { - "optional": true - } - } - }, - "node_modules/cmdk/node_modules/@radix-ui/react-primitive": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-1.0.3.tgz", - "integrity": "sha512-yi58uVyoAcK/Nq1inRY56ZSjKypBNKTa/1mcL8qdl6oJeEaDbOldlzrGn7P6Q3Id5d+SYNGc5AJgc4vGhjs5+g==", - "license": "MIT", - "dependencies": { - "@babel/runtime": "^7.13.10", - "@radix-ui/react-slot": "1.0.2" - }, - "peerDependencies": { - "@types/react": "*", - "@types/react-dom": "*", - "react": "^16.8 || ^17.0 || ^18.0", - "react-dom": "^16.8 || ^17.0 || ^18.0" - }, - "peerDependenciesMeta": { - "@types/react": { - "optional": true - }, - "@types/react-dom": { - "optional": true - } - } - }, - "node_modules/cmdk/node_modules/@radix-ui/react-slot": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.0.2.tgz", - "integrity": "sha512-YeTpuq4deV+6DusvVUW4ivBgnkHwECUu0BiN43L5UCDFgdhsRUWAghhTF5MbvNTPzmiFOx90asDSUjWuCNapwg==", - "license": "MIT", - "dependencies": { - "@babel/runtime": "^7.13.10", - "@radix-ui/react-compose-refs": "1.0.1" - }, - "peerDependencies": { - "@types/react": "*", - "react": "^16.8 || ^17.0 || ^18.0" - }, - "peerDependenciesMeta": { - "@types/react": { - "optional": true - } - } - }, - "node_modules/cmdk/node_modules/@radix-ui/react-use-callback-ref": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/@radix-ui/react-use-callback-ref/-/react-use-callback-ref-1.0.1.tgz", - "integrity": "sha512-D94LjX4Sp0xJFVaoQOd3OO9k7tpBYNOXdVhkltUbGv2Qb9OXdrg/CpsjlZv7ia14Sylv398LswWBVVu5nqKzAQ==", - "license": "MIT", - "dependencies": { - "@babel/runtime": "^7.13.10" - }, - "peerDependencies": { - "@types/react": "*", - "react": "^16.8 || ^17.0 || ^18.0" - }, - "peerDependenciesMeta": { - "@types/react": { - "optional": true - } - } - }, - "node_modules/cmdk/node_modules/@radix-ui/react-use-controllable-state": { - "version": "1.0.1", - "resolved": 
"https://registry.npmjs.org/@radix-ui/react-use-controllable-state/-/react-use-controllable-state-1.0.1.tgz", - "integrity": "sha512-Svl5GY5FQeN758fWKrjM6Qb7asvXeiZltlT4U2gVfl8Gx5UAv2sMR0LWo8yhsIZh2oQ0eFdZ59aoOOMV7b47VA==", - "license": "MIT", - "dependencies": { - "@babel/runtime": "^7.13.10", - "@radix-ui/react-use-callback-ref": "1.0.1" - }, - "peerDependencies": { - "@types/react": "*", - "react": "^16.8 || ^17.0 || ^18.0" - }, - "peerDependenciesMeta": { - "@types/react": { - "optional": true - } - } - }, - "node_modules/cmdk/node_modules/@radix-ui/react-use-escape-keydown": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/@radix-ui/react-use-escape-keydown/-/react-use-escape-keydown-1.0.3.tgz", - "integrity": "sha512-vyL82j40hcFicA+M4Ex7hVkB9vHgSse1ZWomAqV2Je3RleKGO5iM8KMOEtfoSB0PnIelMd2lATjTGMYqN5ylTg==", - "license": "MIT", - "dependencies": { - "@babel/runtime": "^7.13.10", - "@radix-ui/react-use-callback-ref": "1.0.1" - }, - "peerDependencies": { - "@types/react": "*", - "react": "^16.8 || ^17.0 || ^18.0" - }, - "peerDependenciesMeta": { - "@types/react": { - "optional": true - } - } - }, - "node_modules/cmdk/node_modules/@radix-ui/react-use-layout-effect": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/@radix-ui/react-use-layout-effect/-/react-use-layout-effect-1.0.1.tgz", - "integrity": "sha512-v/5RegiJWYdoCvMnITBkNNx6bCj20fiaJnWtRkU18yITptraXjffz5Qbn05uOiQnOvi+dbkznkoaMltz1GnszQ==", - "license": "MIT", - "dependencies": { - "@babel/runtime": "^7.13.10" - }, - "peerDependencies": { - "@types/react": "*", - "react": "^16.8 || ^17.0 || ^18.0" - }, - "peerDependenciesMeta": { - "@types/react": { - "optional": true - } - } - }, - "node_modules/cmdk/node_modules/react-remove-scroll": { - "version": "2.5.5", - "resolved": "https://registry.npmjs.org/react-remove-scroll/-/react-remove-scroll-2.5.5.tgz", - "integrity": "sha512-ImKhrzJJsyXJfBZ4bzu8Bwpka14c/fQt0k+cyFp/PBhTfyDnU5hjOtM4AG/0AMyy8oKzOTR0lDgJIM7pYXI0kw==", - 
"license": "MIT", - "dependencies": { - "react-remove-scroll-bar": "^2.3.3", - "react-style-singleton": "^2.2.1", - "tslib": "^2.1.0", - "use-callback-ref": "^1.3.0", - "use-sidecar": "^1.1.2" - }, - "engines": { - "node": ">=10" - }, - "peerDependencies": { - "@types/react": "^16.8.0 || ^17.0.0 || ^18.0.0", - "react": "^16.8.0 || ^17.0.0 || ^18.0.0" - }, - "peerDependenciesMeta": { - "@types/react": { - "optional": true - } + "react": "^18 || ^19 || ^19.0.0-rc", + "react-dom": "^18 || ^19 || ^19.0.0-rc" } }, "node_modules/color-convert": { @@ -5283,6 +6345,16 @@ "node": ">= 0.8" } }, + "node_modules/comma-separated-tokens": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/comma-separated-tokens/-/comma-separated-tokens-2.0.3.tgz", + "integrity": "sha512-Fu4hJdvzeylCfQPp9SGWidpzrMs7tTrlu6Vb8XGaRGck8QSNZJJp538Wrb60Lax4fPwR64ViY468OIUTbRlGZg==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, "node_modules/commander": { "version": "4.1.1", "resolved": "https://registry.npmjs.org/commander/-/commander-4.1.1.tgz", @@ -5372,9 +6444,10 @@ } }, "node_modules/csstype": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.1.2.tgz", - "integrity": "sha512-I7K1Uu0MBPzaFKg4nI5Q7Vs2t+3gWWW648spaF+Rg7pI9ds18Ugn+lvg4SHczUdKlHI5LWBXyqfS8+DufyBsgQ==" + "version": "3.2.3", + "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.2.3.tgz", + "integrity": "sha512-z1HGKcYy2xA8AGQfwrn0PAy+PB7X/GSj3UVJW9qKyn43xWa+gl5nXmU4qqLMRzWVLFC8KusUX8T/0kCiOYpAIQ==", + "license": "MIT" }, "node_modules/data-urls": { "version": "5.0.0", @@ -5394,6 +6467,7 @@ "resolved": "https://registry.npmjs.org/date-fns/-/date-fns-3.6.0.tgz", "integrity": "sha512-fRHTG8g/Gif+kSh50gaGEdToemgfj74aRX3swtiouboip5JDLAyDE9F11nHMIcvOaXeOC6D7SpNhi7uFyB7Uww==", "license": "MIT", + "peer": true, "funding": { "type": "github", "url": "https://github.com/sponsors/kossnocorp" @@ -5418,7 +6492,6 @@ "version": 
"4.4.0", "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.0.tgz", "integrity": "sha512-6WTZ/IxCY/T6BALoZHaE4ctp9xm+Z5kY/pzYaCHRFeyVhojxlrm+46y68HA6hr0TcwEssoxNiDEUJQjfPZ/RYA==", - "dev": true, "license": "MIT", "dependencies": { "ms": "^2.1.3" @@ -5438,6 +6511,19 @@ "integrity": "sha512-VBBaLc1MgL5XpzgIP7ny5Z6Nx3UrRkIViUkPUdtl9aya5amy3De1gsUUSB1g3+3sExYNjCAsAznmukyxCb1GRA==", "dev": true }, + "node_modules/decode-named-character-reference": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/decode-named-character-reference/-/decode-named-character-reference-1.3.0.tgz", + "integrity": "sha512-GtpQYB283KrPp6nRw50q3U9/VfOutZOe103qlN7BPP6Ad27xYnOIWv4lPzo8HCAL+mMZofJ9KEy30fq6MfaK6Q==", + "license": "MIT", + "dependencies": { + "character-entities": "^2.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, "node_modules/deep-eql": { "version": "5.0.2", "resolved": "https://registry.npmjs.org/deep-eql/-/deep-eql-5.0.2.tgz", @@ -5538,6 +6624,24 @@ "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==", "dev": true }, + "node_modules/define-data-property": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/define-data-property/-/define-data-property-1.1.4.tgz", + "integrity": "sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "gopd": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/define-lazy-prop": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/define-lazy-prop/-/define-lazy-prop-3.0.0.tgz", @@ -5572,6 +6676,19 @@ "integrity": "sha512-ypdmJU/TbBby2Dxibuv7ZLW3Bs1QEmM7nHjEANfohJLvE0XVujisn1qPJcZxg+qDucsr+bP6fLD1rPS3AhJ7EQ==", "license": "MIT" }, + "node_modules/devlop": { + 
"version": "1.1.0", + "resolved": "https://registry.npmjs.org/devlop/-/devlop-1.1.0.tgz", + "integrity": "sha512-RWmIqhcFf1lRYBvNmr7qTNuyCt/7/ns2jbpp1+PalgE/rDQcBT0fioSMUpJ93irlUhC5hrg4cYqe6U+0ImW0rA==", + "license": "MIT", + "dependencies": { + "dequal": "^2.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, "node_modules/didyoumean": { "version": "1.2.2", "resolved": "https://registry.npmjs.org/didyoumean/-/didyoumean-1.2.2.tgz", @@ -5623,15 +6740,6 @@ "integrity": "sha512-7ZgogeTnjuHbo+ct10G9Ffp0mif17idi0IyWNVA/wcwcm7NPOD/WEHVP3n7n3MhXqxoIYm8d6MuZohYWIZ4T3w==", "dev": true }, - "node_modules/dom-helpers": { - "version": "5.2.1", - "resolved": "https://registry.npmjs.org/dom-helpers/-/dom-helpers-5.2.1.tgz", - "integrity": "sha512-nRCa7CK3VTrM2NmGkIy4cbK7IZlgBE/PYMn55rrXefr5xXDP0LdtfPnblFDoVdcAfslJ7or6iqAUnx0CCGIWQA==", - "dependencies": { - "@babel/runtime": "^7.8.7", - "csstype": "^3.0.2" - } - }, "node_modules/dunder-proto": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz", @@ -5660,34 +6768,6 @@ "dev": true, "license": "ISC" }, - "node_modules/embla-carousel": { - "version": "8.2.0", - "resolved": "https://registry.npmjs.org/embla-carousel/-/embla-carousel-8.2.0.tgz", - "integrity": "sha512-rf2GIX8rab9E6ZZN0Uhz05746qu2KrDje9IfFyHzjwxLwhvGjUt6y9+uaY1Sf+B0OPSa3sgas7BE2hWZCtopTA==", - "license": "MIT" - }, - "node_modules/embla-carousel-react": { - "version": "8.2.0", - "resolved": "https://registry.npmjs.org/embla-carousel-react/-/embla-carousel-react-8.2.0.tgz", - "integrity": "sha512-dWqbmaEBQjeAcy/EKrcAX37beVr0ubXuHPuLZkx27z58V1FIvRbbMb4/c3cLZx0PAv/ofngX2QFrwUB+62SPnw==", - "license": "MIT", - "dependencies": { - "embla-carousel": "8.2.0", - "embla-carousel-reactive-utils": "8.2.0" - }, - "peerDependencies": { - "react": "^16.8.0 || ^17.0.1 || ^18.0.0" - } - }, - "node_modules/embla-carousel-reactive-utils": { - "version": "8.2.0", - "resolved": 
"https://registry.npmjs.org/embla-carousel-reactive-utils/-/embla-carousel-reactive-utils-8.2.0.tgz", - "integrity": "sha512-ZdaPNgMydkPBiDRUv+wRIz3hpZJ3LKrTyz+XWi286qlwPyZFJDjbzPBiXnC3czF9N/nsabSc7LTRvGauUzwKEg==", - "license": "MIT", - "peerDependencies": { - "embla-carousel": "8.2.0" - } - }, "node_modules/emoji-regex": { "version": "8.0.0", "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", @@ -5827,6 +6907,7 @@ "integrity": "sha512-dZ6+mexnaTIbSBZWgou51U6OmzIhYM2VcNdtiTtI7qPNZm35Akpr0f6vtw3w1Kmn5PYo+tZVfh13WrhpS6oLqQ==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@eslint-community/eslint-utils": "^4.2.0", "@eslint-community/regexpp": "^4.6.1", @@ -5882,6 +6963,7 @@ "resolved": "https://registry.npmjs.org/eslint-config-prettier/-/eslint-config-prettier-9.1.0.tgz", "integrity": "sha512-NSWl5BFQWEPi1j4TjVNItzYV7dZXZ+wP6I6ZhrBGpChQhZRUaElihE9uRRkcbRnNb76UMKDF3r+WTmNcGPKsqw==", "dev": true, + "peer": true, "bin": { "eslint-config-prettier": "bin/cli.js" }, @@ -5969,16 +7051,16 @@ } }, "node_modules/eslint-plugin-react-hooks": { - "version": "4.6.2", - "resolved": "https://registry.npmjs.org/eslint-plugin-react-hooks/-/eslint-plugin-react-hooks-4.6.2.tgz", - "integrity": "sha512-QzliNJq4GinDBcD8gPB5v0wh6g8q3SUi6EFF0x8N/BL9PoVs0atuGc47ozMRyOWAKdwaZ5OnbOEa3WR+dSGKuQ==", + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/eslint-plugin-react-hooks/-/eslint-plugin-react-hooks-5.2.0.tgz", + "integrity": "sha512-+f15FfK64YQwZdJNELETdn5ibXEUQmW1DZL6KXhNnc2heoy/sg9VJJeT7n8TlMWouzWqSWavFkIhHyIbIAEapg==", "dev": true, "license": "MIT", "engines": { "node": ">=10" }, "peerDependencies": { - "eslint": "^3.0.0 || ^4.0.0 || ^5.0.0 || ^6.0.0 || ^7.0.0 || ^8.0.0-0" + "eslint": "^3.0.0 || ^4.0.0 || ^5.0.0 || ^6.0.0 || ^7.0.0 || ^8.0.0-0 || ^9.0.0" } }, "node_modules/eslint-plugin-react-refresh": { @@ -6078,6 +7160,16 @@ "node": ">=4.0" } }, + "node_modules/estree-util-is-identifier-name": { + "version": "3.0.0", + 
"resolved": "https://registry.npmjs.org/estree-util-is-identifier-name/-/estree-util-is-identifier-name-3.0.0.tgz", + "integrity": "sha512-hFtqIDZTIUZ9BXLb8y4pYGyk6+wekIivNVTcmvk8NoOh+VeRn5y6cEHzbURrWbfp1fIqdVipilzj+lfaadNZmg==", + "license": "MIT", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, "node_modules/estree-walker": { "version": "3.0.3", "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-3.0.3.tgz", @@ -6108,6 +7200,12 @@ "node": ">=12.0.0" } }, + "node_modules/extend": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz", + "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==", + "license": "MIT" + }, "node_modules/fake-indexeddb": { "version": "6.0.0", "resolved": "https://registry.npmjs.org/fake-indexeddb/-/fake-indexeddb-6.0.0.tgz", @@ -6121,7 +7219,6 @@ "version": "3.1.3", "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==", - "dev": true, "license": "MIT" }, "node_modules/fast-diff": { @@ -6175,6 +7272,22 @@ "integrity": "sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==", "dev": true }, + "node_modules/fast-uri": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/fast-uri/-/fast-uri-3.1.0.tgz", + "integrity": "sha512-iPeeDKJSWf4IEOasVVrknXpaBV0IApz/gp7S2bb7Z4Lljbl2MGJRqInZiUrQwV16cpzw/D3S5j5Julj/gT52AA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/fastify" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/fastify" + } + ], + "license": "BSD-3-Clause" + }, "node_modules/fastq": { "version": "1.15.0", "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.15.0.tgz", @@ -6235,6 +7348,16 @@ "url": 
"https://github.com/sponsors/sindresorhus" } }, + "node_modules/find-yarn-workspace-root": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/find-yarn-workspace-root/-/find-yarn-workspace-root-2.0.0.tgz", + "integrity": "sha512-1IMnbjt4KzsQfnhnzNd8wUEgXZ44IzZaZmnLYx7D5FZlaHt2gW20Cri8Q+E/t5tIj4+epTBub+2Zxu/vNILzqQ==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "micromatch": "^4.0.2" + } + }, "node_modules/flat-cache": { "version": "3.2.0", "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-3.2.0.tgz", @@ -6323,17 +7446,19 @@ } }, "node_modules/framer-motion": { - "version": "11.5.4", - "resolved": "https://registry.npmjs.org/framer-motion/-/framer-motion-11.5.4.tgz", - "integrity": "sha512-E+tb3/G6SO69POkdJT+3EpdMuhmtCh9EWuK4I1DnIC23L7tFPrl8vxP+LSovwaw6uUr73rUbpb4FgK011wbRJQ==", + "version": "12.35.0", + "resolved": "https://registry.npmjs.org/framer-motion/-/framer-motion-12.35.0.tgz", + "integrity": "sha512-w8hghCMQ4oq10j6aZh3U2yeEQv5K69O/seDI/41PK4HtgkLrcBovUNc0ayBC3UyyU7V1mrY2yLzvYdWJX9pGZQ==", "license": "MIT", "dependencies": { + "motion-dom": "^12.35.0", + "motion-utils": "^12.29.2", "tslib": "^2.4.0" }, "peerDependencies": { "@emotion/is-prop-valid": "*", - "react": "^18.0.0", - "react-dom": "^18.0.0" + "react": "^18.0.0 || ^19.0.0", + "react-dom": "^18.0.0 || ^19.0.0" }, "peerDependenciesMeta": { "@emotion/is-prop-valid": { @@ -6347,6 +7472,31 @@ } } }, + "node_modules/fs-extra": { + "version": "10.1.0", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.1.0.tgz", + "integrity": "sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "graceful-fs": "^4.2.0", + "jsonfile": "^6.0.1", + "universalify": "^2.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/fs-extra/node_modules/universalify": { + "version": "2.0.1", + "resolved": 
"https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz", + "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 10.0.0" + } + }, "node_modules/fs.realpath": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", @@ -6508,6 +7658,13 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/graceful-fs": { + "version": "4.2.11", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz", + "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==", + "dev": true, + "license": "ISC" + }, "node_modules/graphemer": { "version": "1.4.0", "resolved": "https://registry.npmjs.org/graphemer/-/graphemer-1.4.0.tgz", @@ -6523,11 +7680,6 @@ "node": "^12.22.0 || ^14.16.0 || ^16.0.0 || >=17.0.0" } }, - "node_modules/hamt_plus": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/hamt_plus/-/hamt_plus-1.0.2.tgz", - "integrity": "sha512-t2JXKaehnMb9paaYA7J0BX8QQAY8lwfQ9Gjf4pg/mk4krt+cmwmU652HOoWonf+7+EQV97ARPMhhVgU1ra2GhA==" - }, "node_modules/has-flag": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", @@ -6537,6 +7689,19 @@ "node": ">=8" } }, + "node_modules/has-property-descriptors": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.2.tgz", + "integrity": "sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-define-property": "^1.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/has-symbols": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz", @@ -6576,6 +7741,46 @@ "node": ">= 0.4" } }, + 
"node_modules/hast-util-to-jsx-runtime": { + "version": "2.3.6", + "resolved": "https://registry.npmjs.org/hast-util-to-jsx-runtime/-/hast-util-to-jsx-runtime-2.3.6.tgz", + "integrity": "sha512-zl6s8LwNyo1P9uw+XJGvZtdFF1GdAkOg8ujOw+4Pyb76874fLps4ueHXDhXWdk6YHQ6OgUtinliG7RsYvCbbBg==", + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0", + "@types/hast": "^3.0.0", + "@types/unist": "^3.0.0", + "comma-separated-tokens": "^2.0.0", + "devlop": "^1.0.0", + "estree-util-is-identifier-name": "^3.0.0", + "hast-util-whitespace": "^3.0.0", + "mdast-util-mdx-expression": "^2.0.0", + "mdast-util-mdx-jsx": "^3.0.0", + "mdast-util-mdxjs-esm": "^2.0.0", + "property-information": "^7.0.0", + "space-separated-tokens": "^2.0.0", + "style-to-js": "^1.0.0", + "unist-util-position": "^5.0.0", + "vfile-message": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-whitespace": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/hast-util-whitespace/-/hast-util-whitespace-3.0.0.tgz", + "integrity": "sha512-88JUN06ipLwsnv+dVn+OIYOvAuvBMy/Qoi6O7mQHxdPXpjy+Cd6xRkWwux7DKO+4sYILtLBRIKgsdpS2gQc7qw==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, "node_modules/headers-polyfill": { "version": "4.0.2", "resolved": "https://registry.npmjs.org/headers-polyfill/-/headers-polyfill-4.0.2.tgz", @@ -6625,6 +7830,16 @@ "void-elements": "3.1.0" } }, + "node_modules/html-url-attributes": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/html-url-attributes/-/html-url-attributes-3.0.1.tgz", + "integrity": "sha512-ol6UPyBWqsrO6EJySPz2O7ZSr856WDrEzM5zMqp+FJJLGMW35cLYmmZnl0vztAZxRUoNZJFTCohfjuIJ8I4QBQ==", + "license": "MIT", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, "node_modules/http-proxy-agent": { 
"version": "7.0.2", "resolved": "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-7.0.2.tgz", @@ -6672,6 +7887,7 @@ } ], "license": "MIT", + "peer": true, "dependencies": { "@babel/runtime": "^7.23.2" }, @@ -6766,6 +7982,36 @@ "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==" }, + "node_modules/inline-style-parser": { + "version": "0.2.7", + "resolved": "https://registry.npmjs.org/inline-style-parser/-/inline-style-parser-0.2.7.tgz", + "integrity": "sha512-Nb2ctOyNR8DqQoR0OwRG95uNWIC0C1lCgf5Naz5H6Ji72KZ8OcFZLz2P5sNgwlyoJ8Yif11oMuYs5pBQa86csA==", + "license": "MIT" + }, + "node_modules/is-alphabetical": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-alphabetical/-/is-alphabetical-2.0.1.tgz", + "integrity": "sha512-FWyyY60MeTNyeSRpkM2Iry0G9hpr7/9kD40mD/cGQEuilcZYS4okz8SN2Q6rLCJ8gbCt6fN+rC+6tMGS99LaxQ==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/is-alphanumerical": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-alphanumerical/-/is-alphanumerical-2.0.1.tgz", + "integrity": "sha512-hmbYhX/9MUMF5uh7tOXyK/n0ZvWpad5caBA17GsC6vyuCqaWliRG5K1qS9inmUhEMaOBIW7/whAnSwveW/LtZw==", + "license": "MIT", + "dependencies": { + "is-alphabetical": "^2.0.0", + "is-decimal": "^2.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, "node_modules/is-binary-path": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", @@ -6788,6 +8034,16 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/is-decimal": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-decimal/-/is-decimal-2.0.1.tgz", + "integrity": 
"sha512-AAB9hiomQs5DXWcRB1rqsxGUstbRroFOPPVAomNk/3XHR5JyEZChOyTWe2oayKnsSsr/kcGqF+z6yuH6HHpN0A==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, "node_modules/is-docker": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/is-docker/-/is-docker-3.0.0.tgz", @@ -6831,6 +8087,16 @@ "node": ">=0.10.0" } }, + "node_modules/is-hexadecimal": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-hexadecimal/-/is-hexadecimal-2.0.1.tgz", + "integrity": "sha512-DgZQp241c8oO6cA1SbTEWiXeoxV42vlcJxgH+B3hi1AiqqKruZR3ZGF8In3fj4+/y/7rHvlOZLZtgJ/4ttYGZg==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, "node_modules/is-inside-container": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/is-inside-container/-/is-inside-container-1.0.0.tgz", @@ -6873,6 +8139,18 @@ "node": ">=8" } }, + "node_modules/is-plain-obj": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-4.1.0.tgz", + "integrity": "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/is-potential-custom-element-name": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/is-potential-custom-element-name/-/is-potential-custom-element-name-1.0.1.tgz", @@ -6918,6 +8196,13 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/isarray": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-2.0.5.tgz", + "integrity": "sha512-xHjhDr3cNBK0BzdUJSPXZntQUx/mwMS5Rw4A7lPJ90XGAO6ISP/ePDNuo0vhqOZU+UD5JoodwCAAoZQd3FeAKw==", + "dev": true, + "license": "MIT" + }, "node_modules/isexe": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", @@ -6979,14 +8264,24 
@@ } }, "node_modules/its-fine": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/its-fine/-/its-fine-1.1.3.tgz", - "integrity": "sha512-mncCA+yb6tuh5zK26cHqKlsSyxm4zdm4YgJpxycyx6p9fgxgK5PLu3iDVpKhzTn57Yrv3jk/r0aK0RFTT1OjFw==", + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/its-fine/-/its-fine-2.0.0.tgz", + "integrity": "sha512-KLViCmWx94zOvpLwSlsx6yOCeMhZYaxrJV87Po5k/FoZzcPSahvK5qJ7fYhS61sZi5ikmh2S3Hz55A2l3U69ng==", + "license": "MIT", "dependencies": { - "@types/react-reconciler": "^0.28.0" + "@types/react-reconciler": "^0.28.9" }, "peerDependencies": { - "react": ">=18.0" + "react": "^19.0.0" + } + }, + "node_modules/its-fine/node_modules/@types/react-reconciler": { + "version": "0.28.9", + "resolved": "https://registry.npmjs.org/@types/react-reconciler/-/react-reconciler-0.28.9.tgz", + "integrity": "sha512-HHM3nxyUZ3zAylX8ZEyrDNd2XZOnQ0D5XfunJF5FLQnZbHHYq4UWvW1QfelQNXv1ICNkwYhfxjwfnqivYB6bFg==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*" } }, "node_modules/jackspeak": { @@ -7127,6 +8422,26 @@ "dev": true, "license": "MIT" }, + "node_modules/json-stable-stringify": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/json-stable-stringify/-/json-stable-stringify-1.3.0.tgz", + "integrity": "sha512-qtYiSSFlwot9XHtF9bD9c7rwKjr+RecWT//ZnPvSmEjpV5mmPOCN4j8UjY5hbjNkOwZ/jQv3J6R1/pL7RwgMsg==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "call-bound": "^1.0.4", + "isarray": "^2.0.5", + "jsonify": "^0.0.1", + "object-keys": "^1.1.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/json-stable-stringify-without-jsonify": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz", @@ -7138,6 +8453,48 @@ "resolved": "https://registry.npmjs.org/jsonc-parser/-/jsonc-parser-3.2.0.tgz", "integrity": 
"sha512-gfFQZrcTc8CnKXp6Y4/CBT3fTc0OVuDofpre4aEeEpSBPV5X5v4+Vmx+8snU7RLPrNHPKSgLxGo9YuQzz20o+w==" }, + "node_modules/jsonfile": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.2.0.tgz", + "integrity": "sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg==", + "dev": true, + "license": "MIT", + "dependencies": { + "universalify": "^2.0.0" + }, + "optionalDependencies": { + "graceful-fs": "^4.1.6" + } + }, + "node_modules/jsonfile/node_modules/universalify": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz", + "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 10.0.0" + } + }, + "node_modules/jsonify": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/jsonify/-/jsonify-0.0.1.tgz", + "integrity": "sha512-2/Ki0GcmuqSrgFyelQq9M05y7PS0mEwuIzrf3f1fPqkVDVRvZrPZtVSMHxdgo8Aq0sxAOb/cr2aqqA3LeWHVPg==", + "dev": true, + "license": "Public Domain", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/jsonpointer": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/jsonpointer/-/jsonpointer-5.0.1.tgz", + "integrity": "sha512-p/nXbhSEcu3pZRdkW1OfJhpsVtW1gd4Wa1fnQc9YLiTfAjn0312eMKimbdIQzuZl9aa9xUGaRlP9T/CJE/ditQ==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/keyv": { "version": "4.5.4", "resolved": "https://registry.npmjs.org/keyv/-/keyv-4.5.4.tgz", @@ -7148,6 +8505,16 @@ "json-buffer": "3.0.1" } }, + "node_modules/klaw-sync": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/klaw-sync/-/klaw-sync-6.0.0.tgz", + "integrity": "sha512-nIeuVSzdCCs6TDPTqI8w1Yre34sSq7AkZ4B3sfOBbI2CgVSB4Du4aLQijFU2+lhAFCwt9+42Hel6lQNIv6AntQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "graceful-fs": "^4.1.11" + } + }, 
"node_modules/konva": { "version": "9.3.18", "resolved": "https://registry.npmjs.org/konva/-/konva-9.3.18.tgz", @@ -7166,7 +8533,8 @@ "url": "https://github.com/sponsors/lavrton" } ], - "license": "MIT" + "license": "MIT", + "peer": true }, "node_modules/levn": { "version": "0.4.1", @@ -7215,12 +8583,28 @@ "integrity": "sha512-LgVTMpQtIopCi79SJeDiP0TfWi5CNEc/L/aRdTh3yIvmZXTnheWpKjSZhnvMl8iXbC1tFg9gdHHDMLoV7CnG+w==", "license": "MIT" }, + "node_modules/lodash-es": { + "version": "4.17.23", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.23.tgz", + "integrity": "sha512-LgVTMpQtIopCi79SJeDiP0TfWi5CNEc/L/aRdTh3yIvmZXTnheWpKjSZhnvMl8iXbC1tFg9gdHHDMLoV7CnG+w==", + "license": "MIT" + }, "node_modules/lodash.merge": { "version": "4.6.2", "resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz", "integrity": "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==", "dev": true }, + "node_modules/longest-streak": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/longest-streak/-/longest-streak-3.1.0.tgz", + "integrity": "sha512-9Ri+o0JYgehTaVBBDoMqIl8GXtbWg711O3srftcHhZ0dqnETqLaoIK0x17fUw9rFSlK/0NlsKe0Ahhyl5pXE2g==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, "node_modules/loose-envify": { "version": "1.4.0", "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz", @@ -7293,6 +8677,33 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/markdown-table": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/markdown-table/-/markdown-table-3.0.4.tgz", + "integrity": "sha512-wiYz4+JrLyb/DqW2hkFJxP7Vd7JuTDm77fvbM8VfEQdmSMqcImWeeRbHwZjBjIFki/VaMK2BhFi7oUUZeM5bqw==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/markdown-to-jsx": { + "version": "8.0.0", + "resolved": 
"https://registry.npmjs.org/markdown-to-jsx/-/markdown-to-jsx-8.0.0.tgz", + "integrity": "sha512-hWEaRxeCDjes1CVUQqU+Ov0mCqBqkGhLKjL98KdbwHSgEWZZSJQeGlJQatVfeZ3RaxrfTrZZ3eczl2dhp5c/pA==", + "license": "MIT", + "engines": { + "node": ">= 10" + }, + "peerDependencies": { + "react": ">= 0.14.0" + }, + "peerDependenciesMeta": { + "react": { + "optional": true + } + } + }, "node_modules/math-intrinsics": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz", @@ -7302,6 +8713,288 @@ "node": ">= 0.4" } }, + "node_modules/mdast-util-find-and-replace": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/mdast-util-find-and-replace/-/mdast-util-find-and-replace-3.0.2.tgz", + "integrity": "sha512-Tmd1Vg/m3Xz43afeNxDIhWRtFZgM2VLyaf4vSTYwudTyeuTneoL3qtWMA5jeLyz/O1vDJmmV4QuScFCA2tBPwg==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "escape-string-regexp": "^5.0.0", + "unist-util-is": "^6.0.0", + "unist-util-visit-parents": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-find-and-replace/node_modules/escape-string-regexp": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-5.0.0.tgz", + "integrity": "sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/mdast-util-from-markdown": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/mdast-util-from-markdown/-/mdast-util-from-markdown-2.0.2.tgz", + "integrity": "sha512-uZhTV/8NBuw0WHkPTrCqDOl0zVe1BIng5ZtHoDk49ME1qqcjYmmLmOf0gELgcRMxN4w2iuIeVso5/6QymSrgmA==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "@types/unist": "^3.0.0", + "decode-named-character-reference": 
"^1.0.0", + "devlop": "^1.0.0", + "mdast-util-to-string": "^4.0.0", + "micromark": "^4.0.0", + "micromark-util-decode-numeric-character-reference": "^2.0.0", + "micromark-util-decode-string": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + "unist-util-stringify-position": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm/-/mdast-util-gfm-3.1.0.tgz", + "integrity": "sha512-0ulfdQOM3ysHhCJ1p06l0b0VKlhU0wuQs3thxZQagjcjPrlFRqY215uZGHHJan9GEAXd9MbfPjFJz+qMkVR6zQ==", + "license": "MIT", + "dependencies": { + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-gfm-autolink-literal": "^2.0.0", + "mdast-util-gfm-footnote": "^2.0.0", + "mdast-util-gfm-strikethrough": "^2.0.0", + "mdast-util-gfm-table": "^2.0.0", + "mdast-util-gfm-task-list-item": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-autolink-literal": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-autolink-literal/-/mdast-util-gfm-autolink-literal-2.0.1.tgz", + "integrity": "sha512-5HVP2MKaP6L+G6YaxPNjuL0BPrq9orG3TsrZ9YXbA3vDw/ACI4MEsnoDpn6ZNm7GnZgtAcONJyPhOP8tNJQavQ==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "ccount": "^2.0.0", + "devlop": "^1.0.0", + "mdast-util-find-and-replace": "^3.0.0", + "micromark-util-character": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-footnote": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-footnote/-/mdast-util-gfm-footnote-2.1.0.tgz", + "integrity": 
"sha512-sqpDWlsHn7Ac9GNZQMeUzPQSMzR6Wv0WKRNvQRg0KqHh02fpTz69Qc1QSseNX29bhz1ROIyNyxExfawVKTm1GQ==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "devlop": "^1.1.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-strikethrough": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-strikethrough/-/mdast-util-gfm-strikethrough-2.0.0.tgz", + "integrity": "sha512-mKKb915TF+OC5ptj5bJ7WFRPdYtuHv0yTRxK2tJvi+BDqbkiG7h7u/9SI89nRAYcmap2xHQL9D+QG/6wSrTtXg==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-table": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-table/-/mdast-util-gfm-table-2.0.0.tgz", + "integrity": "sha512-78UEvebzz/rJIxLvE7ZtDd/vIQ0RHv+3Mh5DR96p7cS7HsBhYIICDBCu8csTNWNO6tBWfqXPWekRuj2FNOGOZg==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "markdown-table": "^3.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-task-list-item": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-task-list-item/-/mdast-util-gfm-task-list-item-2.0.0.tgz", + "integrity": "sha512-IrtvNvjxC1o06taBAVJznEnkiHxLFTzgonUdy8hzFVeDun0uTjxxrRGVaNFqkU1wJR3RBPEfsxmU6jDWPofrTQ==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "mdast-util-from-markdown": "^2.0.0", + 
"mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-mdx-expression": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/mdast-util-mdx-expression/-/mdast-util-mdx-expression-2.0.1.tgz", + "integrity": "sha512-J6f+9hUp+ldTZqKRSg7Vw5V6MqjATc+3E4gf3CFNcuZNWD8XdyI6zQ8GqH7f8169MM6P7hMBRDVGnn7oHB9kXQ==", + "license": "MIT", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-mdx-jsx": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/mdast-util-mdx-jsx/-/mdast-util-mdx-jsx-3.2.0.tgz", + "integrity": "sha512-lj/z8v0r6ZtsN/cGNNtemmmfoLAFZnjMbNyLzBafjzikOM+glrjNHPlf6lQDOTccj9n5b0PPihEBbhneMyGs1Q==", + "license": "MIT", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "@types/unist": "^3.0.0", + "ccount": "^2.0.0", + "devlop": "^1.1.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0", + "parse-entities": "^4.0.0", + "stringify-entities": "^4.0.0", + "unist-util-stringify-position": "^4.0.0", + "vfile-message": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-mdxjs-esm": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/mdast-util-mdxjs-esm/-/mdast-util-mdxjs-esm-2.0.1.tgz", + "integrity": "sha512-EcmOpxsZ96CvlP03NghtH1EsLtr0n9Tm4lPUJUBccV9RwUOneqSycg19n5HGzCf+10LozMRSObtVr3ee1WoHtg==", + "license": "MIT", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "mdast-util-from-markdown": 
"^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-phrasing": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/mdast-util-phrasing/-/mdast-util-phrasing-4.1.0.tgz", + "integrity": "sha512-TqICwyvJJpBwvGAMZjj4J2n0X8QWp21b9l0o7eXyVJ25YNWYbJDVIyD1bZXE6WtV6RmKJVYmQAKWa0zWOABz2w==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "unist-util-is": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-to-hast": { + "version": "13.2.1", + "resolved": "https://registry.npmjs.org/mdast-util-to-hast/-/mdast-util-to-hast-13.2.1.tgz", + "integrity": "sha512-cctsq2wp5vTsLIcaymblUriiTcZd0CwWtCbLvrOzYCDZoWyMNV8sZ7krj09FSnsiJi3WVsHLM4k6Dq/yaPyCXA==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "@ungap/structured-clone": "^1.0.0", + "devlop": "^1.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "trim-lines": "^3.0.0", + "unist-util-position": "^5.0.0", + "unist-util-visit": "^5.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-to-markdown": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/mdast-util-to-markdown/-/mdast-util-to-markdown-2.1.2.tgz", + "integrity": "sha512-xj68wMTvGXVOKonmog6LwyJKrYXZPvlwabaryTjLh9LuvovB/KAH+kvi8Gjj+7rJjsFi23nkUxRQv1KqSroMqA==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "@types/unist": "^3.0.0", + "longest-streak": "^3.0.0", + "mdast-util-phrasing": "^4.0.0", + "mdast-util-to-string": "^4.0.0", + "micromark-util-classify-character": "^2.0.0", + "micromark-util-decode-string": "^2.0.0", + "unist-util-visit": "^5.0.0", + "zwitch": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": 
"https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-to-string": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-to-string/-/mdast-util-to-string-4.0.0.tgz", + "integrity": "sha512-0H44vDimn51F0YwvxSJSm0eCDOJTRlmN0R1yBh4HLj9wiV1Dn0QoXGbvFAWj2hSItVTlCmBF1hqKlIyUBVFLPg==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, "node_modules/merge-stream": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz", @@ -7316,6 +9009,569 @@ "node": ">= 8" } }, + "node_modules/micromark": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/micromark/-/micromark-4.0.2.tgz", + "integrity": "sha512-zpe98Q6kvavpCr1NPVSCMebCKfD7CA2NqZ+rykeNhONIJBpc1tFKt9hucLGwha3jNTNI8lHpctWJWoimVF4PfA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "@types/debug": "^4.0.0", + "debug": "^4.0.0", + "decode-named-character-reference": "^1.0.0", + "devlop": "^1.0.0", + "micromark-core-commonmark": "^2.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-chunked": "^2.0.0", + "micromark-util-combine-extensions": "^2.0.0", + "micromark-util-decode-numeric-character-reference": "^2.0.0", + "micromark-util-encode": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-resolve-all": "^2.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "micromark-util-subtokenize": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-core-commonmark": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/micromark-core-commonmark/-/micromark-core-commonmark-2.0.3.tgz", 
+ "integrity": "sha512-RDBrHEMSxVFLg6xvnXmb1Ayr2WzLAWjeSATAoxwKYJV94TeNavgoIdA0a9ytzDSVzBy2YKFK+emCPOEibLeCrg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "decode-named-character-reference": "^1.0.0", + "devlop": "^1.0.0", + "micromark-factory-destination": "^2.0.0", + "micromark-factory-label": "^2.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-factory-title": "^2.0.0", + "micromark-factory-whitespace": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-chunked": "^2.0.0", + "micromark-util-classify-character": "^2.0.0", + "micromark-util-html-tag-name": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-resolve-all": "^2.0.0", + "micromark-util-subtokenize": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-gfm": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm/-/micromark-extension-gfm-3.0.0.tgz", + "integrity": "sha512-vsKArQsicm7t0z2GugkCKtZehqUm31oeGBV/KVSorWSy8ZlNAv7ytjFhvaryUiCUJYqs+NoE6AFhpQvBTM6Q4w==", + "license": "MIT", + "dependencies": { + "micromark-extension-gfm-autolink-literal": "^2.0.0", + "micromark-extension-gfm-footnote": "^2.0.0", + "micromark-extension-gfm-strikethrough": "^2.0.0", + "micromark-extension-gfm-table": "^2.0.0", + "micromark-extension-gfm-tagfilter": "^2.0.0", + "micromark-extension-gfm-task-list-item": "^2.0.0", + "micromark-util-combine-extensions": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-autolink-literal": { + "version": "2.1.0", + "resolved": 
"https://registry.npmjs.org/micromark-extension-gfm-autolink-literal/-/micromark-extension-gfm-autolink-literal-2.1.0.tgz", + "integrity": "sha512-oOg7knzhicgQ3t4QCjCWgTmfNhvQbDDnJeVu9v81r7NltNCVmhPy1fJRX27pISafdjL+SVc4d3l48Gb6pbRypw==", + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-footnote": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-footnote/-/micromark-extension-gfm-footnote-2.1.0.tgz", + "integrity": "sha512-/yPhxI1ntnDNsiHtzLKYnE3vf9JZ6cAisqVDauhp4CEHxlb4uoOTxOCJ+9s51bIB8U1N1FJ1RXOKTIlD5B/gqw==", + "license": "MIT", + "dependencies": { + "devlop": "^1.0.0", + "micromark-core-commonmark": "^2.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-strikethrough": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-strikethrough/-/micromark-extension-gfm-strikethrough-2.1.0.tgz", + "integrity": "sha512-ADVjpOOkjz1hhkZLlBiYA9cR2Anf8F4HqZUO6e5eDcPQd0Txw5fxLzzxnEkSkfnD0wziSGiv7sYhk/ktvbf1uw==", + "license": "MIT", + "dependencies": { + "devlop": "^1.0.0", + "micromark-util-chunked": "^2.0.0", + "micromark-util-classify-character": "^2.0.0", + "micromark-util-resolve-all": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + 
"node_modules/micromark-extension-gfm-table": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-table/-/micromark-extension-gfm-table-2.1.1.tgz", + "integrity": "sha512-t2OU/dXXioARrC6yWfJ4hqB7rct14e8f7m0cbI5hUmDyyIlwv5vEtooptH8INkbLzOatzKuVbQmAYcbWoyz6Dg==", + "license": "MIT", + "dependencies": { + "devlop": "^1.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-tagfilter": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-tagfilter/-/micromark-extension-gfm-tagfilter-2.0.0.tgz", + "integrity": "sha512-xHlTOmuCSotIA8TW1mDIM6X2O1SiX5P9IuDtqGonFhEK0qgRI4yeC6vMxEV2dgyr2TiD+2PQ10o+cOhdVAcwfg==", + "license": "MIT", + "dependencies": { + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-task-list-item": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-task-list-item/-/micromark-extension-gfm-task-list-item-2.1.0.tgz", + "integrity": "sha512-qIBZhqxqI6fjLDYFTBIa4eivDMnP+OZqsNwmQ3xNLE4Cxwc+zfQEfbs6tzAo2Hjq+bh6q5F+Z8/cksrLFYWQQw==", + "license": "MIT", + "dependencies": { + "devlop": "^1.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-factory-destination": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-destination/-/micromark-factory-destination-2.0.1.tgz", + "integrity": 
"sha512-Xe6rDdJlkmbFRExpTOmRj9N3MaWmbAgdpSrBQvCFqhezUn4AHqJHbaEnfbVYYiexVSs//tqOdY/DxhjdCiJnIA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-label": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-label/-/micromark-factory-label-2.0.1.tgz", + "integrity": "sha512-VFMekyQExqIW7xIChcXn4ok29YE3rnuyveW3wZQWWqF4Nv9Wk5rgJ99KzPvHjkmPXF93FXIbBp6YdW3t71/7Vg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "devlop": "^1.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-space": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.1.tgz", + "integrity": "sha512-zRkxjtBxxLd2Sc0d+fbnEunsTj46SWXgXciZmHq0kDYGnck/ZSGj9/wULTV95uoeYiK5hRXP2mJ98Uo4cq/LQg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-title": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-title/-/micromark-factory-title-2.0.1.tgz", + "integrity": "sha512-5bZ+3CjhAd9eChYTHsjy6TGxpOFSKgKKJPJxr293jTbfry2KDoWkhBb6TcPVB4NmzaPhMs1Frm9AZH7OD4Cjzw==", + "funding": 
[ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-whitespace": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-whitespace/-/micromark-factory-whitespace-2.0.1.tgz", + "integrity": "sha512-Ob0nuZ3PKt/n0hORHyvoD9uZhr+Za8sFoP+OnMcnWK5lngSzALgQYKMr9RJVOWLqQYuyn6ulqGWSXdwf6F80lQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-character": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.1.tgz", + "integrity": "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-chunked": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-chunked/-/micromark-util-chunked-2.0.1.tgz", + "integrity": "sha512-QUNFEOPELfmvv+4xiNg2sRYeS/P84pTW0TCgP5zc9FpXetHY0ab7SxKyAQCNCc1eK0459uoLI1y5oO5Vc1dbhA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": 
"https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-classify-character": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-classify-character/-/micromark-util-classify-character-2.0.1.tgz", + "integrity": "sha512-K0kHzM6afW/MbeWYWLjoHQv1sgg2Q9EccHEDzSkxiP/EaagNzCm7T/WMKZ3rjMbvIpvBiZgwR3dKMygtA4mG1Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-combine-extensions": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-combine-extensions/-/micromark-util-combine-extensions-2.0.1.tgz", + "integrity": "sha512-OnAnH8Ujmy59JcyZw8JSbK9cGpdVY44NKgSM7E9Eh7DiLS2E9RNQf0dONaGDzEG9yjEl5hcqeIsj4hfRkLH/Bg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-chunked": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-decode-numeric-character-reference": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/micromark-util-decode-numeric-character-reference/-/micromark-util-decode-numeric-character-reference-2.0.2.tgz", + "integrity": "sha512-ccUbYk6CwVdkmCQMyr64dXz42EfHGkPQlBj5p7YVGzq8I7CtjXZJrubAYezf7Rp+bjPseiROqe7G6foFd+lEuw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": 
"OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-decode-string": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-decode-string/-/micromark-util-decode-string-2.0.1.tgz", + "integrity": "sha512-nDV/77Fj6eH1ynwscYTOsbK7rR//Uj0bZXBwJZRfaLEJ1iGBR6kIfNmlNqaqJf649EP0F3NWNdeJi03elllNUQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "decode-named-character-reference": "^1.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-decode-numeric-character-reference": "^2.0.0", + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-encode": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-encode/-/micromark-util-encode-2.0.1.tgz", + "integrity": "sha512-c3cVx2y4KqUnwopcO9b/SCdo2O67LwJJ/UyqGfbigahfegL9myoEFoDYZgkT7f36T0bLrM9hZTAaAyH+PCAXjw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-util-html-tag-name": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-html-tag-name/-/micromark-util-html-tag-name-2.0.1.tgz", + "integrity": "sha512-2cNEiYDhCWKI+Gs9T0Tiysk136SnR13hhO8yW6BGNyhOC4qYFnwF1nKfD3HFAIXA5c45RrIG1ub11GiXeYd1xA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-util-normalize-identifier": { + "version": "2.0.1", + "resolved": 
"https://registry.npmjs.org/micromark-util-normalize-identifier/-/micromark-util-normalize-identifier-2.0.1.tgz", + "integrity": "sha512-sxPqmo70LyARJs0w2UclACPUUEqltCkJ6PhKdMIDuJ3gSf/Q+/GIe3WKl0Ijb/GyH9lOpUkRAO2wp0GVkLvS9Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-resolve-all": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-resolve-all/-/micromark-util-resolve-all-2.0.1.tgz", + "integrity": "sha512-VdQyxFWFT2/FGJgwQnJYbe1jjQoNTS4RjglmSjTUlpUMa95Htx9NHeYW4rGDJzbjvCsl9eLjMQwGeElsqmzcHg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-sanitize-uri": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-sanitize-uri/-/micromark-util-sanitize-uri-2.0.1.tgz", + "integrity": "sha512-9N9IomZ/YuGGZZmQec1MbgxtlgougxTodVwDzzEouPKo3qFWvymFHWcnDi2vzV1ff6kas9ucW+o3yzJK9YB1AQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-encode": "^2.0.0", + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-subtokenize": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-subtokenize/-/micromark-util-subtokenize-2.1.0.tgz", + "integrity": 
"sha512-XQLu552iSctvnEcgXw6+Sx75GflAPNED1qx7eBJ+wydBb2KCbRZe+NwvIEEMM83uml1+2WSXpBAcp9IUCgCYWA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "devlop": "^1.0.0", + "micromark-util-chunked": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-symbol": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", + "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-util-types": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/micromark-util-types/-/micromark-util-types-2.0.2.tgz", + "integrity": "sha512-Yw0ECSpJoViF1qTU4DC6NwtC4aWGt1EkzaQB8KPPyCRR8z9TWeV0HbEFGTO+ZY1wB22zmxnJqhPyTpOVCpeHTA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, "node_modules/micromatch": { "version": "4.0.5", "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.5.tgz", @@ -7388,6 +9644,16 @@ "node": "*" } }, + "node_modules/minimist": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz", + "integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/minipass": { "version": 
"7.1.2", "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz", @@ -7414,9 +9680,10 @@ } }, "node_modules/monaco-editor": { - "version": "0.44.0", - "resolved": "https://registry.npmjs.org/monaco-editor/-/monaco-editor-0.44.0.tgz", - "integrity": "sha512-5SmjNStN6bSuSE5WPT2ZV+iYn1/yI9sd4Igtk23ChvqB7kDk9lZbB9F5frsuvpB+2njdIeGGFf2G4gbE6rCC9Q==", + "version": "0.52.2", + "resolved": "https://registry.npmjs.org/monaco-editor/-/monaco-editor-0.52.2.tgz", + "integrity": "sha512-GEQWEZmfkOGLdd3XK8ryrfWz3AIP8YymVXiPHEdewrUq7mh0qrKrfHLNCXcbB6sTnMLnOZ3ztSiKcciFUkIJwQ==", + "license": "MIT", "peer": true }, "node_modules/monaco-languageserver-types": { @@ -7489,11 +9756,25 @@ "monaco-editor": ">=0.36" } }, + "node_modules/motion-dom": { + "version": "12.35.0", + "resolved": "https://registry.npmjs.org/motion-dom/-/motion-dom-12.35.0.tgz", + "integrity": "sha512-FFMLEnIejK/zDABn+vqGVAUN4T0+3fw+cVAY8MMT65yR+j5uMuvWdd4npACWhh94OVWQs79CrBBuwOwGRZAQiA==", + "license": "MIT", + "dependencies": { + "motion-utils": "^12.29.2" + } + }, + "node_modules/motion-utils": { + "version": "12.29.2", + "resolved": "https://registry.npmjs.org/motion-utils/-/motion-utils-12.29.2.tgz", + "integrity": "sha512-G3kc34H2cX2gI63RqU+cZq+zWRRPSsNIOjpdl9TN4AQwC4sgwYPl/Q/Obf/d53nOm569T0fYK+tcoSV50BWx8A==", + "license": "MIT" + }, "node_modules/ms": { "version": "2.1.3", "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", - "dev": true, "license": "MIT" }, "node_modules/msw": { @@ -7599,12 +9880,13 @@ "dev": true }, "node_modules/next-themes": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/next-themes/-/next-themes-0.3.0.tgz", - "integrity": "sha512-/QHIrsYpd6Kfk7xakK4svpDI5mmXP0gfvCoJdGpZQ2TOrQZmsW0QxjaiLn8wbIKjtm4BTSqLoix4lxYYOnLJ/w==", + "version": "0.4.6", + "resolved": "https://registry.npmjs.org/next-themes/-/next-themes-0.4.6.tgz", + "integrity": 
"sha512-pZvgD5L0IEvX5/9GWyHMf3m8BKiVQwsCMHfoFosXtXBMnaS0ZnIJ9ST4b4NqLVKDEm8QBxoNNGNaBv2JNF6XNA==", + "license": "MIT", "peerDependencies": { - "react": "^16.8 || ^17 || ^18", - "react-dom": "^16.8 || ^17 || ^18" + "react": "^16.8 || ^17 || ^18 || ^19 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17 || ^18 || ^19 || ^19.0.0-rc" } }, "node_modules/node-fetch": { @@ -7728,6 +10010,16 @@ "node": ">= 6" } }, + "node_modules/object-keys": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/object-keys/-/object-keys-1.1.1.tgz", + "integrity": "sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, "node_modules/object-path": { "version": "0.6.0", "resolved": "https://registry.npmjs.org/object-path/-/object-path-0.6.0.tgz", @@ -7851,6 +10143,31 @@ "node": ">=6" } }, + "node_modules/parse-entities": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/parse-entities/-/parse-entities-4.0.2.tgz", + "integrity": "sha512-GG2AQYWoLgL877gQIKeRPGO1xF9+eG1ujIb5soS5gPvLQ1y2o8FL90w2QWNdf9I361Mpp7726c+lj3U0qK1uGw==", + "license": "MIT", + "dependencies": { + "@types/unist": "^2.0.0", + "character-entities-legacy": "^3.0.0", + "character-reference-invalid": "^2.0.0", + "decode-named-character-reference": "^1.0.0", + "is-alphanumerical": "^2.0.0", + "is-decimal": "^2.0.0", + "is-hexadecimal": "^2.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/parse-entities/node_modules/@types/unist": { + "version": "2.0.11", + "resolved": "https://registry.npmjs.org/@types/unist/-/unist-2.0.11.tgz", + "integrity": "sha512-CmBKiL6NNo/OqgmMn95Fk9Whlp2mtvIv+KNpQKN2F4SjvrEesubTRWGYSg+BnWZOnlCaSTU1sMpsBOzgbYhnsA==", + "license": "MIT" + }, "node_modules/parse5": { "version": "7.1.2", "resolved": "https://registry.npmjs.org/parse5/-/parse5-7.1.2.tgz", @@ -7863,6 +10180,79 @@ "url": 
"https://github.com/inikulin/parse5?sponsor=1" } }, + "node_modules/patch-package": { + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/patch-package/-/patch-package-8.0.1.tgz", + "integrity": "sha512-VsKRIA8f5uqHQ7NGhwIna6Bx6D9s/1iXlA1hthBVBEbkq+t4kXD0HHt+rJhf/Z+Ci0F/HCB2hvn0qLdLG+Qxlw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@yarnpkg/lockfile": "^1.1.0", + "chalk": "^4.1.2", + "ci-info": "^3.7.0", + "cross-spawn": "^7.0.3", + "find-yarn-workspace-root": "^2.0.0", + "fs-extra": "^10.0.0", + "json-stable-stringify": "^1.0.2", + "klaw-sync": "^6.0.0", + "minimist": "^1.2.6", + "open": "^7.4.2", + "semver": "^7.5.3", + "slash": "^2.0.0", + "tmp": "^0.2.4", + "yaml": "^2.2.2" + }, + "bin": { + "patch-package": "index.js" + }, + "engines": { + "node": ">=14", + "npm": ">5" + } + }, + "node_modules/patch-package/node_modules/is-docker": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/is-docker/-/is-docker-2.2.1.tgz", + "integrity": "sha512-F+i2BKsFrH66iaUFc0woD8sLy8getkwTwtOBjvs56Cx4CgJDeKQeqfz8wAYiSb8JOprWhHH5p77PbmYCvvUuXQ==", + "dev": true, + "license": "MIT", + "bin": { + "is-docker": "cli.js" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/patch-package/node_modules/open": { + "version": "7.4.2", + "resolved": "https://registry.npmjs.org/open/-/open-7.4.2.tgz", + "integrity": "sha512-MVHddDVweXZF3awtlAS+6pgKLlm/JgxZ90+/NBurBoQctVOOB/zDdVjcyPzQ+0laDGbsWgrRkflI65sQeOgT9Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-docker": "^2.0.0", + "is-wsl": "^2.1.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/patch-package/node_modules/slash": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/slash/-/slash-2.0.0.tgz", + "integrity": 
"sha512-ZYKh3Wh2z1PpEXWr0MpSBZ0V6mZHAQfYevttO11c51CaWjGTaadiKZ+wVt1PbMlDV5qhMFslpZCemhwOK7C89A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, "node_modules/path-browserify": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/path-browserify/-/path-browserify-1.0.1.tgz", @@ -8002,6 +10392,7 @@ } ], "license": "MIT", + "peer": true, "dependencies": { "nanoid": "^3.3.8", "picocolors": "^1.1.1", @@ -8136,6 +10527,7 @@ "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.3.3.tgz", "integrity": "sha512-i2tDNA0O5IrMO757lfrdQZCc2jPNDVntV0m/+4whiDfWaTKfMNgR7Qz0NAeGz/nRqF4m5/6CLzbP4/liHt12Ew==", "license": "MIT", + "peer": true, "bin": { "prettier": "bin/prettier.cjs" }, @@ -8274,6 +10666,16 @@ "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz", "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==" }, + "node_modules/property-information": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/property-information/-/property-information-7.1.0.tgz", + "integrity": "sha512-TwEZ+X+yCJmYfL7TPUOcvBZ4QfoT5YenQiJuX//0th53DE6w0xxLEtfK3iyryQFddXuvkIk51EEgrJQ0WJkOmQ==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, "node_modules/proxy-compare": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/proxy-compare/-/proxy-compare-3.0.0.tgz", @@ -8327,12 +10729,11 @@ ] }, "node_modules/react": { - "version": "18.3.1", - "resolved": "https://registry.npmjs.org/react/-/react-18.3.1.tgz", - "integrity": "sha512-wS+hAgJShR0KhEvPJArfuPVN1+Hz1t0Y6n5jLrGQbkb4urgPE/0Rve+1kMB1v/oWgHgm4WIcV+i7F2pTVj+2iQ==", - "dependencies": { - "loose-envify": "^1.1.0" - }, + "version": "19.2.4", + "resolved": "https://registry.npmjs.org/react/-/react-19.2.4.tgz", + "integrity": "sha512-9nfp2hYpCwOjAN+8TZFGhtWEwgvWHXqESH8qT89AT/lWklpLON22Lc8pEtnpsZz7VmawabSU0gCjnj8aC0euHQ==", + "license": "MIT", + 
"peer": true, "engines": { "node": ">=0.10.0" } @@ -8393,23 +10794,25 @@ } }, "node_modules/react-dom": { - "version": "18.3.1", - "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-18.3.1.tgz", - "integrity": "sha512-5m4nQKp+rZRb09LNH59GM4BxTh9251/ylbKIbpe7TpGxfJ+9kv6BLkLBXIjjspbgbnIBNqlI23tRnTWT0snUIw==", + "version": "19.2.4", + "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-19.2.4.tgz", + "integrity": "sha512-AXJdLo8kgMbimY95O2aKQqsz2iWi9jMgKJhRBAxECE4IFxfcazB2LmzloIoibJI3C12IlY20+KFaLv+71bUJeQ==", + "license": "MIT", + "peer": true, "dependencies": { - "loose-envify": "^1.1.0", - "scheduler": "^0.23.2" + "scheduler": "^0.27.0" }, "peerDependencies": { - "react": "^18.3.1" + "react": "^19.2.4" } }, "node_modules/react-draggable": { - "version": "4.4.6", - "resolved": "https://registry.npmjs.org/react-draggable/-/react-draggable-4.4.6.tgz", - "integrity": "sha512-LtY5Xw1zTPqHkVmtM3X8MUOxNDOUhv/khTgBgrUvwaS064bwVvxT+q5El0uUFNx5IEPKXuRejr7UqLwBIg5pdw==", + "version": "4.5.0", + "resolved": "https://registry.npmjs.org/react-draggable/-/react-draggable-4.5.0.tgz", + "integrity": "sha512-VC+HBLEZ0XJxnOxVAZsdRi8rD04Iz3SiiKOoYzamjylUcju/hP9np/aZdLHf/7WOD268WMoNJMvYfB5yAK45cw==", + "license": "MIT", "dependencies": { - "clsx": "^1.1.1", + "clsx": "^2.1.1", "prop-types": "^15.8.1" }, "peerDependencies": { @@ -8417,14 +10820,6 @@ "react-dom": ">= 16.3.0" } }, - "node_modules/react-draggable/node_modules/clsx": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/clsx/-/clsx-1.2.1.tgz", - "integrity": "sha512-EcR6r5a8bj6pu3ycsa/E/cKVGuTgZJZdsyUYHOksG/UHIiKfjxzRxYJpyVBwYaQeOvghal9fcc4PidlgzugAQg==", - "engines": { - "node": ">=6" - } - }, "node_modules/react-dropzone": { "version": "14.3.8", "resolved": "https://registry.npmjs.org/react-dropzone/-/react-dropzone-14.3.8.tgz", @@ -8443,15 +10838,15 @@ } }, "node_modules/react-grid-layout": { - "version": "1.5.0", - "resolved": 
"https://registry.npmjs.org/react-grid-layout/-/react-grid-layout-1.5.0.tgz", - "integrity": "sha512-WBKX7w/LsTfI99WskSu6nX2nbJAUD7GD6nIXcwYLyPpnslojtmql2oD3I2g5C3AK8hrxIarYT8awhuDIp7iQ5w==", + "version": "2.2.2", + "resolved": "https://registry.npmjs.org/react-grid-layout/-/react-grid-layout-2.2.2.tgz", + "integrity": "sha512-yNo9pxQWoxHWRAwHGSVT4DEGELYPyQ7+q9lFclb5jcqeFzva63/2F72CryS/jiTIr/SBIlTaDdyjqH+ODg8oBw==", "license": "MIT", "dependencies": { - "clsx": "^2.0.0", + "clsx": "^2.1.1", "fast-equals": "^4.0.3", "prop-types": "^15.8.1", - "react-draggable": "^4.4.5", + "react-draggable": "^4.4.6", "react-resizable": "^3.0.5", "resize-observer-polyfill": "^1.5.1" }, @@ -8465,6 +10860,7 @@ "resolved": "https://registry.npmjs.org/react-hook-form/-/react-hook-form-7.52.1.tgz", "integrity": "sha512-uNKIhaoICJ5KQALYZ4TOaOLElyM+xipord+Ha3crEFhTntdLvWZqVY49Wqd/0GiVCA/f9NjemLeiNPjG7Hpurg==", "license": "MIT", + "peer": true, "engines": { "node": ">=12.22.0" }, @@ -8508,15 +10904,15 @@ } }, "node_modules/react-is": { - "version": "18.2.0", - "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.2.0.tgz", - "integrity": "sha512-xWGDIW6x921xtzPkhiULtthJHoJvBbF3q26fzloPCK0hsvxtPVelvftw3zjbHWSkR2km9Z+4uxbDDK/6Zw9B8w==", - "dev": true + "version": "18.3.1", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.3.1.tgz", + "integrity": "sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg==", + "license": "MIT" }, "node_modules/react-konva": { - "version": "18.2.10", - "resolved": "https://registry.npmjs.org/react-konva/-/react-konva-18.2.10.tgz", - "integrity": "sha512-ohcX1BJINL43m4ynjZ24MxFI1syjBdrXhqVxYVDw2rKgr3yuS0x/6m1Y2Z4sl4T/gKhfreBx8KHisd0XC6OT1g==", + "version": "19.2.3", + "resolved": "https://registry.npmjs.org/react-konva/-/react-konva-19.2.3.tgz", + "integrity": "sha512-VsO5CJZwUo12xFa33UEIDOQn6ZZBeE6jlkStGFvpR/3NiDA/9RPQTzw6Ri++C0Pnh3Arco1AehB8qJNv9YCRwg==", "funding": [ { "type": "patreon", @@ -8531,31 
+10927,59 @@ "url": "https://github.com/sponsors/lavrton" } ], + "license": "MIT", "dependencies": { - "@types/react-reconciler": "^0.28.2", - "its-fine": "^1.1.1", - "react-reconciler": "~0.29.0", - "scheduler": "^0.23.0" + "@types/react-reconciler": "^0.33.0", + "its-fine": "^2.0.0", + "react-reconciler": "0.33.0", + "scheduler": "0.27.0" }, "peerDependencies": { - "konva": "^8.0.1 || ^7.2.5 || ^9.0.0", - "react": ">=18.0.0", - "react-dom": ">=18.0.0" + "konva": "^8.0.1 || ^7.2.5 || ^9.0.0 || ^10.0.0", + "react": "^19.2.0", + "react-dom": "^19.2.0" + } + }, + "node_modules/react-markdown": { + "version": "9.1.0", + "resolved": "https://registry.npmjs.org/react-markdown/-/react-markdown-9.1.0.tgz", + "integrity": "sha512-xaijuJB0kzGiUdG7nc2MOMDUDBWPyGAjZtUrow9XxUeua8IqeP+VlIfAZ3bphpcLTnSZXz6z9jcVC/TCwbfgdw==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "hast-util-to-jsx-runtime": "^2.0.0", + "html-url-attributes": "^3.0.0", + "mdast-util-to-hast": "^13.0.0", + "remark-parse": "^11.0.0", + "remark-rehype": "^11.0.0", + "unified": "^11.0.0", + "unist-util-visit": "^5.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + }, + "peerDependencies": { + "@types/react": ">=18", + "react": ">=18" } }, "node_modules/react-reconciler": { - "version": "0.29.0", - "resolved": "https://registry.npmjs.org/react-reconciler/-/react-reconciler-0.29.0.tgz", - "integrity": "sha512-wa0fGj7Zht1EYMRhKWwoo1H9GApxYLBuhoAuXN0TlltESAjDssB+Apf0T/DngVqaMyPypDmabL37vw/2aRM98Q==", + "version": "0.33.0", + "resolved": "https://registry.npmjs.org/react-reconciler/-/react-reconciler-0.33.0.tgz", + "integrity": "sha512-KetWRytFv1epdpJc3J4G75I4WrplZE5jOL7Yq0p34+OVOKF4Se7WrdIdVC45XsSSmUTlht2FM/fM1FZb1mfQeA==", + "license": "MIT", "dependencies": { - "loose-envify": "^1.1.0", - "scheduler": "^0.23.0" + "scheduler": "^0.27.0" }, "engines": { "node": ">=0.10.0" 
}, "peerDependencies": { - "react": "^18.2.0" + "react": "^19.2.0" } }, "node_modules/react-remove-scroll": { @@ -8606,15 +11030,17 @@ } }, "node_modules/react-resizable": { - "version": "3.0.5", - "resolved": "https://registry.npmjs.org/react-resizable/-/react-resizable-3.0.5.tgz", - "integrity": "sha512-vKpeHhI5OZvYn82kXOs1bC8aOXktGU5AmKAgaZS4F5JPburCtbmDPqE7Pzp+1kN4+Wb81LlF33VpGwWwtXem+w==", + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/react-resizable/-/react-resizable-3.1.3.tgz", + "integrity": "sha512-liJBNayhX7qA4tBJiBD321FDhJxgGTJ07uzH5zSORXoE8h7PyEZ8mLqmosST7ppf6C4zUsbd2gzDMmBCfFp9Lw==", + "license": "MIT", "dependencies": { "prop-types": "15.x", - "react-draggable": "^4.0.3" + "react-draggable": "^4.5.0" }, "peerDependencies": { - "react": ">= 16.3" + "react": ">= 16.3", + "react-dom": ">= 16.3" } }, "node_modules/react-router": { @@ -8703,21 +11129,6 @@ "scheduler": ">=0.19.0" } }, - "node_modules/react-transition-group": { - "version": "4.4.5", - "resolved": "https://registry.npmjs.org/react-transition-group/-/react-transition-group-4.4.5.tgz", - "integrity": "sha512-pZcd1MCJoiKiBR2NRxeCRg13uCXbydPnmB4EOeRrY7480qNWO8IIgQG6zlDkm6uRMsURXPuKq0GWtiM59a5Q6g==", - "dependencies": { - "@babel/runtime": "^7.5.5", - "dom-helpers": "^5.0.1", - "loose-envify": "^1.4.0", - "prop-types": "^15.6.2" - }, - "peerDependencies": { - "react": ">=16.6.0", - "react-dom": ">=16.6.0" - } - }, "node_modules/react-use-websocket": { "version": "4.8.1", "resolved": "https://registry.npmjs.org/react-use-websocket/-/react-use-websocket-4.8.1.tgz", @@ -8728,9 +11139,9 @@ } }, "node_modules/react-zoom-pan-pinch": { - "version": "3.4.4", - "resolved": "https://registry.npmjs.org/react-zoom-pan-pinch/-/react-zoom-pan-pinch-3.4.4.tgz", - "integrity": "sha512-lGTu7D9lQpYEQ6sH+NSlLA7gicgKRW8j+D/4HO1AbSV2POvKRFzdWQ8eI0r3xmOsl4dYQcY+teV6MhULeg1xBw==", + "version": "3.7.0", + "resolved": "https://registry.npmjs.org/react-zoom-pan-pinch/-/react-zoom-pan-pinch-3.7.0.tgz", + 
"integrity": "sha512-UmReVZ0TxlKzxSbYiAj+LeGRW8s8LraAFTXRAxzMYnNRgGPsxCudwZKVkjvGmjtx7SW/hZamt69NUmGf4xrkXA==", "license": "MIT", "engines": { "node": ">=8", @@ -8760,25 +11171,6 @@ "node": ">=8.10.0" } }, - "node_modules/recoil": { - "version": "0.7.7", - "resolved": "https://registry.npmjs.org/recoil/-/recoil-0.7.7.tgz", - "integrity": "sha512-8Og5KPQW9LwC577Vc7Ug2P0vQshkv1y3zG3tSSkWMqkWSwHmE+by06L8JtnGocjW6gcCvfwB3YtrJG6/tWivNQ==", - "dependencies": { - "hamt_plus": "1.0.2" - }, - "peerDependencies": { - "react": ">=16.13.1" - }, - "peerDependenciesMeta": { - "react-dom": { - "optional": true - }, - "react-native": { - "optional": true - } - } - }, "node_modules/redent": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/redent/-/redent-3.0.0.tgz", @@ -8797,6 +11189,72 @@ "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.14.0.tgz", "integrity": "sha512-srw17NI0TUWHuGa5CFGGmhfNIeja30WMBfbslPNhf6JrqQlLN5gcrvig1oqPxiVaXb0oW0XRKtH6Nngs5lKCIA==" }, + "node_modules/remark-gfm": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/remark-gfm/-/remark-gfm-4.0.1.tgz", + "integrity": "sha512-1quofZ2RQ9EWdeN34S79+KExV1764+wCUGop5CPL1WGdD0ocPpu91lzPGbwWMECpEpd42kJGQwzRfyov9j4yNg==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-gfm": "^3.0.0", + "micromark-extension-gfm": "^3.0.0", + "remark-parse": "^11.0.0", + "remark-stringify": "^11.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-parse": { + "version": "11.0.0", + "resolved": "https://registry.npmjs.org/remark-parse/-/remark-parse-11.0.0.tgz", + "integrity": "sha512-FCxlKLNGknS5ba/1lmpYijMUzX2esxW5xQqjWxw2eHFfS2MSdaHVINFmhjo+qN1WhZhNimq0dZATN9pH0IDrpA==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-from-markdown": "^2.0.0", + "micromark-util-types": "^2.0.0", + "unified": "^11.0.0" + 
}, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-rehype": { + "version": "11.1.2", + "resolved": "https://registry.npmjs.org/remark-rehype/-/remark-rehype-11.1.2.tgz", + "integrity": "sha512-Dh7l57ianaEoIpzbp0PC9UKAdCSVklD8E5Rpw7ETfbTl3FqcOOgq5q2LVDhgGCkaBv7p24JXikPdvhhmHvKMsw==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "mdast-util-to-hast": "^13.0.0", + "unified": "^11.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-stringify": { + "version": "11.0.0", + "resolved": "https://registry.npmjs.org/remark-stringify/-/remark-stringify-11.0.0.tgz", + "integrity": "sha512-1OSmLd3awB/t8qdoEOMazZkNsfVTeY4fTsgzcQFdXNq8ToTN4ZGwrMnlda4K6smTFKD+GRV6O48i6Z4iKgPPpw==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-to-markdown": "^2.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, "node_modules/require-directory": { "version": "2.1.1", "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", @@ -8806,6 +11264,15 @@ "node": ">=0.10.0" } }, + "node_modules/require-from-string": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/require-from-string/-/require-from-string-2.0.2.tgz", + "integrity": "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/requires-port": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/requires-port/-/requires-port-1.0.0.tgz", @@ -9079,12 +11546,11 @@ } }, "node_modules/scheduler": { - "version": "0.23.2", - "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.23.2.tgz", - "integrity": 
"sha512-UOShsPwz7NrMUqhR6t0hWjFduvOzbtv7toDH1/hIrfRNIDBnnBWd0CwJTGvTpngVlmwGCdP9/Zl/tVrDqcuYzQ==", - "dependencies": { - "loose-envify": "^1.1.0" - } + "version": "0.27.0", + "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.27.0.tgz", + "integrity": "sha512-eNv+WrVbKu1f3vbYJT/xtiF5syA5HPIMtf9IgY/nKg0sWqzAUEvqY/xm7OcZc/qafLx/iO9FgOmeSAp4v5ti/Q==", + "license": "MIT", + "peer": true }, "node_modules/scroll-into-view-if-needed": { "version": "3.1.0", @@ -9107,6 +11573,24 @@ "node": ">=10" } }, + "node_modules/set-function-length": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/set-function-length/-/set-function-length-1.2.2.tgz", + "integrity": "sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg==", + "dev": true, + "license": "MIT", + "dependencies": { + "define-data-property": "^1.1.4", + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.4", + "gopd": "^1.0.1", + "has-property-descriptors": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, "node_modules/shebang-command": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", @@ -9158,13 +11642,13 @@ } }, "node_modules/sonner": { - "version": "1.5.0", - "resolved": "https://registry.npmjs.org/sonner/-/sonner-1.5.0.tgz", - "integrity": "sha512-FBjhG/gnnbN6FY0jaNnqZOMmB73R+5IiyYAw8yBj7L54ER7HB3fOSE5OFiQiE2iXWxeXKvg6fIP4LtVppHEdJA==", + "version": "2.0.7", + "resolved": "https://registry.npmjs.org/sonner/-/sonner-2.0.7.tgz", + "integrity": "sha512-W6ZN4p58k8aDKA4XPcx2hpIQXBRAgyiWVkYhT7CvK6D3iAu7xjvVyhQHg2/iaKJZ1XVJ4r7XuwGL+WGEK37i9w==", "license": "MIT", "peerDependencies": { - "react": "^18.0.0", - "react-dom": "^18.0.0" + "react": "^18.0.0 || ^19.0.0 || ^19.0.0-rc", + "react-dom": "^18.0.0 || ^19.0.0 || ^19.0.0-rc" } }, "node_modules/sort-by": { @@ -9184,6 +11668,16 @@ "node": ">=0.10.0" } }, + "node_modules/space-separated-tokens": { + "version": "2.0.2", 
+ "resolved": "https://registry.npmjs.org/space-separated-tokens/-/space-separated-tokens-2.0.2.tgz", + "integrity": "sha512-PEGlAwrG8yXGXRjW32fGbg66JAlOAwbObuqVoJpv/mRgoWDQfgH1wDPvtzWyUSNAXBGSk8h755YDbbcEy3SH2Q==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, "node_modules/stackback": { "version": "0.0.2", "resolved": "https://registry.npmjs.org/stackback/-/stackback-0.0.2.tgz", @@ -9253,6 +11747,20 @@ "node": ">=8" } }, + "node_modules/stringify-entities": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/stringify-entities/-/stringify-entities-4.0.4.tgz", + "integrity": "sha512-IwfBptatlO+QCJUo19AqvrPNqlVMpW9YEL2LIVY+Rpv2qsjCGxaDLNRgeGsQWJhfItebuJhsGSLjaBbNSQ+ieg==", + "license": "MIT", + "dependencies": { + "character-entities-html4": "^2.0.0", + "character-entities-legacy": "^3.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, "node_modules/strip-ansi": { "version": "6.0.1", "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", @@ -9316,6 +11824,24 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/style-to-js": { + "version": "1.1.21", + "resolved": "https://registry.npmjs.org/style-to-js/-/style-to-js-1.1.21.tgz", + "integrity": "sha512-RjQetxJrrUJLQPHbLku6U/ocGtzyjbJMP9lCNK7Ag0CNh690nSH8woqWH9u16nMjYBAok+i7JO1NP2pOy8IsPQ==", + "license": "MIT", + "dependencies": { + "style-to-object": "1.0.14" + } + }, + "node_modules/style-to-object": { + "version": "1.0.14", + "resolved": "https://registry.npmjs.org/style-to-object/-/style-to-object-1.0.14.tgz", + "integrity": "sha512-LIN7rULI0jBscWQYaSswptyderlarFkjQ+t79nzty8tcIAceVomEVlLzH5VP4Cmsv6MtKhs7qaAiwlcp+Mgaxw==", + "license": "MIT", + "dependencies": { + "inline-style-parser": "0.2.7" + } + }, "node_modules/sucrase": { "version": "3.34.0", "resolved": "https://registry.npmjs.org/sucrase/-/sucrase-3.34.0.tgz", @@ -9523,6 +12049,7 @@ 
"resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-3.4.9.tgz", "integrity": "sha512-1SEOvRr6sSdV5IDf9iC+NU4dhwdqzF4zKKq3sAbasUWHEM6lsMhX+eNN5gkPx1BvLFEnZQEUFbXnGj8Qlp83Pg==", "license": "MIT", + "peer": true, "dependencies": { "@alloc/quick-lru": "^5.2.0", "arg": "^5.0.2", @@ -9705,6 +12232,7 @@ "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=12" }, @@ -9754,6 +12282,16 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/tmp": { + "version": "0.2.5", + "resolved": "https://registry.npmjs.org/tmp/-/tmp-0.2.5.tgz", + "integrity": "sha512-voyz6MApa1rQGUxT3E+BK7/ROe8itEx7vD8/HEvt4xwXucvQ5G5oeEiHkmHZJuBO21RpOf+YYm9MOivj709jow==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=14.14" + } + }, "node_modules/to-regex-range": { "version": "5.0.1", "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", @@ -9798,6 +12336,26 @@ "node": ">=18" } }, + "node_modules/trim-lines": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/trim-lines/-/trim-lines-3.0.1.tgz", + "integrity": "sha512-kRj8B+YHZCc9kQYdWfJB2/oUl9rA99qbowYYBtr4ui4mZyAQ2JpvVBd/6U2YloATfqBhBTSMhTpgBHtU0Mf3Rg==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/trough": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/trough/-/trough-2.2.0.tgz", + "integrity": "sha512-tmMpK00BjZiUyVyvrBK7knerNgmgvcV/KLVyuma/SC+TQN167GrMRciANTz09+k3zW8L8t60jWO1GpfkZdjTaw==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, "node_modules/ts-api-utils": { "version": "1.3.0", "resolved": "https://registry.npmjs.org/ts-api-utils/-/ts-api-utils-1.3.0.tgz", @@ -9848,11 +12406,12 @@ } }, "node_modules/typescript": { - "version": "5.8.2", - "resolved": 
"https://registry.npmjs.org/typescript/-/typescript-5.8.2.tgz", - "integrity": "sha512-aJn6wq13/afZp/jT9QZmwEjDqqvSGp1VT5GVg+f/t6/oVyrgXM6BY1h9BRh/O5p3PlUPAe+WuiEZOmb/49RqoQ==", + "version": "5.9.3", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.9.3.tgz", + "integrity": "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==", "devOptional": true, "license": "Apache-2.0", + "peer": true, "bin": { "tsc": "bin/tsc", "tsserver": "bin/tsserver" @@ -9889,6 +12448,93 @@ "integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==", "dev": true }, + "node_modules/unified": { + "version": "11.0.5", + "resolved": "https://registry.npmjs.org/unified/-/unified-11.0.5.tgz", + "integrity": "sha512-xKvGhPWw3k84Qjh8bI3ZeJjqnyadK+GEFtazSfZv/rKeTkTjOJho6mFqh2SM96iIcZokxiOpg78GazTSg8+KHA==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0", + "bail": "^2.0.0", + "devlop": "^1.0.0", + "extend": "^3.0.0", + "is-plain-obj": "^4.0.0", + "trough": "^2.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-is": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-6.0.1.tgz", + "integrity": "sha512-LsiILbtBETkDz8I9p1dQ0uyRUWuaQzd/cuEeS1hoRSyW5E5XGmTzlwY1OrNzzakGowI9Dr/I8HVaw4hTtnxy8g==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-position": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/unist-util-position/-/unist-util-position-5.0.0.tgz", + "integrity": "sha512-fucsC7HjXvkB5R3kTCO7kUjRdrS0BJt3M/FPxmHMBOm8JQi2BsHAHFsy27E0EolP8rp0NzXsJ+jNPyDWvOJZPA==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0" + }, + "funding": { + "type": 
"opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-stringify-position": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/unist-util-stringify-position/-/unist-util-stringify-position-4.0.0.tgz", + "integrity": "sha512-0ASV06AAoKCDkS2+xw5RXJywruurpbC4JZSm7nr7MOt1ojAzvyyaO+UxZf18j8FCF6kmzCZKcAgN/yu2gm2XgQ==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-visit": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-5.1.0.tgz", + "integrity": "sha512-m+vIdyeCOpdr/QeQCu2EzxX/ohgS8KbnPDgFni4dQsfSCtpz8UqDyY5GjRru8PDKuYn7Fq19j1CQ+nJSsGKOzg==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0", + "unist-util-is": "^6.0.0", + "unist-util-visit-parents": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-visit-parents": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-6.0.2.tgz", + "integrity": "sha512-goh1s1TBrqSqukSc8wrjwWhL0hiJxgA8m4kFxGlQ+8FYQ3C/m11FcTs4YYem7V664AhHVvgoQLk890Ssdsr2IQ==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0", + "unist-util-is": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, "node_modules/universalify": { "version": "0.2.0", "resolved": "https://registry.npmjs.org/universalify/-/universalify-0.2.0.tgz", @@ -10035,16 +12681,58 @@ "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==" }, + "node_modules/uuid": { + "version": "13.0.0", + "resolved": 
"https://registry.npmjs.org/uuid/-/uuid-13.0.0.tgz", + "integrity": "sha512-XQegIaBTVUjSHliKqcnFqYypAd4S+WCYt5NIeRs6w/UAry7z8Y9j5ZwRRL4kzq9U3sD6v+85er9FvkEaBpji2w==", + "funding": [ + "https://github.com/sponsors/broofa", + "https://github.com/sponsors/ctavan" + ], + "license": "MIT", + "bin": { + "uuid": "dist-node/bin/uuid" + } + }, "node_modules/vaul": { - "version": "0.9.1", - "resolved": "https://registry.npmjs.org/vaul/-/vaul-0.9.1.tgz", - "integrity": "sha512-fAhd7i4RNMinx+WEm6pF3nOl78DFkAazcN04ElLPFF9BMCNGbY/kou8UMhIcicm0rJCNePJP0Yyza60gGOD0Jw==", + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/vaul/-/vaul-1.1.2.tgz", + "integrity": "sha512-ZFkClGpWyI2WUQjdLJ/BaGuV6AVQiJ3uELGk3OYtP+B6yCO7Cmn9vPFXVJkRaGkOJu3m8bQMgtyzNHixULceQA==", + "license": "MIT", "dependencies": { - "@radix-ui/react-dialog": "^1.0.4" + "@radix-ui/react-dialog": "^1.1.1" }, "peerDependencies": { - "react": "^16.8 || ^17.0 || ^18.0", - "react-dom": "^16.8 || ^17.0 || ^18.0" + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0.0 || ^19.0.0-rc" + } + }, + "node_modules/vfile": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/vfile/-/vfile-6.0.3.tgz", + "integrity": "sha512-KzIbH/9tXat2u30jf+smMwFCsno4wHVdNmzFyL+T/L3UGqqk6JKfVqOFOZEpZSHADH1k40ab6NUIXZq422ov3Q==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0", + "vfile-message": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/vfile-message": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/vfile-message/-/vfile-message-4.0.3.tgz", + "integrity": "sha512-QTHzsGd1EhbZs4AsQ20JX1rC3cOlt/IWJruk893DfLRr57lcnOeMaWG4K0JrRta4mIJZKth2Au3mM3u03/JWKw==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0", + "unist-util-stringify-position": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": 
"https://opencollective.com/unified" } }, "node_modules/virtua": { @@ -10083,6 +12771,7 @@ "integrity": "sha512-+Oxm7q9hDoLMyJOYfUYBuHQo+dkAloi33apOPP56pzj+vsdJDzr+j1NISE5pyaAuKL4A3UD34qd0lx5+kfKp2g==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "esbuild": "^0.25.0", "fdir": "^6.4.4", @@ -10207,6 +12896,7 @@ "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=12" }, @@ -10220,6 +12910,7 @@ "integrity": "sha512-IP7gPK3LS3Fvn44x30X1dM9vtawm0aesAa2yBIZ9vQf+qB69NXC5776+Qmcr7ohUXIQuLhk7xQR0aSUIDPqavg==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@vitest/expect": "3.0.7", "@vitest/mocker": "3.0.7", @@ -10611,6 +13302,16 @@ "funding": { "url": "https://github.com/sponsors/colinhacks" } + }, + "node_modules/zwitch": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/zwitch/-/zwitch-2.0.4.tgz", + "integrity": "sha512-bXE4cR/kVZhKZX/RjPEflHaKVhUVl85noU3v6b8apfQEc1x4A+zBxjZ4lN8LqGd6WZ3dl98pY4o717VFmoPp+A==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } } } } diff --git a/web/package.json b/web/package.json index 46d667058..acbbd8d88 100644 --- a/web/package.json +++ b/web/package.json @@ -5,6 +5,7 @@ "type": "module", "scripts": { "dev": "vite --host", + "postinstall": "patch-package", "build": "tsc && vite build --base=/BASE_PATH/", "lint": "eslint --ext .jsx,.js,.tsx,.ts --ignore-path .gitignore .", "lint:fix": "eslint --ext .jsx,.js,.tsx,.ts --ignore-path .gitignore --fix .", @@ -27,17 +28,22 @@ "@radix-ui/react-hover-card": "^1.1.6", "@radix-ui/react-label": "^2.1.2", "@radix-ui/react-popover": "^1.1.6", + "@radix-ui/react-progress": "^1.1.8", "@radix-ui/react-radio-group": "^1.2.3", "@radix-ui/react-scroll-area": "^1.2.3", "@radix-ui/react-select": "^2.1.6", "@radix-ui/react-separator": "^1.1.7", "@radix-ui/react-slider": 
"^1.2.3", - "@radix-ui/react-slot": "^1.2.3", + "@radix-ui/react-slot": "1.2.4", "@radix-ui/react-switch": "^1.1.3", "@radix-ui/react-tabs": "^1.1.3", "@radix-ui/react-toggle": "^1.1.2", "@radix-ui/react-toggle-group": "^1.1.2", "@radix-ui/react-tooltip": "^1.2.8", + "@rjsf/core": "^6.3.1", + "@rjsf/shadcn": "^6.3.1", + "@rjsf/utils": "^6.3.1", + "@rjsf/validator-ajv8": "^6.3.1", "apexcharts": "^3.52.0", "axios": "^1.7.7", "class-variance-authority": "^0.7.1", @@ -46,8 +52,7 @@ "copy-to-clipboard": "^3.3.3", "date-fns": "^3.6.0", "date-fns-tz": "^3.2.0", - "embla-carousel-react": "^8.2.0", - "framer-motion": "^11.5.4", + "framer-motion": "^12.35.0", "hls.js": "^1.5.20", "i18next": "^24.2.0", "i18next-http-backend": "^3.0.1", @@ -57,28 +62,28 @@ "lodash": "^4.17.23", "lucide-react": "^0.477.0", "monaco-yaml": "^5.3.1", - "next-themes": "^0.3.0", + "next-themes": "^0.4.6", "nosleep.js": "^0.12.0", - "react": "^18.3.1", + "react": "^19.2.4", "react-apexcharts": "^1.4.1", "react-day-picker": "^9.7.0", "react-device-detect": "^2.2.3", - "react-dom": "^18.3.1", + "react-dom": "^19.2.4", "react-dropzone": "^14.3.8", - "react-grid-layout": "^1.5.0", + "react-grid-layout": "^2.2.2", "react-hook-form": "^7.52.1", "react-i18next": "^15.2.0", "react-icons": "^5.5.0", - "react-konva": "^18.2.10", + "react-konva": "^19.2.3", + "react-markdown": "^9.0.1", "react-router-dom": "^6.30.3", "react-swipeable": "^7.0.2", "react-tracked": "^2.0.1", - "react-transition-group": "^4.4.5", "react-use-websocket": "^4.8.1", - "react-zoom-pan-pinch": "3.4.4", - "recoil": "^0.7.7", + "react-zoom-pan-pinch": "^3.7.0", + "remark-gfm": "^4.0.0", "scroll-into-view-if-needed": "^3.1.0", - "sonner": "^1.5.0", + "sonner": "^2.0.7", "sort-by": "^1.2.0", "strftime": "^0.10.3", "swr": "^2.3.2", @@ -86,7 +91,7 @@ "tailwind-scrollbar": "^3.1.0", "tailwindcss-animate": "^1.0.7", "use-long-press": "^3.2.0", - "vaul": "^0.9.1", + "vaul": "^1.1.2", "vite-plugin-monaco-editor": "^1.1.0", "zod": "^3.23.8" }, @@ 
-95,11 +100,8 @@ "@testing-library/jest-dom": "^6.6.2", "@types/lodash": "^4.17.12", "@types/node": "^20.14.10", - "@types/react": "^18.3.2", - "@types/react-dom": "^18.3.0", - "@types/react-grid-layout": "^1.3.5", - "@types/react-icons": "^3.0.0", - "@types/react-transition-group": "^4.4.10", + "@types/react": "^19.2.14", + "@types/react-dom": "^19.2.3", "@types/strftime": "^0.9.8", "@typescript-eslint/eslint-plugin": "^7.5.0", "@typescript-eslint/parser": "^7.5.0", @@ -110,19 +112,26 @@ "eslint-config-prettier": "^9.1.0", "eslint-plugin-jest": "^28.2.0", "eslint-plugin-prettier": "^5.0.1", - "eslint-plugin-react-hooks": "^4.6.0", + "eslint-plugin-react-hooks": "^5.2.0", "eslint-plugin-react-refresh": "^0.4.8", "eslint-plugin-vitest-globals": "^1.5.0", "fake-indexeddb": "^6.0.0", "jest-websocket-mock": "^2.5.0", "jsdom": "^24.1.1", + "monaco-editor": "^0.52.0", "msw": "^2.3.5", + "patch-package": "^8.0.1", "postcss": "^8.4.47", "prettier": "^3.3.3", "prettier-plugin-tailwindcss": "^0.6.5", "tailwindcss": "^3.4.9", - "typescript": "^5.8.2", + "typescript": "^5.9.3", "vite": "^6.4.1", "vitest": "^3.0.7" + }, + "overrides": { + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-popper": "1.2.8", + "@radix-ui/react-slot": "1.2.4" } } diff --git a/web/patches/@radix-ui+react-compose-refs+1.1.2.patch b/web/patches/@radix-ui+react-compose-refs+1.1.2.patch new file mode 100644 index 000000000..0cb022b22 --- /dev/null +++ b/web/patches/@radix-ui+react-compose-refs+1.1.2.patch @@ -0,0 +1,75 @@ +diff --git a/node_modules/@radix-ui/react-compose-refs/dist/index.js b/node_modules/@radix-ui/react-compose-refs/dist/index.js +index 5ba7a95..65aa7be 100644 +--- a/node_modules/@radix-ui/react-compose-refs/dist/index.js ++++ b/node_modules/@radix-ui/react-compose-refs/dist/index.js +@@ -69,6 +69,31 @@ function composeRefs(...refs) { + }; + } + function useComposedRefs(...refs) { +- return React.useCallback(composeRefs(...refs), refs); ++ const refsRef = React.useRef(refs); 
++ React.useLayoutEffect(() => { ++ refsRef.current = refs; ++ }); ++ return React.useCallback((node) => { ++ let hasCleanup = false; ++ const cleanups = refsRef.current.map((ref) => { ++ const cleanup = setRef(ref, node); ++ if (!hasCleanup && typeof cleanup === "function") { ++ hasCleanup = true; ++ } ++ return cleanup; ++ }); ++ if (hasCleanup) { ++ return () => { ++ for (let i = 0; i < cleanups.length; i++) { ++ const cleanup = cleanups[i]; ++ if (typeof cleanup === "function") { ++ cleanup(); ++ } else { ++ setRef(refsRef.current[i], null); ++ } ++ } ++ }; ++ } ++ }, []); + } + //# sourceMappingURL=index.js.map +diff --git a/node_modules/@radix-ui/react-compose-refs/dist/index.mjs b/node_modules/@radix-ui/react-compose-refs/dist/index.mjs +index 7dd9172..d1b53a5 100644 +--- a/node_modules/@radix-ui/react-compose-refs/dist/index.mjs ++++ b/node_modules/@radix-ui/react-compose-refs/dist/index.mjs +@@ -32,7 +32,32 @@ function composeRefs(...refs) { + }; + } + function useComposedRefs(...refs) { +- return React.useCallback(composeRefs(...refs), refs); ++ const refsRef = React.useRef(refs); ++ React.useLayoutEffect(() => { ++ refsRef.current = refs; ++ }); ++ return React.useCallback((node) => { ++ let hasCleanup = false; ++ const cleanups = refsRef.current.map((ref) => { ++ const cleanup = setRef(ref, node); ++ if (!hasCleanup && typeof cleanup === "function") { ++ hasCleanup = true; ++ } ++ return cleanup; ++ }); ++ if (hasCleanup) { ++ return () => { ++ for (let i = 0; i < cleanups.length; i++) { ++ const cleanup = cleanups[i]; ++ if (typeof cleanup === "function") { ++ cleanup(); ++ } else { ++ setRef(refsRef.current[i], null); ++ } ++ } ++ }; ++ } ++ }, []); + } + export { + composeRefs, diff --git a/web/patches/@radix-ui+react-slot+1.2.4.patch b/web/patches/@radix-ui+react-slot+1.2.4.patch new file mode 100644 index 000000000..62c2467e2 --- /dev/null +++ b/web/patches/@radix-ui+react-slot+1.2.4.patch @@ -0,0 +1,46 @@ +diff --git 
a/node_modules/@radix-ui/react-slot/dist/index.js b/node_modules/@radix-ui/react-slot/dist/index.js +index 3691205..3b62ea8 100644 +--- a/node_modules/@radix-ui/react-slot/dist/index.js ++++ b/node_modules/@radix-ui/react-slot/dist/index.js +@@ -85,11 +85,12 @@ function createSlotClone(ownerName) { + if (isLazyComponent(children) && typeof use === "function") { + children = use(children._payload); + } ++ const childrenRef = React.isValidElement(children) ? getElementRef(children) : null; ++ const composedRef = (0, import_react_compose_refs.useComposedRefs)(forwardedRef, childrenRef); + if (React.isValidElement(children)) { +- const childrenRef = getElementRef(children); + const props2 = mergeProps(slotProps, children.props); + if (children.type !== React.Fragment) { +- props2.ref = forwardedRef ? (0, import_react_compose_refs.composeRefs)(forwardedRef, childrenRef) : childrenRef; ++ props2.ref = forwardedRef ? composedRef : childrenRef; + } + return React.cloneElement(children, props2); + } +diff --git a/node_modules/@radix-ui/react-slot/dist/index.mjs b/node_modules/@radix-ui/react-slot/dist/index.mjs +index d7ea374..a990150 100644 +--- a/node_modules/@radix-ui/react-slot/dist/index.mjs ++++ b/node_modules/@radix-ui/react-slot/dist/index.mjs +@@ -1,6 +1,6 @@ + // src/slot.tsx + import * as React from "react"; +-import { composeRefs } from "@radix-ui/react-compose-refs"; ++import { composeRefs, useComposedRefs } from "@radix-ui/react-compose-refs"; + import { Fragment as Fragment2, jsx } from "react/jsx-runtime"; + var REACT_LAZY_TYPE = Symbol.for("react.lazy"); + var use = React[" use ".trim().toString()]; +@@ -45,11 +45,12 @@ function createSlotClone(ownerName) { + if (isLazyComponent(children) && typeof use === "function") { + children = use(children._payload); + } ++ const childrenRef = React.isValidElement(children) ? 
getElementRef(children) : null; ++ const composedRef = useComposedRefs(forwardedRef, childrenRef); + if (React.isValidElement(children)) { +- const childrenRef = getElementRef(children); + const props2 = mergeProps(slotProps, children.props); + if (children.type !== React.Fragment) { +- props2.ref = forwardedRef ? composeRefs(forwardedRef, childrenRef) : childrenRef; ++ props2.ref = forwardedRef ? composedRef : childrenRef; + } + return React.cloneElement(children, props2); + } diff --git a/web/patches/react-use-websocket+4.8.1.patch b/web/patches/react-use-websocket+4.8.1.patch new file mode 100644 index 000000000..5de81c525 --- /dev/null +++ b/web/patches/react-use-websocket+4.8.1.patch @@ -0,0 +1,23 @@ +diff --git a/node_modules/react-use-websocket/dist/lib/use-websocket.js b/node_modules/react-use-websocket/dist/lib/use-websocket.js +index f01db48..b30aff2 100644 +--- a/node_modules/react-use-websocket/dist/lib/use-websocket.js ++++ b/node_modules/react-use-websocket/dist/lib/use-websocket.js +@@ -139,15 +139,15 @@ var useWebSocket = function (url, options, connect) { + } + protectedSetLastMessage = function (message) { + if (!expectClose_1) { +- (0, react_dom_1.flushSync)(function () { return setLastMessage(message); }); ++ setLastMessage(message); + } + }; + protectedSetReadyState = function (state) { + if (!expectClose_1) { +- (0, react_dom_1.flushSync)(function () { return setReadyState(function (prev) { ++ setReadyState(function (prev) { + var _a; + return (__assign(__assign({}, prev), (convertedUrl.current && (_a = {}, _a[convertedUrl.current] = state, _a)))); +- }); }); ++ }); + } + }; + if (createOrJoin_1) { diff --git a/web/public/locales/en/common.json b/web/public/locales/en/common.json index 300f74ddb..37566117a 100644 --- a/web/public/locales/en/common.json +++ b/web/public/locales/en/common.json @@ -115,8 +115,11 @@ "internalID": "The Internal ID Frigate uses in the configuration and database" }, "button": { + "add": "Add", "apply": "Apply", + 
"applying": "Applying…", "reset": "Reset", + "undo": "Undo", "done": "Done", "enabled": "Enabled", "enable": "Enable", @@ -127,6 +130,7 @@ "cancel": "Cancel", "close": "Close", "copy": "Copy", + "copiedToClipboard": "Copied to clipboard", "back": "Back", "history": "History", "fullscreen": "Fullscreen", @@ -150,7 +154,14 @@ "export": "Export", "deleteNow": "Delete Now", "next": "Next", - "continue": "Continue" + "continue": "Continue", + "modified": "Modified", + "overridden": "Overridden", + "resetToGlobal": "Reset to Global", + "resetToDefault": "Reset to Default", + "saveAll": "Save All", + "savingAll": "Saving All…", + "undoAll": "Undo All" }, "menu": { "system": "System", @@ -242,9 +253,11 @@ "review": "Review", "explore": "Explore", "export": "Export", + "actions": "Actions", "uiPlayground": "UI Playground", "faceLibrary": "Face Library", "classification": "Classification", + "chat": "Chat", "user": { "title": "User", "account": "Account", diff --git a/web/public/locales/en/components/dialog.json b/web/public/locales/en/components/dialog.json index 91ff38d82..9a6f68daf 100644 --- a/web/public/locales/en/components/dialog.json +++ b/web/public/locales/en/components/dialog.json @@ -49,6 +49,10 @@ "name": { "placeholder": "Name the Export" }, + "case": { + "label": "Case", + "placeholder": "Select a case" + }, "select": "Select", "export": "Export", "selectOrExport": "Select or Export", diff --git a/web/public/locales/en/config/audio.json b/web/public/locales/en/config/audio.json deleted file mode 100644 index f9aaffa6b..000000000 --- a/web/public/locales/en/config/audio.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "label": "Global Audio events configuration.", - "properties": { - "enabled": { - "label": "Enable audio events." - }, - "max_not_heard": { - "label": "Seconds of not hearing the type of audio to end the event." - }, - "min_volume": { - "label": "Min volume required to run audio detection." - }, - "listen": { - "label": "Audio to listen for." 
- }, - "filters": { - "label": "Audio filters." - }, - "enabled_in_config": { - "label": "Keep track of original state of audio detection." - }, - "num_threads": { - "label": "Number of detection threads" - } - } -} \ No newline at end of file diff --git a/web/public/locales/en/config/audio_transcription.json b/web/public/locales/en/config/audio_transcription.json deleted file mode 100644 index 6922b9d80..000000000 --- a/web/public/locales/en/config/audio_transcription.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "label": "Audio transcription config.", - "properties": { - "enabled": { - "label": "Enable audio transcription." - }, - "language": { - "label": "Language abbreviation to use for audio event transcription/translation." - }, - "device": { - "label": "The device used for license plate recognition." - }, - "model_size": { - "label": "The size of the embeddings model used." - }, - "enabled_in_config": { - "label": "Keep track of original state of camera." - }, - "live_enabled": { - "label": "Enable live transcriptions." - } - } -} \ No newline at end of file diff --git a/web/public/locales/en/config/auth.json b/web/public/locales/en/config/auth.json deleted file mode 100644 index a524d8d1b..000000000 --- a/web/public/locales/en/config/auth.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "label": "Auth configuration.", - "properties": { - "enabled": { - "label": "Enable authentication" - }, - "reset_admin_password": { - "label": "Reset the admin password on startup" - }, - "cookie_name": { - "label": "Name for jwt token cookie" - }, - "cookie_secure": { - "label": "Set secure flag on cookie" - }, - "session_length": { - "label": "Session length for jwt session tokens" - }, - "refresh_time": { - "label": "Refresh the session if it is going to expire in this many seconds" - }, - "failed_login_rate_limit": { - "label": "Rate limits for failed login attempts." 
- }, - "trusted_proxies": { - "label": "Trusted proxies for determining IP address to rate limit" - }, - "hash_iterations": { - "label": "Password hash iterations" - }, - "roles": { - "label": "Role to camera mappings. Empty list grants access to all cameras." - } - } -} \ No newline at end of file diff --git a/web/public/locales/en/config/birdseye.json b/web/public/locales/en/config/birdseye.json deleted file mode 100644 index f122f314c..000000000 --- a/web/public/locales/en/config/birdseye.json +++ /dev/null @@ -1,37 +0,0 @@ -{ - "label": "Birdseye configuration.", - "properties": { - "enabled": { - "label": "Enable birdseye view." - }, - "mode": { - "label": "Tracking mode." - }, - "restream": { - "label": "Restream birdseye via RTSP." - }, - "width": { - "label": "Birdseye width." - }, - "height": { - "label": "Birdseye height." - }, - "quality": { - "label": "Encoding quality." - }, - "inactivity_threshold": { - "label": "Birdseye Inactivity Threshold" - }, - "layout": { - "label": "Birdseye Layout Config", - "properties": { - "scaling_factor": { - "label": "Birdseye Scaling Factor" - }, - "max_cameras": { - "label": "Max cameras" - } - } - } - } -} \ No newline at end of file diff --git a/web/public/locales/en/config/camera_groups.json b/web/public/locales/en/config/camera_groups.json deleted file mode 100644 index 2900e9c67..000000000 --- a/web/public/locales/en/config/camera_groups.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "label": "Camera group configuration", - "properties": { - "cameras": { - "label": "List of cameras in this group." - }, - "icon": { - "label": "Icon that represents camera group." - }, - "order": { - "label": "Sort order for group." 
- } - } -} \ No newline at end of file diff --git a/web/public/locales/en/config/cameras.json b/web/public/locales/en/config/cameras.json index 67015bde5..5880d30c3 100644 --- a/web/public/locales/en/config/cameras.json +++ b/web/public/locales/en/config/cameras.json @@ -1,761 +1,941 @@ { - "label": "Camera configuration.", - "properties": { - "name": { - "label": "Camera name." - }, - "friendly_name": { - "label": "Camera friendly name used in the Frigate UI." - }, + "label": "CameraConfig", + "name": { + "label": "Camera name", + "description": "Camera name is required" + }, + "friendly_name": { + "label": "Friendly name", + "description": "Camera friendly name used in the Frigate UI" + }, + "enabled": { + "label": "Enabled", + "description": "Enabled" + }, + "audio": { + "label": "Audio events", + "description": "Settings for audio-based event detection for this camera.", "enabled": { - "label": "Enable camera." + "label": "Enable audio detection", + "description": "Enable or disable audio event detection for this camera." }, - "audio": { - "label": "Audio events configuration.", - "properties": { - "enabled": { - "label": "Enable audio events." + "max_not_heard": { + "label": "End timeout", + "description": "Amount of seconds without the configured audio type before the audio event is ended." + }, + "min_volume": { + "label": "Minimum volume", + "description": "Minimum RMS volume threshold required to run audio detection; lower values increase sensitivity (e.g., 200 high, 500 medium, 1000 low)." + }, + "listen": { + "label": "Listen types", + "description": "List of audio event types to detect (for example: bark, fire_alarm, scream, speech, yell)." + }, + "filters": { + "label": "Audio filters", + "description": "Per-audio-type filter settings such as confidence thresholds used to reduce false positives." 
+ }, + "enabled_in_config": { + "label": "Original audio state", + "description": "Indicates whether audio detection was originally enabled in the static config file." + }, + "num_threads": { + "label": "Detection threads", + "description": "Number of threads to use for audio detection processing." + } + }, + "audio_transcription": { + "label": "Audio transcription", + "description": "Settings for live and speech audio transcription used for events and live captions.", + "enabled": { + "label": "Enable transcription", + "description": "Enable or disable manually triggered audio event transcription." + }, + "enabled_in_config": { + "label": "Original transcription state" + }, + "live_enabled": { + "label": "Live transcription", + "description": "Enable streaming live transcription for audio as it is received." + } + }, + "birdseye": { + "label": "Birdseye", + "description": "Settings for the Birdseye composite view that composes multiple camera feeds into a single layout.", + "enabled": { + "label": "Enable Birdseye", + "description": "Enable or disable the Birdseye view feature." + }, + "mode": { + "label": "Tracking mode", + "description": "Mode for including cameras in Birdseye: 'objects', 'motion', or 'continuous'." + }, + "order": { + "label": "Position", + "description": "Numeric position controlling the camera's ordering in the Birdseye layout." + } + }, + "detect": { + "label": "Object Detection", + "description": "Settings for the detection/detect role used to run object detection and initialize trackers.", + "enabled": { + "label": "Detection enabled", + "description": "Enable or disable object detection for this camera. Detection must be enabled for object tracking to run." + }, + "height": { + "label": "Detect height", + "description": "Height (pixels) of frames used for the detect stream; leave empty to use the native stream resolution." 
+ }, + "width": { + "label": "Detect width", + "description": "Width (pixels) of frames used for the detect stream; leave empty to use the native stream resolution." + }, + "fps": { + "label": "Detect FPS", + "description": "Desired frames per second to run detection on; lower values reduce CPU usage (recommended value is 5, only set higher - at most 10 - if tracking extremely fast moving objects)." + }, + "min_initialized": { + "label": "Minimum initialization frames", + "description": "Number of consecutive detection hits required before creating a tracked object. Increase to reduce false initializations. Default value is fps divided by 2." + }, + "max_disappeared": { + "label": "Maximum disappeared frames", + "description": "Number of frames without a detection before a tracked object is considered gone." + }, + "stationary": { + "label": "Stationary objects config", + "description": "Settings to detect and manage objects that remain stationary for a period of time.", + "interval": { + "label": "Stationary interval", + "description": "How often (in frames) to run a detection check to confirm a stationary object." + }, + "threshold": { + "label": "Stationary threshold", + "description": "Number of frames with no position change required to mark an object as stationary." + }, + "max_frames": { + "label": "Max frames", + "description": "Limits how long stationary objects are tracked before being discarded.", + "default": { + "label": "Default max frames", + "description": "Default maximum frames to track a stationary object before stopping." }, - "max_not_heard": { - "label": "Seconds of not hearing the type of audio to end the event." - }, - "min_volume": { - "label": "Min volume required to run audio detection." - }, - "listen": { - "label": "Audio to listen for." - }, - "filters": { - "label": "Audio filters." - }, - "enabled_in_config": { - "label": "Keep track of original state of audio detection." 
- }, - "num_threads": { - "label": "Number of detection threads" + "objects": { + "label": "Object max frames", + "description": "Per-object overrides for maximum frames to track stationary objects." } + }, + "classifier": { + "label": "Enable visual classifier", + "description": "Use a visual classifier to detect truly stationary objects even when bounding boxes jitter." } }, - "audio_transcription": { - "label": "Audio transcription config.", - "properties": { - "enabled": { - "label": "Enable audio transcription." - }, - "language": { - "label": "Language abbreviation to use for audio event transcription/translation." - }, - "device": { - "label": "The device used for license plate recognition." - }, - "model_size": { - "label": "The size of the embeddings model used." - }, - "enabled_in_config": { - "label": "Keep track of original state of camera." - }, - "live_enabled": { - "label": "Enable live transcriptions." - } + "annotation_offset": { + "label": "Annotation offset", + "description": "Milliseconds to shift detect annotations to better align timeline bounding boxes with recordings; can be positive or negative." + } + }, + "face_recognition": { + "label": "Face recognition", + "description": "Settings for face detection and recognition for this camera.", + "enabled": { + "label": "Enable face recognition", + "description": "Enable or disable face recognition." + }, + "min_area": { + "label": "Minimum face area", + "description": "Minimum area (pixels) of a detected face box required to attempt recognition." + } + }, + "ffmpeg": { + "label": "FFmpeg", + "description": "FFmpeg settings including binary path, args, hwaccel options, and per-role output args.", + "path": { + "label": "FFmpeg path", + "description": "Path to the FFmpeg binary to use or a version alias (\"5.0\" or \"7.0\")." + }, + "global_args": { + "label": "FFmpeg global arguments", + "description": "Global arguments passed to FFmpeg processes." 
+ }, + "hwaccel_args": { + "label": "Hardware acceleration arguments", + "description": "Hardware acceleration arguments for FFmpeg. Provider-specific presets are recommended." + }, + "input_args": { + "label": "Input arguments", + "description": "Input arguments applied to FFmpeg input streams." + }, + "output_args": { + "label": "Output arguments", + "description": "Default output arguments used for different FFmpeg roles such as detect and record.", + "detect": { + "label": "Detect output arguments", + "description": "Default output arguments for detect role streams." + }, + "record": { + "label": "Record output arguments", + "description": "Default output arguments for record role streams." } }, - "birdseye": { - "label": "Birdseye camera configuration.", - "properties": { - "enabled": { - "label": "Enable birdseye view for camera." - }, - "mode": { - "label": "Tracking mode for camera." - }, - "order": { - "label": "Position of the camera in the birdseye view." - } + "retry_interval": { + "label": "FFmpeg retry time", + "description": "Seconds to wait before attempting to reconnect a camera stream after failure. Default is 10." + }, + "apple_compatibility": { + "label": "Apple compatibility", + "description": "Enable HEVC tagging for better Apple player compatibility when recording H.265." + }, + "gpu": { + "label": "GPU index", + "description": "Default GPU index used for hardware acceleration if available." + }, + "inputs": { + "label": "Camera inputs", + "description": "List of input stream definitions (paths and roles) for this camera.", + "path": { + "label": "Input path", + "description": "Camera input stream URL or path." + }, + "roles": { + "label": "Input roles", + "description": "Roles for this input stream." + }, + "global_args": { + "label": "FFmpeg global arguments", + "description": "FFmpeg global arguments for this input stream." 
+ }, + "hwaccel_args": { + "label": "Hardware acceleration arguments", + "description": "Hardware acceleration arguments for this input stream." + }, + "input_args": { + "label": "Input arguments", + "description": "Input arguments specific to this stream." + } + } + }, + "live": { + "label": "Live playback", + "description": "Settings used by the Web UI to control live stream selection, resolution and quality.", + "streams": { + "label": "Live stream names", + "description": "Mapping of configured stream names to restream/go2rtc names used for live playback." + }, + "height": { + "label": "Live height", + "description": "Height (pixels) to render the jsmpeg live stream in the Web UI; must be <= detect stream height." + }, + "quality": { + "label": "Live quality", + "description": "Encoding quality for the jsmpeg stream (1 highest, 31 lowest)." + } + }, + "lpr": { + "label": "License Plate Recognition", + "description": "License plate recognition settings including detection thresholds, formatting, and known plates.", + "enabled": { + "label": "Enable LPR", + "description": "Enable or disable LPR on this camera." + }, + "expire_time": { + "label": "Expire seconds", + "description": "Time in seconds after which an unseen plate is expired from the tracker (for dedicated LPR cameras only)." + }, + "min_area": { + "label": "Minimum plate area", + "description": "Minimum plate area (pixels) required to attempt recognition." + }, + "enhancement": { + "label": "Enhancement level", + "description": "Enhancement level (0-10) to apply to plate crops prior to OCR; higher values may not always improve results, levels above 5 may only work with night time plates and should be used with caution." + } + }, + "motion": { + "label": "Motion detection", + "description": "Default motion detection settings for this camera.", + "enabled": { + "label": "Enable motion detection", + "description": "Enable or disable motion detection for this camera." 
+ }, + "threshold": { + "label": "Motion threshold", + "description": "Pixel difference threshold used by the motion detector; higher values reduce sensitivity (range 1-255)." + }, + "lightning_threshold": { + "label": "Lightning threshold", + "description": "Threshold to detect and ignore brief lighting spikes (lower is more sensitive, values between 0.3 and 1.0). This does not prevent motion detection entirely; it merely causes the detector to stop analyzing additional frames once the threshold is exceeded. Motion-based recordings are still created during these events." + }, + "skip_motion_threshold": { + "label": "Skip motion threshold", + "description": "If more than this fraction of the image changes in a single frame, the detector will return no motion boxes and immediately recalibrate. This can save CPU and reduce false positives during lightning, storms, etc., but may miss real events such as a PTZ camera auto‑tracking an object. The trade‑off is between dropping a few megabytes of recordings versus reviewing a couple short clips. Range 0.0 to 1.0." + }, + "improve_contrast": { + "label": "Improve contrast", + "description": "Apply contrast improvement to frames before motion analysis to help detection." + }, + "contour_area": { + "label": "Contour area", + "description": "Minimum contour area in pixels required for a motion contour to be counted." + }, + "delta_alpha": { + "label": "Delta alpha", + "description": "Alpha blending factor used in frame differencing for motion calculation." + }, + "frame_alpha": { + "label": "Frame alpha", + "description": "Alpha value used when blending frames for motion preprocessing." + }, + "frame_height": { + "label": "Frame height", + "description": "Height in pixels to scale frames to when computing motion." + }, + "mask": { + "label": "Mask coordinates", + "description": "Ordered x,y coordinates defining the motion mask polygon used to include/exclude areas." 
+ }, + "mqtt_off_delay": { + "label": "MQTT off delay", + "description": "Seconds to wait after last motion before publishing an MQTT 'off' state." + }, + "enabled_in_config": { + "label": "Original motion state", + "description": "Indicates whether motion detection was enabled in the original static configuration." + }, + "raw_mask": { + "label": "Raw Mask" + } + }, + "objects": { + "label": "Objects", + "description": "Object tracking defaults including which labels to track and per-object filters.", + "track": { + "label": "Objects to track", + "description": "List of object labels to track for this camera." + }, + "filters": { + "label": "Object filters", + "description": "Filters applied to detected objects to reduce false positives (area, ratio, confidence).", + "min_area": { + "label": "Minimum object area", + "description": "Minimum bounding box area (pixels or percentage) required for this object type. Can be pixels (int) or percentage (float between 0.000001 and 0.99)." + }, + "max_area": { + "label": "Maximum object area", + "description": "Maximum bounding box area (pixels or percentage) allowed for this object type. Can be pixels (int) or percentage (float between 0.000001 and 0.99)." + }, + "min_ratio": { + "label": "Minimum aspect ratio", + "description": "Minimum width/height ratio required for the bounding box to qualify." + }, + "max_ratio": { + "label": "Maximum aspect ratio", + "description": "Maximum width/height ratio allowed for the bounding box to qualify." + }, + "threshold": { + "label": "Confidence threshold", + "description": "Average detection confidence threshold required for the object to be considered a true positive." + }, + "min_score": { + "label": "Minimum confidence", + "description": "Minimum single-frame detection confidence required for the object to be counted." + }, + "mask": { + "label": "Filter mask", + "description": "Polygon coordinates defining where this filter applies within the frame." 
+ }, + "raw_mask": { + "label": "Raw Mask" } }, - "detect": { - "label": "Object detection configuration.", - "properties": { - "enabled": { - "label": "Detection Enabled." - }, - "height": { - "label": "Height of the stream for the detect role." - }, - "width": { - "label": "Width of the stream for the detect role." - }, - "fps": { - "label": "Number of frames per second to process through detection." - }, - "min_initialized": { - "label": "Minimum number of consecutive hits for an object to be initialized by the tracker." - }, - "max_disappeared": { - "label": "Maximum number of frames the object can disappear before detection ends." - }, - "stationary": { - "label": "Stationary objects config.", - "properties": { - "interval": { - "label": "Frame interval for checking stationary objects." - }, - "threshold": { - "label": "Number of frames without a position change for an object to be considered stationary" - }, - "max_frames": { - "label": "Max frames for stationary objects.", - "properties": { - "default": { - "label": "Default max frames." - }, - "objects": { - "label": "Object specific max frames." - } - } - }, - "classifier": { - "label": "Enable visual classifier for determing if objects with jittery bounding boxes are stationary." - } - } - }, - "annotation_offset": { - "label": "Milliseconds to offset detect annotations by." - } - } + "mask": { + "label": "Object mask", + "description": "Mask polygon used to prevent object detection in specified areas." }, - "face_recognition": { - "label": "Face recognition config.", - "properties": { - "enabled": { - "label": "Enable face recognition." - }, - "min_area": { - "label": "Min area of face box to consider running face recognition." 
- } - } + "raw_mask": { + "label": "Raw Mask" }, - "ffmpeg": { - "label": "FFmpeg configuration for the camera.", - "properties": { - "path": { - "label": "FFmpeg path" + "genai": { + "label": "GenAI object config", + "description": "GenAI options for describing tracked objects and sending frames for generation.", + "enabled": { + "label": "Enable GenAI", + "description": "Enable GenAI generation of descriptions for tracked objects by default." + }, + "use_snapshot": { + "label": "Use snapshots", + "description": "Use object snapshots instead of thumbnails for GenAI description generation." + }, + "prompt": { + "label": "Caption prompt", + "description": "Default prompt template used when generating descriptions with GenAI." + }, + "object_prompts": { + "label": "Object prompts", + "description": "Per-object prompts to customize GenAI outputs for specific labels." + }, + "objects": { + "label": "GenAI objects", + "description": "List of object labels to send to GenAI by default." + }, + "required_zones": { + "label": "Required zones", + "description": "Zones that must be entered for objects to qualify for GenAI description generation." + }, + "debug_save_thumbnails": { + "label": "Save thumbnails", + "description": "Save thumbnails sent to GenAI for debugging and review." + }, + "send_triggers": { + "label": "GenAI triggers", + "description": "Defines when frames should be sent to GenAI (on end, after updates, etc.).", + "tracked_object_end": { + "label": "Send on end", + "description": "Send a request to GenAI when the tracked object ends." }, - "global_args": { - "label": "Global FFmpeg arguments." - }, - "hwaccel_args": { - "label": "FFmpeg hardware acceleration arguments." - }, - "input_args": { - "label": "FFmpeg input arguments." - }, - "output_args": { - "label": "FFmpeg output arguments per role.", - "properties": { - "detect": { - "label": "Detect role FFmpeg output arguments." - }, - "record": { - "label": "Record role FFmpeg output arguments." 
- } - } - }, - "retry_interval": { - "label": "Time in seconds to wait before FFmpeg retries connecting to the camera." - }, - "apple_compatibility": { - "label": "Set tag on HEVC (H.265) recording stream to improve compatibility with Apple players." - }, - "inputs": { - "label": "Camera inputs." + "after_significant_updates": { + "label": "Early GenAI trigger", + "description": "Send a request to GenAI after a specified number of significant updates for the tracked object." } + }, + "enabled_in_config": { + "label": "Original GenAI state", + "description": "Indicates whether GenAI was enabled in the original static config." } + } + }, + "record": { + "label": "Recording", + "description": "Recording and retention settings for this camera.", + "enabled": { + "label": "Enable recording", + "description": "Enable or disable recording for this camera." }, - "live": { - "label": "Live playback settings.", - "properties": { - "streams": { - "label": "Friendly names and restream names to use for live view." - }, - "height": { - "label": "Live camera view height" - }, - "quality": { - "label": "Live camera view quality" - } - } + "expire_interval": { + "label": "Record cleanup interval", + "description": "Minutes between cleanup passes that remove expired recording segments." }, - "lpr": { - "label": "LPR config.", - "properties": { - "enabled": { - "label": "Enable license plate recognition." - }, - "expire_time": { - "label": "Expire plates not seen after number of seconds (for dedicated LPR cameras only)." - }, - "min_area": { - "label": "Minimum area of license plate to begin running recognition." - }, - "enhancement": { - "label": "Amount of contrast adjustment and denoising to apply to license plate images before recognition." - } + "continuous": { + "label": "Continuous retention", + "description": "Number of days to retain recordings regardless of tracked objects or motion. 
Set to 0 if you only want to retain recordings of alerts and detections.", + "days": { + "label": "Retention days", + "description": "Days to retain recordings." } }, "motion": { - "label": "Motion detection configuration.", - "properties": { - "enabled": { - "label": "Enable motion on all cameras." + "label": "Motion retention", + "description": "Number of days to retain recordings triggered by motion regardless of tracked objects. Set to 0 if you only want to retain recordings of alerts and detections.", + "days": { + "label": "Retention days", + "description": "Days to retain recordings." + } + }, + "detections": { + "label": "Detection retention", + "description": "Recording retention settings for detection events including pre/post capture durations.", + "pre_capture": { + "label": "Pre-capture seconds", + "description": "Number of seconds before the detection event to include in the recording." + }, + "post_capture": { + "label": "Post-capture seconds", + "description": "Number of seconds after the detection event to include in the recording." + }, + "retain": { + "label": "Event retention", + "description": "Retention settings for recordings of detection events.", + "days": { + "label": "Retention days", + "description": "Number of days to retain recordings of detection events." }, - "threshold": { - "label": "Motion detection threshold (1-255)." - }, - "lightning_threshold": { - "label": "Lightning detection threshold (0.3-1.0)." - }, - "improve_contrast": { - "label": "Improve Contrast" - }, - "contour_area": { - "label": "Contour Area" - }, - "delta_alpha": { - "label": "Delta Alpha" - }, - "frame_alpha": { - "label": "Frame Alpha" - }, - "frame_height": { - "label": "Frame Height" - }, - "mask": { - "label": "Coordinates polygon for the motion mask." - }, - "mqtt_off_delay": { - "label": "Delay for updating MQTT with no motion detected." - }, - "enabled_in_config": { - "label": "Keep track of original state of motion detection." 
+ "mode": { + "label": "Retention mode", + "description": "Mode for retention: all (save all segments), motion (save segments with motion), or active_objects (save segments with active objects)." } } }, - "objects": { - "label": "Object configuration.", - "properties": { - "track": { - "label": "Objects to track." + "alerts": { + "label": "Alert retention", + "description": "Recording retention settings for alert events including pre/post capture durations.", + "pre_capture": { + "label": "Pre-capture seconds", + "description": "Number of seconds before the detection event to include in the recording." + }, + "post_capture": { + "label": "Post-capture seconds", + "description": "Number of seconds after the detection event to include in the recording." + }, + "retain": { + "label": "Event retention", + "description": "Retention settings for recordings of detection events.", + "days": { + "label": "Retention days", + "description": "Number of days to retain recordings of detection events." }, - "filters": { - "label": "Object filters.", - "properties": { - "min_area": { - "label": "Minimum area of bounding box for object to be counted. Can be pixels (int) or percentage (float between 0.000001 and 0.99)." - }, - "max_area": { - "label": "Maximum area of bounding box for object to be counted. Can be pixels (int) or percentage (float between 0.000001 and 0.99)." - }, - "min_ratio": { - "label": "Minimum ratio of bounding box's width/height for object to be counted." - }, - "max_ratio": { - "label": "Maximum ratio of bounding box's width/height for object to be counted." - }, - "threshold": { - "label": "Average detection confidence threshold for object to be counted." - }, - "min_score": { - "label": "Minimum detection confidence for object to be counted." - }, - "mask": { - "label": "Detection area polygon mask for this filter configuration." - } - } - }, - "mask": { - "label": "Object mask." 
- }, - "genai": { - "label": "Config for using genai to analyze objects.", - "properties": { - "enabled": { - "label": "Enable GenAI for camera." - }, - "use_snapshot": { - "label": "Use snapshots for generating descriptions." - }, - "prompt": { - "label": "Default caption prompt." - }, - "object_prompts": { - "label": "Object specific prompts." - }, - "objects": { - "label": "List of objects to run generative AI for." - }, - "required_zones": { - "label": "List of required zones to be entered in order to run generative AI." - }, - "debug_save_thumbnails": { - "label": "Save thumbnails sent to generative AI for debugging purposes." - }, - "send_triggers": { - "label": "What triggers to use to send frames to generative AI for a tracked object.", - "properties": { - "tracked_object_end": { - "label": "Send once the object is no longer tracked." - }, - "after_significant_updates": { - "label": "Send an early request to generative AI when X frames accumulated." - } - } - }, - "enabled_in_config": { - "label": "Keep track of original state of generative AI." - } - } + "mode": { + "label": "Retention mode", + "description": "Mode for retention: all (save all segments), motion (save segments with motion), or active_objects (save segments with active objects)." } } }, - "record": { - "label": "Record configuration.", - "properties": { - "enabled": { - "label": "Enable record on all cameras." - }, - "sync_recordings": { - "label": "Sync recordings with disk on startup and once a day." - }, - "expire_interval": { - "label": "Number of minutes to wait between cleanup runs." - }, - "continuous": { - "label": "Continuous recording retention settings.", - "properties": { - "days": { - "label": "Default retention period." - } - } - }, - "motion": { - "label": "Motion recording retention settings.", - "properties": { - "days": { - "label": "Default retention period." 
- } - } - }, - "detections": { - "label": "Detection specific retention settings.", - "properties": { - "pre_capture": { - "label": "Seconds to retain before event starts." - }, - "post_capture": { - "label": "Seconds to retain after event ends." - }, - "retain": { - "label": "Event retention settings.", - "properties": { - "days": { - "label": "Default retention period." - }, - "mode": { - "label": "Retain mode." - } - } - } - } - }, - "alerts": { - "label": "Alert specific retention settings.", - "properties": { - "pre_capture": { - "label": "Seconds to retain before event starts." - }, - "post_capture": { - "label": "Seconds to retain after event ends." - }, - "retain": { - "label": "Event retention settings.", - "properties": { - "days": { - "label": "Default retention period." - }, - "mode": { - "label": "Retain mode." - } - } - } - } - }, - "export": { - "label": "Recording Export Config", - "properties": { - "timelapse_args": { - "label": "Timelapse Args" - } - } - }, - "preview": { - "label": "Recording Preview Config", - "properties": { - "quality": { - "label": "Quality of recording preview." - } - } - }, - "enabled_in_config": { - "label": "Keep track of original state of recording." - } + "export": { + "label": "Export config", + "description": "Settings used when exporting recordings such as timelapse and hardware acceleration.", + "hwaccel_args": { + "label": "Export hwaccel args", + "description": "Hardware acceleration args to use for export/transcode operations." } }, - "review": { - "label": "Review configuration.", - "properties": { - "alerts": { - "label": "Review alerts config.", - "properties": { - "enabled": { - "label": "Enable alerts." - }, - "labels": { - "label": "Labels to create alerts for." - }, - "required_zones": { - "label": "List of required zones to be entered in order to save the event as an alert." - }, - "enabled_in_config": { - "label": "Keep track of original state of alerts." 
- }, - "cutoff_time": { - "label": "Time to cutoff alerts after no alert-causing activity has occurred." - } - } - }, - "detections": { - "label": "Review detections config.", - "properties": { - "enabled": { - "label": "Enable detections." - }, - "labels": { - "label": "Labels to create detections for." - }, - "required_zones": { - "label": "List of required zones to be entered in order to save the event as a detection." - }, - "cutoff_time": { - "label": "Time to cutoff detection after no detection-causing activity has occurred." - }, - "enabled_in_config": { - "label": "Keep track of original state of detections." - } - } - }, - "genai": { - "label": "Review description genai config.", - "properties": { - "enabled": { - "label": "Enable GenAI descriptions for review items." - }, - "alerts": { - "label": "Enable GenAI for alerts." - }, - "detections": { - "label": "Enable GenAI for detections." - }, - "additional_concerns": { - "label": "Additional concerns that GenAI should make note of on this camera." - }, - "debug_save_thumbnails": { - "label": "Save thumbnails sent to generative AI for debugging purposes." - }, - "enabled_in_config": { - "label": "Keep track of original state of generative AI." - }, - "preferred_language": { - "label": "Preferred language for GenAI Response" - }, - "activity_context_prompt": { - "label": "Custom activity context prompt defining normal activity patterns for this property." 
- } - } - } - } - }, - "semantic_search": { - "label": "Semantic search configuration.", - "properties": { - "triggers": { - "label": "Trigger actions on tracked objects that match existing thumbnails or descriptions", - "properties": { - "enabled": { - "label": "Enable this trigger" - }, - "type": { - "label": "Type of trigger" - }, - "data": { - "label": "Trigger content (text phrase or image ID)" - }, - "threshold": { - "label": "Confidence score required to run the trigger" - }, - "actions": { - "label": "Actions to perform when trigger is matched" - } - } - } - } - }, - "snapshots": { - "label": "Snapshot configuration.", - "properties": { - "enabled": { - "label": "Snapshots enabled." - }, - "clean_copy": { - "label": "Create a clean copy of the snapshot image." - }, - "timestamp": { - "label": "Add a timestamp overlay on the snapshot." - }, - "bounding_box": { - "label": "Add a bounding box overlay on the snapshot." - }, - "crop": { - "label": "Crop the snapshot to the detected object." - }, - "required_zones": { - "label": "List of required zones to be entered in order to save a snapshot." - }, - "height": { - "label": "Snapshot image height." - }, - "retain": { - "label": "Snapshot retention.", - "properties": { - "default": { - "label": "Default retention period." - }, - "mode": { - "label": "Retain mode." - }, - "objects": { - "label": "Object retention period." - } - } - }, - "quality": { - "label": "Quality of the encoded jpeg (0-100)." - } - } - }, - "timestamp_style": { - "label": "Timestamp style configuration.", - "properties": { - "position": { - "label": "Timestamp position." - }, - "format": { - "label": "Timestamp format." - }, - "color": { - "label": "Timestamp color.", - "properties": { - "red": { - "label": "Red" - }, - "green": { - "label": "Green" - }, - "blue": { - "label": "Blue" - } - } - }, - "thickness": { - "label": "Timestamp thickness." - }, - "effect": { - "label": "Timestamp effect." 
- } - } - }, - "best_image_timeout": { - "label": "How long to wait for the image with the highest confidence score." - }, - "mqtt": { - "label": "MQTT configuration.", - "properties": { - "enabled": { - "label": "Send image over MQTT." - }, - "timestamp": { - "label": "Add timestamp to MQTT image." - }, - "bounding_box": { - "label": "Add bounding box to MQTT image." - }, - "crop": { - "label": "Crop MQTT image to detected object." - }, - "height": { - "label": "MQTT image height." - }, - "required_zones": { - "label": "List of required zones to be entered in order to send the image." - }, - "quality": { - "label": "Quality of the encoded jpeg (0-100)." - } - } - }, - "notifications": { - "label": "Notifications configuration.", - "properties": { - "enabled": { - "label": "Enable notifications" - }, - "email": { - "label": "Email required for push." - }, - "cooldown": { - "label": "Cooldown period for notifications (time in seconds)." - }, - "enabled_in_config": { - "label": "Keep track of original state of notifications." - } - } - }, - "onvif": { - "label": "Camera Onvif Configuration.", - "properties": { - "host": { - "label": "Onvif Host" - }, - "port": { - "label": "Onvif Port" - }, - "user": { - "label": "Onvif Username" - }, - "password": { - "label": "Onvif Password" - }, - "tls_insecure": { - "label": "Onvif Disable TLS verification" - }, - "autotracking": { - "label": "PTZ auto tracking config.", - "properties": { - "enabled": { - "label": "Enable PTZ object autotracking." - }, - "calibrate_on_startup": { - "label": "Perform a camera calibration when Frigate starts." - }, - "zooming": { - "label": "Autotracker zooming mode." - }, - "zoom_factor": { - "label": "Zooming factor (0.1-0.75)." - }, - "track": { - "label": "Objects to track." - }, - "required_zones": { - "label": "List of required zones to be entered in order to begin autotracking." - }, - "return_preset": { - "label": "Name of camera preset to return to when object tracking is over." 
- }, - "timeout": { - "label": "Seconds to delay before returning to preset." - }, - "movement_weights": { - "label": "Internal value used for PTZ movements based on the speed of your camera's motor." - }, - "enabled_in_config": { - "label": "Keep track of original state of autotracking." - } - } - }, - "ignore_time_mismatch": { - "label": "Onvif Ignore Time Synchronization Mismatch Between Camera and Server" - } - } - }, - "type": { - "label": "Camera Type" - }, - "ui": { - "label": "Camera UI Modifications.", - "properties": { - "order": { - "label": "Order of camera in UI." - }, - "dashboard": { - "label": "Show this camera in Frigate dashboard UI." - } - } - }, - "webui_url": { - "label": "URL to visit the camera directly from system page" - }, - "zones": { - "label": "Zone configuration.", - "properties": { - "filters": { - "label": "Zone filters.", - "properties": { - "min_area": { - "label": "Minimum area of bounding box for object to be counted. Can be pixels (int) or percentage (float between 0.000001 and 0.99)." - }, - "max_area": { - "label": "Maximum area of bounding box for object to be counted. Can be pixels (int) or percentage (float between 0.000001 and 0.99)." - }, - "min_ratio": { - "label": "Minimum ratio of bounding box's width/height for object to be counted." - }, - "max_ratio": { - "label": "Maximum ratio of bounding box's width/height for object to be counted." - }, - "threshold": { - "label": "Average detection confidence threshold for object to be counted." - }, - "min_score": { - "label": "Minimum detection confidence for object to be counted." - }, - "mask": { - "label": "Detection area polygon mask for this filter configuration." - } - } - }, - "coordinates": { - "label": "Coordinates polygon for the defined zone." - }, - "distances": { - "label": "Real-world distances for the sides of quadrilateral for the defined zone." 
- }, - "inertia": { - "label": "Number of consecutive frames required for object to be considered present in the zone." - }, - "loitering_time": { - "label": "Number of seconds that an object must loiter to be considered in the zone." - }, - "speed_threshold": { - "label": "Minimum speed value for an object to be considered in the zone." - }, - "objects": { - "label": "List of objects that can trigger the zone." - } + "preview": { + "label": "Preview config", + "description": "Settings controlling the quality of recording previews shown in the UI.", + "quality": { + "label": "Preview quality", + "description": "Preview quality level (very_low, low, medium, high, very_high)." } }, "enabled_in_config": { - "label": "Keep track of original state of camera." + "label": "Original recording state", + "description": "Indicates whether recording was enabled in the original static configuration." } + }, + "review": { + "label": "Review", + "description": "Settings that control alerts, detections, and GenAI review summaries used by the UI and storage for this camera.", + "alerts": { + "label": "Alerts config", + "description": "Settings for which tracked objects generate alerts and how alerts are retained.", + "enabled": { + "label": "Enable alerts", + "description": "Enable or disable alert generation for this camera." + }, + "labels": { + "label": "Alert labels", + "description": "List of object labels that qualify as alerts (for example: car, person)." + }, + "required_zones": { + "label": "Required zones", + "description": "Zones that an object must enter to be considered an alert; leave empty to allow any zone." + }, + "enabled_in_config": { + "label": "Original alerts state", + "description": "Tracks whether alerts were originally enabled in the static configuration." + }, + "cutoff_time": { + "label": "Alerts cutoff time", + "description": "Seconds to wait after no alert-causing activity before cutting off an alert." 
+ } + }, + "detections": { + "label": "Detections config", + "description": "Settings for creating detection events (non-alert) and how long to keep them.", + "enabled": { + "label": "Enable detections", + "description": "Enable or disable detection events for this camera." + }, + "labels": { + "label": "Detection labels", + "description": "List of object labels that qualify as detection events." + }, + "required_zones": { + "label": "Required zones", + "description": "Zones that an object must enter to be considered a detection; leave empty to allow any zone." + }, + "cutoff_time": { + "label": "Detections cutoff time", + "description": "Seconds to wait after no detection-causing activity before cutting off a detection." + }, + "enabled_in_config": { + "label": "Original detections state", + "description": "Tracks whether detections were originally enabled in the static configuration." + } + }, + "genai": { + "label": "GenAI config", + "description": "Controls use of generative AI for producing descriptions and summaries of review items.", + "enabled": { + "label": "Enable GenAI descriptions", + "description": "Enable or disable GenAI-generated descriptions and summaries for review items." + }, + "alerts": { + "label": "Enable GenAI for alerts", + "description": "Use GenAI to generate descriptions for alert items." + }, + "detections": { + "label": "Enable GenAI for detections", + "description": "Use GenAI to generate descriptions for detection items." + }, + "image_source": { + "label": "Review image source", + "description": "Source of images sent to GenAI ('preview' or 'recordings'); 'recordings' uses higher quality frames but more tokens." + }, + "additional_concerns": { + "label": "Additional concerns", + "description": "A list of additional concerns or notes the GenAI should consider when evaluating activity on this camera." 
+ }, + "debug_save_thumbnails": { + "label": "Save thumbnails", + "description": "Save thumbnails that are sent to the GenAI provider for debugging and review." + }, + "enabled_in_config": { + "label": "Original GenAI state", + "description": "Tracks whether GenAI review was originally enabled in the static configuration." + }, + "preferred_language": { + "label": "Preferred language", + "description": "Preferred language to request from the GenAI provider for generated responses." + }, + "activity_context_prompt": { + "label": "Activity context prompt", + "description": "Custom prompt describing what is and is not suspicious activity to provide context for GenAI summaries." + } + } + }, + "semantic_search": { + "label": "Semantic Search", + "description": "Settings for semantic search which builds and queries object embeddings to find similar items.", + "triggers": { + "label": "Triggers", + "description": "Actions and matching criteria for camera-specific semantic search triggers.", + "friendly_name": { + "label": "Friendly name", + "description": "Optional friendly name displayed in the UI for this trigger." + }, + "enabled": { + "label": "Enable this trigger", + "description": "Enable or disable this semantic search trigger." + }, + "type": { + "label": "Trigger type", + "description": "Type of trigger: 'thumbnail' (match against image) or 'description' (match against text)." + }, + "data": { + "label": "Trigger content", + "description": "Text phrase or thumbnail ID to match against tracked objects." + }, + "threshold": { + "label": "Trigger threshold", + "description": "Minimum similarity score (0-1) required to activate this trigger." + }, + "actions": { + "label": "Trigger actions", + "description": "List of actions to execute when trigger matches (notification, sub_label, attribute)." 
+ } + } + }, + "snapshots": { + "label": "Snapshots", + "description": "Settings for saved JPEG snapshots of tracked objects for this camera.", + "enabled": { + "label": "Snapshots enabled", + "description": "Enable or disable saving snapshots for this camera." + }, + "clean_copy": { + "label": "Save clean copy", + "description": "Save an unannotated clean copy of snapshots in addition to annotated ones." + }, + "timestamp": { + "label": "Timestamp overlay", + "description": "Overlay a timestamp on saved snapshots." + }, + "bounding_box": { + "label": "Bounding box overlay", + "description": "Draw bounding boxes for tracked objects on saved snapshots." + }, + "crop": { + "label": "Crop snapshot", + "description": "Crop saved snapshots to the detected object's bounding box." + }, + "required_zones": { + "label": "Required zones", + "description": "Zones an object must enter for a snapshot to be saved." + }, + "height": { + "label": "Snapshot height", + "description": "Height (pixels) to resize saved snapshots to; leave empty to preserve original size." + }, + "retain": { + "label": "Snapshot retention", + "description": "Retention settings for saved snapshots including default days and per-object overrides.", + "default": { + "label": "Default retention", + "description": "Default number of days to retain snapshots." + }, + "mode": { + "label": "Retention mode", + "description": "Mode for retention: all (save all segments), motion (save segments with motion), or active_objects (save segments with active objects)." + }, + "objects": { + "label": "Object retention", + "description": "Per-object overrides for snapshot retention days." + } + }, + "quality": { + "label": "JPEG quality", + "description": "JPEG encode quality for saved snapshots (0-100)." 
+ } + }, + "timestamp_style": { + "label": "Timestamp style", + "description": "Styling options for in-feed timestamps applied to recordings and snapshots.", + "position": { + "label": "Timestamp position", + "description": "Position of the timestamp on the image (tl/tr/bl/br)." + }, + "format": { + "label": "Timestamp format", + "description": "Datetime format string used for timestamps (Python datetime format codes)." + }, + "color": { + "label": "Timestamp color", + "description": "RGB color values for the timestamp text (all values 0-255).", + "red": { + "label": "Red", + "description": "Red component (0-255) for timestamp color." + }, + "green": { + "label": "Green", + "description": "Green component (0-255) for timestamp color." + }, + "blue": { + "label": "Blue", + "description": "Blue component (0-255) for timestamp color." + } + }, + "thickness": { + "label": "Timestamp thickness", + "description": "Line thickness of the timestamp text." + }, + "effect": { + "label": "Timestamp effect", + "description": "Visual effect for the timestamp text (none, solid, shadow)." + } + }, + "best_image_timeout": { + "label": "Best image timeout", + "description": "How long to wait for the image with the highest confidence score." + }, + "mqtt": { + "label": "MQTT", + "description": "MQTT image publishing settings.", + "enabled": { + "label": "Send image", + "description": "Enable publishing image snapshots for objects to MQTT topics for this camera." + }, + "timestamp": { + "label": "Add timestamp", + "description": "Overlay a timestamp on images published to MQTT." + }, + "bounding_box": { + "label": "Add bounding box", + "description": "Draw bounding boxes on images published over MQTT." + }, + "crop": { + "label": "Crop image", + "description": "Crop images published to MQTT to the detected object's bounding box." + }, + "height": { + "label": "Image height", + "description": "Height (pixels) to resize images published over MQTT." 
+ }, + "required_zones": { + "label": "Required zones", + "description": "Zones that an object must enter for an MQTT image to be published." + }, + "quality": { + "label": "JPEG quality", + "description": "JPEG quality for images published to MQTT (0-100)." + } + }, + "notifications": { + "label": "Notifications", + "description": "Settings to enable and control notifications for this camera.", + "enabled": { + "label": "Enable notifications", + "description": "Enable or disable notifications for this camera." + }, + "email": { + "label": "Notification email", + "description": "Email address used for push notifications or required by certain notification providers." + }, + "cooldown": { + "label": "Cooldown period", + "description": "Cooldown (seconds) between notifications to avoid spamming recipients." + }, + "enabled_in_config": { + "label": "Original notifications state", + "description": "Indicates whether notifications were enabled in the original static configuration." + } + }, + "onvif": { + "label": "ONVIF", + "description": "ONVIF connection and PTZ autotracking settings for this camera.", + "host": { + "label": "ONVIF host", + "description": "Host (and optional scheme) for the ONVIF service for this camera." + }, + "port": { + "label": "ONVIF port", + "description": "Port number for the ONVIF service." + }, + "user": { + "label": "ONVIF username", + "description": "Username for ONVIF authentication; some devices require admin user for ONVIF." + }, + "password": { + "label": "ONVIF password", + "description": "Password for ONVIF authentication." + }, + "tls_insecure": { + "label": "Disable TLS verify", + "description": "Skip TLS verification and disable digest auth for ONVIF (unsafe; use in safe networks only)." 
+ }, + "autotracking": { + "label": "Autotracking", + "description": "Automatically track moving objects and keep them centered in the frame using PTZ camera movements.", + "enabled": { + "label": "Enable Autotracking", + "description": "Enable or disable automatic PTZ camera tracking of detected objects." + }, + "calibrate_on_startup": { + "label": "Calibrate on start", + "description": "Measure PTZ motor speeds on startup to improve tracking accuracy. Frigate will update config with movement_weights after calibration." + }, + "zooming": { + "label": "Zoom mode", + "description": "Control zoom behavior: disabled (pan/tilt only), absolute (most compatible), or relative (concurrent pan/tilt/zoom)." + }, + "zoom_factor": { + "label": "Zoom factor", + "description": "Control zoom level on tracked objects. Lower values keep more scene in view; higher values zoom in closer but may lose tracking. Values between 0.1 and 0.75." + }, + "track": { + "label": "Tracked objects", + "description": "List of object types that should trigger autotracking." + }, + "required_zones": { + "label": "Required zones", + "description": "Objects must enter one of these zones before autotracking begins." + }, + "return_preset": { + "label": "Return preset", + "description": "ONVIF preset name configured in camera firmware to return to after tracking ends." + }, + "timeout": { + "label": "Return timeout", + "description": "Wait this many seconds after losing tracking before returning camera to preset position." + }, + "movement_weights": { + "label": "Movement weights", + "description": "Calibration values automatically generated by camera calibration. Do not modify manually." + }, + "enabled_in_config": { + "label": "Original autotrack state", + "description": "Internal field to track whether autotracking was enabled in configuration." 
+ } + }, + "ignore_time_mismatch": { + "label": "Ignore time mismatch", + "description": "Ignore time synchronization differences between camera and Frigate server for ONVIF communication." + } + }, + "type": { + "label": "Camera type", + "description": "Camera Type" + }, + "ui": { + "label": "Camera UI", + "description": "Display ordering and visibility for this camera in the UI. Ordering affects the default dashboard. For more granular control, use camera groups.", + "order": { + "label": "UI order", + "description": "Numeric order used to sort the camera in the UI (default dashboard and lists); larger numbers appear later." + }, + "dashboard": { + "label": "Show in UI", + "description": "Toggle whether this camera is visible everywhere in the Frigate UI. Disabling this will require manually editing the config to view this camera in the UI again." + } + }, + "webui_url": { + "label": "Camera URL", + "description": "URL to visit the camera directly from system page" + }, + "zones": { + "label": "Zones", + "description": "Zones allow you to define a specific area of the frame so you can determine whether or not an object is within a particular area.", + "friendly_name": { + "label": "Zone name", + "description": "A user-friendly name for the zone, displayed in the Frigate UI. If not set, a formatted version of the zone name will be used." + }, + "enabled": { + "label": "Enabled", + "description": "Enable or disable this zone. Disabled zones are ignored at runtime." + }, + "enabled_in_config": { + "label": "Keep track of original state of zone." + }, + "filters": { + "label": "Zone filters", + "description": "Filters to apply to objects within this zone. Used to reduce false positives or restrict which objects are considered present in the zone.", + "min_area": { + "label": "Minimum object area", + "description": "Minimum bounding box area (pixels or percentage) required for this object type. Can be pixels (int) or percentage (float between 0.000001 and 0.99)." 
+ }, + "max_area": { + "label": "Maximum object area", + "description": "Maximum bounding box area (pixels or percentage) allowed for this object type. Can be pixels (int) or percentage (float between 0.000001 and 0.99)." + }, + "min_ratio": { + "label": "Minimum aspect ratio", + "description": "Minimum width/height ratio required for the bounding box to qualify." + }, + "max_ratio": { + "label": "Maximum aspect ratio", + "description": "Maximum width/height ratio allowed for the bounding box to qualify." + }, + "threshold": { + "label": "Confidence threshold", + "description": "Average detection confidence threshold required for the object to be considered a true positive." + }, + "min_score": { + "label": "Minimum confidence", + "description": "Minimum single-frame detection confidence required for the object to be counted." + }, + "mask": { + "label": "Filter mask", + "description": "Polygon coordinates defining where this filter applies within the frame." + }, + "raw_mask": { + "label": "Raw Mask" + } + }, + "coordinates": { + "label": "Coordinates", + "description": "Polygon coordinates that define the zone area. Can be a comma-separated string or a list of coordinate strings. Coordinates should be relative (0-1) or absolute (legacy)." + }, + "distances": { + "label": "Real-world distances", + "description": "Optional real-world distances for each side of the zone quadrilateral, used for speed or distance calculations. Must have exactly 4 values if set." + }, + "inertia": { + "label": "Inertia frames", + "description": "Number of consecutive frames an object must be detected in the zone before it is considered present. Helps filter out transient detections." + }, + "loitering_time": { + "label": "Loitering seconds", + "description": "Number of seconds an object must remain in the zone to be considered as loitering. Set to 0 to disable loitering detection." 
+ }, + "speed_threshold": { + "label": "Minimum speed", + "description": "Minimum speed (in real-world units if distances are set) required for an object to be considered present in the zone. Used for speed-based zone triggers." + }, + "objects": { + "label": "Trigger objects", + "description": "List of object types (from labelmap) that can trigger this zone. Can be a string or a list of strings. If empty, all objects are considered." + } + }, + "enabled_in_config": { + "label": "Original camera state", + "description": "Keep track of original state of camera." } -} \ No newline at end of file +} diff --git a/web/public/locales/en/config/classification.json b/web/public/locales/en/config/classification.json deleted file mode 100644 index e8014b2fa..000000000 --- a/web/public/locales/en/config/classification.json +++ /dev/null @@ -1,58 +0,0 @@ -{ - "label": "Object classification config.", - "properties": { - "bird": { - "label": "Bird classification config.", - "properties": { - "enabled": { - "label": "Enable bird classification." - }, - "threshold": { - "label": "Minimum classification score required to be considered a match." - } - } - }, - "custom": { - "label": "Custom Classification Model Configs.", - "properties": { - "enabled": { - "label": "Enable running the model." - }, - "name": { - "label": "Name of classification model." - }, - "threshold": { - "label": "Classification score threshold to change the state." - }, - "object_config": { - "properties": { - "objects": { - "label": "Object types to classify." - }, - "classification_type": { - "label": "Type of classification that is applied." - } - } - }, - "state_config": { - "properties": { - "cameras": { - "label": "Cameras to run classification on.", - "properties": { - "crop": { - "label": "Crop of image frame on this camera to run classification on." - } - } - }, - "motion": { - "label": "If classification should be run when motion is detected in the crop." 
- }, - "interval": { - "label": "Interval to run classification on in seconds." - } - } - } - } - } - } -} \ No newline at end of file diff --git a/web/public/locales/en/config/database.json b/web/public/locales/en/config/database.json deleted file mode 100644 index ece7ccbaa..000000000 --- a/web/public/locales/en/config/database.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "label": "Database configuration.", - "properties": { - "path": { - "label": "Database path." - } - } -} \ No newline at end of file diff --git a/web/public/locales/en/config/detect.json b/web/public/locales/en/config/detect.json deleted file mode 100644 index 9e1b59313..000000000 --- a/web/public/locales/en/config/detect.json +++ /dev/null @@ -1,51 +0,0 @@ -{ - "label": "Global object tracking configuration.", - "properties": { - "enabled": { - "label": "Detection Enabled." - }, - "height": { - "label": "Height of the stream for the detect role." - }, - "width": { - "label": "Width of the stream for the detect role." - }, - "fps": { - "label": "Number of frames per second to process through detection." - }, - "min_initialized": { - "label": "Minimum number of consecutive hits for an object to be initialized by the tracker." - }, - "max_disappeared": { - "label": "Maximum number of frames the object can disappear before detection ends." - }, - "stationary": { - "label": "Stationary objects config.", - "properties": { - "interval": { - "label": "Frame interval for checking stationary objects." - }, - "threshold": { - "label": "Number of frames without a position change for an object to be considered stationary" - }, - "max_frames": { - "label": "Max frames for stationary objects.", - "properties": { - "default": { - "label": "Default max frames." - }, - "objects": { - "label": "Object specific max frames." - } - } - }, - "classifier": { - "label": "Enable visual classifier for determing if objects with jittery bounding boxes are stationary." 
- } - } - }, - "annotation_offset": { - "label": "Milliseconds to offset detect annotations by." - } - } -} \ No newline at end of file diff --git a/web/public/locales/en/config/detectors.json b/web/public/locales/en/config/detectors.json deleted file mode 100644 index 1bd6fec70..000000000 --- a/web/public/locales/en/config/detectors.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "label": "Detector hardware configuration.", - "properties": { - "type": { - "label": "Detector Type" - }, - "model": { - "label": "Detector specific model configuration." - }, - "model_path": { - "label": "Detector specific model path." - } - } -} \ No newline at end of file diff --git a/web/public/locales/en/config/environment_vars.json b/web/public/locales/en/config/environment_vars.json deleted file mode 100644 index ce97ce49e..000000000 --- a/web/public/locales/en/config/environment_vars.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "label": "Frigate environment variables." -} \ No newline at end of file diff --git a/web/public/locales/en/config/face_recognition.json b/web/public/locales/en/config/face_recognition.json deleted file mode 100644 index 705d75468..000000000 --- a/web/public/locales/en/config/face_recognition.json +++ /dev/null @@ -1,36 +0,0 @@ -{ - "label": "Face recognition config.", - "properties": { - "enabled": { - "label": "Enable face recognition." - }, - "model_size": { - "label": "The size of the embeddings model used." - }, - "unknown_score": { - "label": "Minimum face distance score required to be marked as a potential match." - }, - "detection_threshold": { - "label": "Minimum face detection score required to be considered a face." - }, - "recognition_threshold": { - "label": "Minimum face distance score required to be considered a match." - }, - "min_area": { - "label": "Min area of face box to consider running face recognition." - }, - "min_faces": { - "label": "Min face recognitions for the sub label to be applied to the person object." 
- }, - "save_attempts": { - "label": "Number of face attempts to save in the recent recognitions tab." - }, - "blur_confidence_filter": { - "label": "Apply blur quality filter to face confidence." - }, - "device": { - "label": "The device key to use for face recognition.", - "description": "This is an override, to target a specific device. See https://onnxruntime.ai/docs/execution-providers/ for more information" - } - } -} \ No newline at end of file diff --git a/web/public/locales/en/config/ffmpeg.json b/web/public/locales/en/config/ffmpeg.json deleted file mode 100644 index 570da5a35..000000000 --- a/web/public/locales/en/config/ffmpeg.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "label": "Global FFmpeg configuration.", - "properties": { - "path": { - "label": "FFmpeg path" - }, - "global_args": { - "label": "Global FFmpeg arguments." - }, - "hwaccel_args": { - "label": "FFmpeg hardware acceleration arguments." - }, - "input_args": { - "label": "FFmpeg input arguments." - }, - "output_args": { - "label": "FFmpeg output arguments per role.", - "properties": { - "detect": { - "label": "Detect role FFmpeg output arguments." - }, - "record": { - "label": "Record role FFmpeg output arguments." - } - } - }, - "retry_interval": { - "label": "Time in seconds to wait before FFmpeg retries connecting to the camera." - }, - "apple_compatibility": { - "label": "Set tag on HEVC (H.265) recording stream to improve compatibility with Apple players." - } - } -} \ No newline at end of file diff --git a/web/public/locales/en/config/genai.json b/web/public/locales/en/config/genai.json deleted file mode 100644 index fed679d9e..000000000 --- a/web/public/locales/en/config/genai.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "label": "Generative AI configuration.", - "properties": { - "api_key": { - "label": "Provider API key." - }, - "base_url": { - "label": "Provider base url." - }, - "model": { - "label": "GenAI model." - }, - "provider": { - "label": "GenAI provider." 
- }, - "provider_options": { - "label": "GenAI Provider extra options." - }, - "runtime_options": { - "label": "Options to pass during inference calls." - } - } -} diff --git a/web/public/locales/en/config/global.json b/web/public/locales/en/config/global.json new file mode 100644 index 000000000..5268c1b02 --- /dev/null +++ b/web/public/locales/en/config/global.json @@ -0,0 +1,2192 @@ +{ + "version": { + "label": "Current config version", + "description": "Numeric or string version of the active configuration to help detect migrations or format changes." + }, + "safe_mode": { + "label": "Safe mode", + "description": "When enabled, start Frigate in safe mode with reduced features for troubleshooting." + }, + "environment_vars": { + "label": "Environment variables", + "description": "Key/value pairs of environment variables to set for the Frigate process in Home Assistant OS. Non-HAOS users must use Docker environment variable configuration instead." + }, + "logger": { + "label": "Logging", + "description": "Controls default log verbosity and per-component log level overrides.", + "default": { + "label": "Logging level", + "description": "Default global log verbosity (debug, info, warning, error)." + }, + "logs": { + "label": "Per-process log level", + "description": "Per-component log level overrides to increase or decrease verbosity for specific modules." + } + }, + "auth": { + "label": "Authentication", + "description": "Authentication and session-related settings including cookie and rate limit options.", + "enabled": { + "label": "Enable authentication", + "description": "Enable native authentication for the Frigate UI." + }, + "reset_admin_password": { + "label": "Reset admin password", + "description": "If true, reset the admin user's password on startup and print the new password in logs." + }, + "cookie_name": { + "label": "JWT cookie name", + "description": "Name of the cookie used to store the JWT token for native authentication." 
+ }, + "cookie_secure": { + "label": "Secure cookie flag", + "description": "Set the secure flag on the auth cookie; should be true when using TLS." + }, + "session_length": { + "label": "Session length", + "description": "Session duration in seconds for JWT-based sessions." + }, + "refresh_time": { + "label": "Session refresh window", + "description": "When a session is within this many seconds of expiring, refresh it back to full length." + }, + "failed_login_rate_limit": { + "label": "Failed login limits", + "description": "Rate limiting rules for failed login attempts to reduce brute-force attacks." + }, + "trusted_proxies": { + "label": "Trusted proxies", + "description": "List of trusted proxy IPs used when determining client IP for rate limiting." + }, + "hash_iterations": { + "label": "Hash iterations", + "description": "Number of PBKDF2-SHA256 iterations to use when hashing user passwords." + }, + "roles": { + "label": "Role mappings", + "description": "Map roles to camera lists. An empty list grants access to all cameras for the role." + }, + "admin_first_time_login": { + "label": "First-time admin flag", + "description": "When true, the UI may show a help link on the login page informing users how to sign in after an admin password reset." + } + }, + "database": { + "label": "Database", + "description": "Settings for the SQLite database used by Frigate to store tracked object and recording metadata.", + "path": { + "label": "Database path", + "description": "Filesystem path where the Frigate SQLite database file will be stored." + } + }, + "go2rtc": { + "label": "go2rtc", + "description": "Settings for the integrated go2rtc restreaming service used for live stream relaying and translation."
+ }, + "mqtt": { + "label": "MQTT", + "description": "Settings for connecting and publishing telemetry, snapshots, and event details to an MQTT broker.", + "enabled": { + "label": "Enable MQTT", + "description": "Enable or disable MQTT integration for state, events, and snapshots." + }, + "host": { + "label": "MQTT host", + "description": "Hostname or IP address of the MQTT broker." + }, + "port": { + "label": "MQTT port", + "description": "Port of the MQTT broker (usually 1883 for plain MQTT)." + }, + "topic_prefix": { + "label": "Topic prefix", + "description": "MQTT topic prefix for all Frigate topics; must be unique if running multiple instances." + }, + "client_id": { + "label": "Client ID", + "description": "Client identifier used when connecting to the MQTT broker; should be unique per instance." + }, + "stats_interval": { + "label": "Stats interval", + "description": "Interval in seconds for publishing system and camera stats to MQTT." + }, + "user": { + "label": "MQTT username", + "description": "Optional MQTT username; can be provided via environment variables or secrets." + }, + "password": { + "label": "MQTT password", + "description": "Optional MQTT password; can be provided via environment variables or secrets." + }, + "tls_ca_certs": { + "label": "TLS CA certs", + "description": "Path to CA certificate for TLS connections to the broker (for self-signed certs)." + }, + "tls_client_cert": { + "label": "Client cert", + "description": "Client certificate path for TLS mutual authentication; do not set user/password when using client certs." + }, + "tls_client_key": { + "label": "Client key", + "description": "Private key path for the client certificate." + }, + "tls_insecure": { + "label": "TLS insecure", + "description": "Allow insecure TLS connections by skipping hostname verification (not recommended)." + }, + "qos": { + "label": "MQTT QoS", + "description": "Quality of Service level for MQTT publishes/subscriptions (0, 1, or 2)." 
+ } + }, + "notifications": { + "label": "Notifications", + "description": "Settings to enable and control notifications for all cameras; can be overridden per-camera.", + "enabled": { + "label": "Enable notifications", + "description": "Enable or disable notifications for all cameras; can be overridden per-camera." + }, + "email": { + "label": "Notification email", + "description": "Email address used for push notifications or required by certain notification providers." + }, + "cooldown": { + "label": "Cooldown period", + "description": "Cooldown (seconds) between notifications to avoid spamming recipients." + }, + "enabled_in_config": { + "label": "Original notifications state", + "description": "Indicates whether notifications were enabled in the original static configuration." + } + }, + "networking": { + "label": "Networking", + "description": "Network-related settings such as IPv6 enablement for Frigate endpoints.", + "ipv6": { + "label": "IPv6 configuration", + "description": "IPv6-specific settings for Frigate network services.", + "enabled": { + "label": "Enable IPv6", + "description": "Enable IPv6 support for Frigate services (API and UI) where applicable." + } + }, + "listen": { + "label": "Listening ports configuration", + "description": "Configuration for internal and external listening ports. This is for advanced users. For the majority of use cases it's recommended to change the ports section of your Docker compose file.", + "internal": { + "label": "Internal port", + "description": "Internal listening port for Frigate (default 5000)." + }, + "external": { + "label": "External port", + "description": "External listening port for Frigate (default 8971)." 
+ } + } + }, + "proxy": { + "label": "Proxy", + "description": "Settings for integrating Frigate behind a reverse proxy that passes authenticated user headers.", + "header_map": { + "label": "Header mapping", + "description": "Map incoming proxy headers to Frigate user and role fields for proxy-based auth.", + "user": { + "label": "User header", + "description": "Header containing the authenticated username provided by the upstream proxy." + }, + "role": { + "label": "Role header", + "description": "Header containing the authenticated user's role or groups from the upstream proxy." + }, + "role_map": { + "label": "Role mapping", + "description": "Map upstream group values to Frigate roles (for example map admin groups to the admin role)." + } + }, + "logout_url": { + "label": "Logout URL", + "description": "URL to redirect users to when logging out via the proxy." + }, + "auth_secret": { + "label": "Proxy secret", + "description": "Optional secret checked against the X-Proxy-Secret header to verify trusted proxies." + }, + "default_role": { + "label": "Default role", + "description": "Default role assigned to proxy-authenticated users when no role mapping applies (admin or viewer)." + }, + "separator": { + "label": "Separator character", + "description": "Character used to split multiple values provided in proxy headers." + } + }, + "telemetry": { + "label": "Telemetry", + "description": "System telemetry and stats options including GPU and network bandwidth monitoring.", + "network_interfaces": { + "label": "Network interfaces", + "description": "List of network interface name prefixes to monitor for bandwidth statistics." + }, + "stats": { + "label": "System stats", + "description": "Options to enable/disable collection of various system and GPU statistics.", + "amd_gpu_stats": { + "label": "AMD GPU stats", + "description": "Enable collection of AMD GPU statistics if an AMD GPU is present." 
+ }, + "intel_gpu_stats": { + "label": "Intel GPU stats", + "description": "Enable collection of Intel GPU statistics if an Intel GPU is present." + }, + "network_bandwidth": { + "label": "Network bandwidth", + "description": "Enable per-process network bandwidth monitoring for camera ffmpeg processes and detectors (requires capabilities)." + }, + "intel_gpu_device": { + "label": "SR-IOV device", + "description": "Device identifier used when treating Intel GPUs as SR-IOV to fix GPU stats." + } + }, + "version_check": { + "label": "Version check", + "description": "Enable an outbound check to detect if a newer Frigate version is available." + } + }, + "tls": { + "label": "TLS", + "description": "TLS settings for Frigate's web endpoints (port 8971).", + "enabled": { + "label": "Enable TLS", + "description": "Enable TLS for Frigate's web UI and API on the configured TLS port." + } + }, + "ui": { + "label": "UI", + "description": "User interface preferences such as timezone, time/date formatting, and units.", + "timezone": { + "label": "Timezone", + "description": "Optional timezone to display across the UI (defaults to browser local time if unset)." + }, + "time_format": { + "label": "Time format", + "description": "Time format to use in the UI (browser, 12hour, or 24hour)." + }, + "date_style": { + "label": "Date style", + "description": "Date style to use in the UI (full, long, medium, short)." + }, + "time_style": { + "label": "Time style", + "description": "Time style to use in the UI (full, long, medium, short)." + }, + "unit_system": { + "label": "Unit system", + "description": "Unit system for display (metric or imperial) used in the UI and MQTT." 
+ } + }, + "detectors": { + "label": "Detector hardware", + "description": "Configuration for object detectors (CPU, GPU, ONNX backends) and any detector-specific model settings.", + "type": { + "label": "Detector Type", + "description": "Type of detector to use for object detection (for example 'cpu', 'edgetpu', 'openvino')." + }, + "cpu": { + "label": "CPU", + "description": "CPU TFLite detector that runs TensorFlow Lite models on the host CPU without hardware acceleration. Not recommended.", + "type": { + "label": "Type" + }, + "model": { + "label": "Detector specific model configuration", + "description": "Detector-specific model configuration options (path, input size, etc.).", + "path": { + "label": "Custom Object detection model path", + "description": "Path to a custom detection model file (or plus:// for Frigate+ models)." + }, + "labelmap_path": { + "label": "Label map for custom object detector", + "description": "Path to a labelmap file that maps numeric classes to string labels for the detector." + }, + "width": { + "label": "Object detection model input width", + "description": "Width of the model input tensor in pixels." + }, + "height": { + "label": "Object detection model input height", + "description": "Height of the model input tensor in pixels." + }, + "labelmap": { + "label": "Labelmap customization", + "description": "Overrides or remapping entries to merge into the standard labelmap." + }, + "attributes_map": { + "label": "Map of object labels to their attribute labels", + "description": "Mapping from object labels to attribute labels used to attach metadata (for example 'car' -> ['license_plate'])." + }, + "input_tensor": { + "label": "Model Input Tensor Shape", + "description": "Tensor format expected by the model: 'nhwc' or 'nchw'." + }, + "input_pixel_format": { + "label": "Model Input Pixel Color Format", + "description": "Pixel colorspace expected by the model: 'rgb', 'bgr', or 'yuv'." 
+ }, + "input_dtype": { + "label": "Model Input D Type", + "description": "Data type of the model input tensor (for example 'float32')." + }, + "model_type": { + "label": "Object Detection Model Type", + "description": "Detector model architecture type (ssd, yolox, yolonas) used by some detectors for optimization." + } + }, + "model_path": { + "label": "Detector specific model path", + "description": "File path to the detector model binary if required by the chosen detector." + }, + "num_threads": { + "label": "Number of detection threads", + "description": "The number of threads used for CPU-based inference." + } + }, + "deepstack": { + "label": "DeepStack", + "description": "DeepStack/CodeProject.AI detector that sends images to a remote DeepStack HTTP API for inference. Not recommended.", + "type": { + "label": "Type" + }, + "model": { + "label": "Detector specific model configuration", + "description": "Detector-specific model configuration options (path, input size, etc.).", + "path": { + "label": "Custom Object detection model path", + "description": "Path to a custom detection model file (or plus:// for Frigate+ models)." + }, + "labelmap_path": { + "label": "Label map for custom object detector", + "description": "Path to a labelmap file that maps numeric classes to string labels for the detector." + }, + "width": { + "label": "Object detection model input width", + "description": "Width of the model input tensor in pixels." + }, + "height": { + "label": "Object detection model input height", + "description": "Height of the model input tensor in pixels." + }, + "labelmap": { + "label": "Labelmap customization", + "description": "Overrides or remapping entries to merge into the standard labelmap." + }, + "attributes_map": { + "label": "Map of object labels to their attribute labels", + "description": "Mapping from object labels to attribute labels used to attach metadata (for example 'car' -> ['license_plate'])." 
+ }, + "input_tensor": { + "label": "Model Input Tensor Shape", + "description": "Tensor format expected by the model: 'nhwc' or 'nchw'." + }, + "input_pixel_format": { + "label": "Model Input Pixel Color Format", + "description": "Pixel colorspace expected by the model: 'rgb', 'bgr', or 'yuv'." + }, + "input_dtype": { + "label": "Model Input D Type", + "description": "Data type of the model input tensor (for example 'float32')." + }, + "model_type": { + "label": "Object Detection Model Type", + "description": "Detector model architecture type (ssd, yolox, yolonas) used by some detectors for optimization." + } + }, + "model_path": { + "label": "Detector specific model path", + "description": "File path to the detector model binary if required by the chosen detector." + }, + "api_url": { + "label": "DeepStack API URL", + "description": "The URL of the DeepStack API." + }, + "api_timeout": { + "label": "DeepStack API timeout (in seconds)", + "description": "Maximum time allowed for a DeepStack API request." + }, + "api_key": { + "label": "DeepStack API key (if required)", + "description": "Optional API key for authenticated DeepStack services." + } + }, + "degirum": { + "label": "DeGirum", + "description": "DeGirum detector for running models via DeGirum cloud or local inference services.", + "type": { + "label": "Type" + }, + "model": { + "label": "Detector specific model configuration", + "description": "Detector-specific model configuration options (path, input size, etc.).", + "path": { + "label": "Custom Object detection model path", + "description": "Path to a custom detection model file (or plus:// for Frigate+ models)." + }, + "labelmap_path": { + "label": "Label map for custom object detector", + "description": "Path to a labelmap file that maps numeric classes to string labels for the detector." + }, + "width": { + "label": "Object detection model input width", + "description": "Width of the model input tensor in pixels." 
+ }, + "height": { + "label": "Object detection model input height", + "description": "Height of the model input tensor in pixels." + }, + "labelmap": { + "label": "Labelmap customization", + "description": "Overrides or remapping entries to merge into the standard labelmap." + }, + "attributes_map": { + "label": "Map of object labels to their attribute labels", + "description": "Mapping from object labels to attribute labels used to attach metadata (for example 'car' -> ['license_plate'])." + }, + "input_tensor": { + "label": "Model Input Tensor Shape", + "description": "Tensor format expected by the model: 'nhwc' or 'nchw'." + }, + "input_pixel_format": { + "label": "Model Input Pixel Color Format", + "description": "Pixel colorspace expected by the model: 'rgb', 'bgr', or 'yuv'." + }, + "input_dtype": { + "label": "Model Input D Type", + "description": "Data type of the model input tensor (for example 'float32')." + }, + "model_type": { + "label": "Object Detection Model Type", + "description": "Detector model architecture type (ssd, yolox, yolonas) used by some detectors for optimization." + } + }, + "model_path": { + "label": "Detector specific model path", + "description": "File path to the detector model binary if required by the chosen detector." + }, + "location": { + "label": "Inference Location", + "description": "Location of the DeGirum inference engine (e.g. '@cloud', '127.0.0.1')." + }, + "zoo": { + "label": "Model Zoo", + "description": "Path or URL to the DeGirum model zoo." + }, + "token": { + "label": "DeGirum Cloud Token", + "description": "Token for DeGirum Cloud access."
+ } + }, + "edgetpu": { + "label": "EdgeTPU", + "description": "EdgeTPU detector that runs TensorFlow Lite models compiled for Coral EdgeTPU using the EdgeTPU delegate.", + "type": { + "label": "Type" + }, + "model": { + "label": "Detector specific model configuration", + "description": "Detector-specific model configuration options (path, input size, etc.).", + "path": { + "label": "Custom Object detection model path", + "description": "Path to a custom detection model file (or plus:// for Frigate+ models)." + }, + "labelmap_path": { + "label": "Label map for custom object detector", + "description": "Path to a labelmap file that maps numeric classes to string labels for the detector." + }, + "width": { + "label": "Object detection model input width", + "description": "Width of the model input tensor in pixels." + }, + "height": { + "label": "Object detection model input height", + "description": "Height of the model input tensor in pixels." + }, + "labelmap": { + "label": "Labelmap customization", + "description": "Overrides or remapping entries to merge into the standard labelmap." + }, + "attributes_map": { + "label": "Map of object labels to their attribute labels", + "description": "Mapping from object labels to attribute labels used to attach metadata (for example 'car' -> ['license_plate'])." + }, + "input_tensor": { + "label": "Model Input Tensor Shape", + "description": "Tensor format expected by the model: 'nhwc' or 'nchw'." + }, + "input_pixel_format": { + "label": "Model Input Pixel Color Format", + "description": "Pixel colorspace expected by the model: 'rgb', 'bgr', or 'yuv'." + }, + "input_dtype": { + "label": "Model Input D Type", + "description": "Data type of the model input tensor (for example 'float32')." + }, + "model_type": { + "label": "Object Detection Model Type", + "description": "Detector model architecture type (ssd, yolox, yolonas) used by some detectors for optimization." 
+ } + }, + "model_path": { + "label": "Detector specific model path", + "description": "File path to the detector model binary if required by the chosen detector." + }, + "device": { + "label": "Device Type", + "description": "The device to use for EdgeTPU inference (e.g. 'usb', 'pci')." + } + }, + "hailo8l": { + "label": "Hailo-8/Hailo-8L", + "description": "Hailo-8/Hailo-8L detector using HEF models and the HailoRT SDK for inference on Hailo hardware.", + "type": { + "label": "Type" + }, + "model": { + "label": "Detector specific model configuration", + "description": "Detector-specific model configuration options (path, input size, etc.).", + "path": { + "label": "Custom Object detection model path", + "description": "Path to a custom detection model file (or plus:// for Frigate+ models)." + }, + "labelmap_path": { + "label": "Label map for custom object detector", + "description": "Path to a labelmap file that maps numeric classes to string labels for the detector." + }, + "width": { + "label": "Object detection model input width", + "description": "Width of the model input tensor in pixels." + }, + "height": { + "label": "Object detection model input height", + "description": "Height of the model input tensor in pixels." + }, + "labelmap": { + "label": "Labelmap customization", + "description": "Overrides or remapping entries to merge into the standard labelmap." + }, + "attributes_map": { + "label": "Map of object labels to their attribute labels", + "description": "Mapping from object labels to attribute labels used to attach metadata (for example 'car' -> ['license_plate'])." + }, + "input_tensor": { + "label": "Model Input Tensor Shape", + "description": "Tensor format expected by the model: 'nhwc' or 'nchw'." + }, + "input_pixel_format": { + "label": "Model Input Pixel Color Format", + "description": "Pixel colorspace expected by the model: 'rgb', 'bgr', or 'yuv'." 
+ }, + "input_dtype": { + "label": "Model Input D Type", + "description": "Data type of the model input tensor (for example 'float32')." + }, + "model_type": { + "label": "Object Detection Model Type", + "description": "Detector model architecture type (ssd, yolox, yolonas) used by some detectors for optimization." + } + }, + "model_path": { + "label": "Detector specific model path", + "description": "File path to the detector model binary if required by the chosen detector." + }, + "device": { + "label": "Device Type", + "description": "The device to use for Hailo inference (e.g. 'PCIe', 'M.2')." + } + }, + "memryx": { + "label": "MemryX", + "description": "MemryX MX3 detector that runs compiled DFP models on MemryX accelerators.", + "type": { + "label": "Type" + }, + "model": { + "label": "Detector specific model configuration", + "description": "Detector-specific model configuration options (path, input size, etc.).", + "path": { + "label": "Custom Object detection model path", + "description": "Path to a custom detection model file (or plus:// for Frigate+ models)." + }, + "labelmap_path": { + "label": "Label map for custom object detector", + "description": "Path to a labelmap file that maps numeric classes to string labels for the detector." + }, + "width": { + "label": "Object detection model input width", + "description": "Width of the model input tensor in pixels." + }, + "height": { + "label": "Object detection model input height", + "description": "Height of the model input tensor in pixels." + }, + "labelmap": { + "label": "Labelmap customization", + "description": "Overrides or remapping entries to merge into the standard labelmap." + }, + "attributes_map": { + "label": "Map of object labels to their attribute labels", + "description": "Mapping from object labels to attribute labels used to attach metadata (for example 'car' -> ['license_plate'])." 
+ }, + "input_tensor": { + "label": "Model Input Tensor Shape", + "description": "Tensor format expected by the model: 'nhwc' or 'nchw'." + }, + "input_pixel_format": { + "label": "Model Input Pixel Color Format", + "description": "Pixel colorspace expected by the model: 'rgb', 'bgr', or 'yuv'." + }, + "input_dtype": { + "label": "Model Input D Type", + "description": "Data type of the model input tensor (for example 'float32')." + }, + "model_type": { + "label": "Object Detection Model Type", + "description": "Detector model architecture type (ssd, yolox, yolonas) used by some detectors for optimization." + } + }, + "model_path": { + "label": "Detector specific model path", + "description": "File path to the detector model binary if required by the chosen detector." + }, + "device": { + "label": "Device Path", + "description": "The device to use for MemryX inference (e.g. 'PCIe')." + } + }, + "onnx": { + "label": "ONNX", + "description": "ONNX detector for running ONNX models; will use available acceleration backends (CUDA/ROCm/OpenVINO) when available.", + "type": { + "label": "Type" + }, + "model": { + "label": "Detector specific model configuration", + "description": "Detector-specific model configuration options (path, input size, etc.).", + "path": { + "label": "Custom Object detection model path", + "description": "Path to a custom detection model file (or plus:// for Frigate+ models)." + }, + "labelmap_path": { + "label": "Label map for custom object detector", + "description": "Path to a labelmap file that maps numeric classes to string labels for the detector." + }, + "width": { + "label": "Object detection model input width", + "description": "Width of the model input tensor in pixels." + }, + "height": { + "label": "Object detection model input height", + "description": "Height of the model input tensor in pixels." 
+ }, + "labelmap": { + "label": "Labelmap customization", + "description": "Overrides or remapping entries to merge into the standard labelmap." + }, + "attributes_map": { + "label": "Map of object labels to their attribute labels", + "description": "Mapping from object labels to attribute labels used to attach metadata (for example 'car' -> ['license_plate'])." + }, + "input_tensor": { + "label": "Model Input Tensor Shape", + "description": "Tensor format expected by the model: 'nhwc' or 'nchw'." + }, + "input_pixel_format": { + "label": "Model Input Pixel Color Format", + "description": "Pixel colorspace expected by the model: 'rgb', 'bgr', or 'yuv'." + }, + "input_dtype": { + "label": "Model Input D Type", + "description": "Data type of the model input tensor (for example 'float32')." + }, + "model_type": { + "label": "Object Detection Model Type", + "description": "Detector model architecture type (ssd, yolox, yolonas) used by some detectors for optimization." + } + }, + "model_path": { + "label": "Detector specific model path", + "description": "File path to the detector model binary if required by the chosen detector." + }, + "device": { + "label": "Device Type", + "description": "The device to use for ONNX inference (e.g. 'AUTO', 'CPU', 'GPU')." + } + }, + "openvino": { + "label": "OpenVINO", + "description": "OpenVINO detector for AMD and Intel CPUs, Intel GPUs and Intel VPU hardware.", + "type": { + "label": "Type" + }, + "model": { + "label": "Detector specific model configuration", + "description": "Detector-specific model configuration options (path, input size, etc.).", + "path": { + "label": "Custom Object detection model path", + "description": "Path to a custom detection model file (or plus:// for Frigate+ models)." + }, + "labelmap_path": { + "label": "Label map for custom object detector", + "description": "Path to a labelmap file that maps numeric classes to string labels for the detector." 
+ }, + "width": { + "label": "Object detection model input width", + "description": "Width of the model input tensor in pixels." + }, + "height": { + "label": "Object detection model input height", + "description": "Height of the model input tensor in pixels." + }, + "labelmap": { + "label": "Labelmap customization", + "description": "Overrides or remapping entries to merge into the standard labelmap." + }, + "attributes_map": { + "label": "Map of object labels to their attribute labels", + "description": "Mapping from object labels to attribute labels used to attach metadata (for example 'car' -> ['license_plate'])." + }, + "input_tensor": { + "label": "Model Input Tensor Shape", + "description": "Tensor format expected by the model: 'nhwc' or 'nchw'." + }, + "input_pixel_format": { + "label": "Model Input Pixel Color Format", + "description": "Pixel colorspace expected by the model: 'rgb', 'bgr', or 'yuv'." + }, + "input_dtype": { + "label": "Model Input D Type", + "description": "Data type of the model input tensor (for example 'float32')." + }, + "model_type": { + "label": "Object Detection Model Type", + "description": "Detector model architecture type (ssd, yolox, yolonas) used by some detectors for optimization." + } + }, + "model_path": { + "label": "Detector specific model path", + "description": "File path to the detector model binary if required by the chosen detector." + }, + "device": { + "label": "Device Type", + "description": "The device to use for OpenVINO inference (e.g. 'CPU', 'GPU', 'NPU')." 
+ } + }, + "rknn": { + "label": "RKNN", + "description": "RKNN detector for Rockchip NPUs; runs compiled RKNN models on Rockchip hardware.", + "type": { + "label": "Type" + }, + "model": { + "label": "Detector specific model configuration", + "description": "Detector-specific model configuration options (path, input size, etc.).", + "path": { + "label": "Custom Object detection model path", + "description": "Path to a custom detection model file (or plus:// for Frigate+ models)." + }, + "labelmap_path": { + "label": "Label map for custom object detector", + "description": "Path to a labelmap file that maps numeric classes to string labels for the detector." + }, + "width": { + "label": "Object detection model input width", + "description": "Width of the model input tensor in pixels." + }, + "height": { + "label": "Object detection model input height", + "description": "Height of the model input tensor in pixels." + }, + "labelmap": { + "label": "Labelmap customization", + "description": "Overrides or remapping entries to merge into the standard labelmap." + }, + "attributes_map": { + "label": "Map of object labels to their attribute labels", + "description": "Mapping from object labels to attribute labels used to attach metadata (for example 'car' -> ['license_plate'])." + }, + "input_tensor": { + "label": "Model Input Tensor Shape", + "description": "Tensor format expected by the model: 'nhwc' or 'nchw'." + }, + "input_pixel_format": { + "label": "Model Input Pixel Color Format", + "description": "Pixel colorspace expected by the model: 'rgb', 'bgr', or 'yuv'." + }, + "input_dtype": { + "label": "Model Input D Type", + "description": "Data type of the model input tensor (for example 'float32')." + }, + "model_type": { + "label": "Object Detection Model Type", + "description": "Detector model architecture type (ssd, yolox, yolonas) used by some detectors for optimization." 
+ } + }, + "model_path": { + "label": "Detector specific model path", + "description": "File path to the detector model binary if required by the chosen detector." + }, + "num_cores": { + "label": "Number of NPU cores to use.", + "description": "The number of NPU cores to use (0 for auto)." + } + }, + "synaptics": { + "label": "Synaptics", + "description": "Synaptics NPU detector for models in .synap format using the Synap SDK on Synaptics hardware.", + "type": { + "label": "Type" + }, + "model": { + "label": "Detector specific model configuration", + "description": "Detector-specific model configuration options (path, input size, etc.).", + "path": { + "label": "Custom Object detection model path", + "description": "Path to a custom detection model file (or plus:// for Frigate+ models)." + }, + "labelmap_path": { + "label": "Label map for custom object detector", + "description": "Path to a labelmap file that maps numeric classes to string labels for the detector." + }, + "width": { + "label": "Object detection model input width", + "description": "Width of the model input tensor in pixels." + }, + "height": { + "label": "Object detection model input height", + "description": "Height of the model input tensor in pixels." + }, + "labelmap": { + "label": "Labelmap customization", + "description": "Overrides or remapping entries to merge into the standard labelmap." + }, + "attributes_map": { + "label": "Map of object labels to their attribute labels", + "description": "Mapping from object labels to attribute labels used to attach metadata (for example 'car' -> ['license_plate'])." + }, + "input_tensor": { + "label": "Model Input Tensor Shape", + "description": "Tensor format expected by the model: 'nhwc' or 'nchw'." + }, + "input_pixel_format": { + "label": "Model Input Pixel Color Format", + "description": "Pixel colorspace expected by the model: 'rgb', 'bgr', or 'yuv'." 
+ }, + "input_dtype": { + "label": "Model Input D Type", + "description": "Data type of the model input tensor (for example 'float32')." + }, + "model_type": { + "label": "Object Detection Model Type", + "description": "Detector model architecture type (ssd, yolox, yolonas) used by some detectors for optimization." + } + }, + "model_path": { + "label": "Detector specific model path", + "description": "File path to the detector model binary if required by the chosen detector." + } + }, + "teflon_tfl": { + "label": "Teflon", + "description": "Teflon delegate detector for TFLite using Mesa Teflon delegate library to accelerate inference on supported GPUs.", + "type": { + "label": "Type" + }, + "model": { + "label": "Detector specific model configuration", + "description": "Detector-specific model configuration options (path, input size, etc.).", + "path": { + "label": "Custom Object detection model path", + "description": "Path to a custom detection model file (or plus:// for Frigate+ models)." + }, + "labelmap_path": { + "label": "Label map for custom object detector", + "description": "Path to a labelmap file that maps numeric classes to string labels for the detector." + }, + "width": { + "label": "Object detection model input width", + "description": "Width of the model input tensor in pixels." + }, + "height": { + "label": "Object detection model input height", + "description": "Height of the model input tensor in pixels." + }, + "labelmap": { + "label": "Labelmap customization", + "description": "Overrides or remapping entries to merge into the standard labelmap." + }, + "attributes_map": { + "label": "Map of object labels to their attribute labels", + "description": "Mapping from object labels to attribute labels used to attach metadata (for example 'car' -> ['license_plate'])." + }, + "input_tensor": { + "label": "Model Input Tensor Shape", + "description": "Tensor format expected by the model: 'nhwc' or 'nchw'." 
+ }, + "input_pixel_format": { + "label": "Model Input Pixel Color Format", + "description": "Pixel colorspace expected by the model: 'rgb', 'bgr', or 'yuv'." + }, + "input_dtype": { + "label": "Model Input D Type", + "description": "Data type of the model input tensor (for example 'float32')." + }, + "model_type": { + "label": "Object Detection Model Type", + "description": "Detector model architecture type (ssd, yolox, yolonas) used by some detectors for optimization." + } + }, + "model_path": { + "label": "Detector specific model path", + "description": "File path to the detector model binary if required by the chosen detector." + } + }, + "tensorrt": { + "label": "TensorRT", + "description": "TensorRT detector for Nvidia Jetson devices using serialized TensorRT engines for accelerated inference.", + "type": { + "label": "Type" + }, + "model": { + "label": "Detector specific model configuration", + "description": "Detector-specific model configuration options (path, input size, etc.).", + "path": { + "label": "Custom Object detection model path", + "description": "Path to a custom detection model file (or plus:// for Frigate+ models)." + }, + "labelmap_path": { + "label": "Label map for custom object detector", + "description": "Path to a labelmap file that maps numeric classes to string labels for the detector." + }, + "width": { + "label": "Object detection model input width", + "description": "Width of the model input tensor in pixels." + }, + "height": { + "label": "Object detection model input height", + "description": "Height of the model input tensor in pixels." + }, + "labelmap": { + "label": "Labelmap customization", + "description": "Overrides or remapping entries to merge into the standard labelmap." + }, + "attributes_map": { + "label": "Map of object labels to their attribute labels", + "description": "Mapping from object labels to attribute labels used to attach metadata (for example 'car' -> ['license_plate'])." 
+ }, + "input_tensor": { + "label": "Model Input Tensor Shape", + "description": "Tensor format expected by the model: 'nhwc' or 'nchw'." + }, + "input_pixel_format": { + "label": "Model Input Pixel Color Format", + "description": "Pixel colorspace expected by the model: 'rgb', 'bgr', or 'yuv'." + }, + "input_dtype": { + "label": "Model Input D Type", + "description": "Data type of the model input tensor (for example 'float32')." + }, + "model_type": { + "label": "Object Detection Model Type", + "description": "Detector model architecture type (ssd, yolox, yolonas) used by some detectors for optimization." + } + }, + "model_path": { + "label": "Detector specific model path", + "description": "File path to the detector model binary if required by the chosen detector." + }, + "device": { + "label": "GPU Device Index", + "description": "The GPU device index to use." + } + }, + "zmq": { + "label": "ZMQ IPC", + "description": "ZMQ IPC detector that offloads inference to an external process via a ZeroMQ IPC endpoint.", + "type": { + "label": "Type" + }, + "model": { + "label": "Detector specific model configuration", + "description": "Detector-specific model configuration options (path, input size, etc.).", + "path": { + "label": "Custom Object detection model path", + "description": "Path to a custom detection model file (or plus:// for Frigate+ models)." + }, + "labelmap_path": { + "label": "Label map for custom object detector", + "description": "Path to a labelmap file that maps numeric classes to string labels for the detector." + }, + "width": { + "label": "Object detection model input width", + "description": "Width of the model input tensor in pixels." + }, + "height": { + "label": "Object detection model input height", + "description": "Height of the model input tensor in pixels." + }, + "labelmap": { + "label": "Labelmap customization", + "description": "Overrides or remapping entries to merge into the standard labelmap." 
+ }, + "attributes_map": { + "label": "Map of object labels to their attribute labels", + "description": "Mapping from object labels to attribute labels used to attach metadata (for example 'car' -> ['license_plate'])." + }, + "input_tensor": { + "label": "Model Input Tensor Shape", + "description": "Tensor format expected by the model: 'nhwc' or 'nchw'." + }, + "input_pixel_format": { + "label": "Model Input Pixel Color Format", + "description": "Pixel colorspace expected by the model: 'rgb', 'bgr', or 'yuv'." + }, + "input_dtype": { + "label": "Model Input D Type", + "description": "Data type of the model input tensor (for example 'float32')." + }, + "model_type": { + "label": "Object Detection Model Type", + "description": "Detector model architecture type (ssd, yolox, yolonas) used by some detectors for optimization." + } + }, + "model_path": { + "label": "Detector specific model path", + "description": "File path to the detector model binary if required by the chosen detector." + }, + "endpoint": { + "label": "ZMQ IPC endpoint", + "description": "The ZMQ endpoint to connect to." + }, + "request_timeout_ms": { + "label": "ZMQ request timeout in milliseconds", + "description": "Timeout for ZMQ requests in milliseconds." + }, + "linger_ms": { + "label": "ZMQ socket linger in milliseconds", + "description": "Socket linger period in milliseconds." + } + } + }, + "model": { + "label": "Detection model", + "description": "Settings to configure a custom object detection model and its input shape.", + "path": { + "label": "Custom Object detection model path", + "description": "Path to a custom detection model file (or plus:// for Frigate+ models)." + }, + "labelmap_path": { + "label": "Label map for custom object detector", + "description": "Path to a labelmap file that maps numeric classes to string labels for the detector." + }, + "width": { + "label": "Object detection model input width", + "description": "Width of the model input tensor in pixels." 
+ }, + "height": { + "label": "Object detection model input height", + "description": "Height of the model input tensor in pixels." + }, + "labelmap": { + "label": "Labelmap customization", + "description": "Overrides or remapping entries to merge into the standard labelmap." + }, + "attributes_map": { + "label": "Map of object labels to their attribute labels", + "description": "Mapping from object labels to attribute labels used to attach metadata (for example 'car' -> ['license_plate'])." + }, + "input_tensor": { + "label": "Model Input Tensor Shape", + "description": "Tensor format expected by the model: 'nhwc' or 'nchw'." + }, + "input_pixel_format": { + "label": "Model Input Pixel Color Format", + "description": "Pixel colorspace expected by the model: 'rgb', 'bgr', or 'yuv'." + }, + "input_dtype": { + "label": "Model Input D Type", + "description": "Data type of the model input tensor (for example 'float32')." + }, + "model_type": { + "label": "Object Detection Model Type", + "description": "Detector model architecture type (ssd, yolox, yolonas) used by some detectors for optimization." + } + }, + "genai": { + "label": "Generative AI configuration (named providers).", + "description": "Settings for integrated generative AI providers used to generate object descriptions and review summaries.", + "api_key": { + "label": "API key", + "description": "API key required by some providers (can also be set via environment variables)." + }, + "base_url": { + "label": "Base URL", + "description": "Base URL for self-hosted or compatible providers (for example an Ollama instance)." + }, + "model": { + "label": "Model", + "description": "The model to use from the provider for generating descriptions or summaries." + }, + "provider": { + "label": "Provider", + "description": "The GenAI provider to use (for example: ollama, gemini, openai)." + }, + "roles": { + "label": "Roles", + "description": "GenAI roles (tools, vision, embeddings); one provider per role." 
+ }, + "provider_options": { + "label": "Provider options", + "description": "Additional provider-specific options to pass to the GenAI client." + }, + "runtime_options": { + "label": "Runtime options", + "description": "Runtime options passed to the provider for each inference call." + } + }, + "audio": { + "label": "Audio events", + "description": "Settings for audio-based event detection for all cameras; can be overridden per-camera.", + "enabled": { + "label": "Enable audio detection", + "description": "Enable or disable audio event detection for all cameras; can be overridden per-camera." + }, + "max_not_heard": { + "label": "End timeout", + "description": "Amount of seconds without the configured audio type before the audio event is ended." + }, + "min_volume": { + "label": "Minimum volume", + "description": "Minimum RMS volume threshold required to run audio detection; lower values increase sensitivity (e.g., 200 high, 500 medium, 1000 low)." + }, + "listen": { + "label": "Listen types", + "description": "List of audio event types to detect (for example: bark, fire_alarm, scream, speech, yell)." + }, + "filters": { + "label": "Audio filters", + "description": "Per-audio-type filter settings such as confidence thresholds used to reduce false positives." + }, + "enabled_in_config": { + "label": "Original audio state", + "description": "Indicates whether audio detection was originally enabled in the static config file." + }, + "num_threads": { + "label": "Detection threads", + "description": "Number of threads to use for audio detection processing." + } + }, + "birdseye": { + "label": "Birdseye", + "description": "Settings for the Birdseye composite view that composes multiple camera feeds into a single layout.", + "enabled": { + "label": "Enable Birdseye", + "description": "Enable or disable the Birdseye view feature." + }, + "mode": { + "label": "Tracking mode", + "description": "Mode for including cameras in Birdseye: 'objects', 'motion', or 'continuous'." 
+ }, + "restream": { + "label": "Restream RTSP", + "description": "Re-stream the Birdseye output as an RTSP feed; enabling this will keep Birdseye running continuously." + }, + "width": { + "label": "Width", + "description": "Output width (pixels) of the composed Birdseye frame." + }, + "height": { + "label": "Height", + "description": "Output height (pixels) of the composed Birdseye frame." + }, + "quality": { + "label": "Encoding quality", + "description": "Encoding quality for the Birdseye mpeg1 feed (1 highest quality, 31 lowest)." + }, + "inactivity_threshold": { + "label": "Inactivity threshold", + "description": "Seconds of inactivity after which a camera will stop being shown in Birdseye." + }, + "layout": { + "label": "Layout", + "description": "Layout options for the Birdseye composition.", + "scaling_factor": { + "label": "Scaling factor", + "description": "Scaling factor used by the layout calculator (range 1.0 to 5.0)." + }, + "max_cameras": { + "label": "Max cameras", + "description": "Maximum number of cameras to display at once in Birdseye; shows the most recent cameras." + } + }, + "idle_heartbeat_fps": { + "label": "Idle heartbeat FPS", + "description": "Frames-per-second to resend the last composed Birdseye frame when idle; set to 0 to disable." + }, + "order": { + "label": "Position", + "description": "Numeric position controlling the camera's ordering in the Birdseye layout." + } + }, + "detect": { + "label": "Object Detection", + "description": "Settings for the detection/detect role used to run object detection and initialize trackers.", + "enabled": { + "label": "Detection enabled", + "description": "Enable or disable object detection for all cameras; can be overridden per-camera. Detection must be enabled for object tracking to run." + }, + "height": { + "label": "Detect height", + "description": "Height (pixels) of frames used for the detect stream; leave empty to use the native stream resolution." 
+ }, + "width": { + "label": "Detect width", + "description": "Width (pixels) of frames used for the detect stream; leave empty to use the native stream resolution." + }, + "fps": { + "label": "Detect FPS", + "description": "Desired frames per second to run detection on; lower values reduce CPU usage (recommended value is 5, only set higher - at most 10 - if tracking extremely fast moving objects)." + }, + "min_initialized": { + "label": "Minimum initialization frames", + "description": "Number of consecutive detection hits required before creating a tracked object. Increase to reduce false initializations. Default value is fps divided by 2." + }, + "max_disappeared": { + "label": "Maximum disappeared frames", + "description": "Number of frames without a detection before a tracked object is considered gone." + }, + "stationary": { + "label": "Stationary objects config", + "description": "Settings to detect and manage objects that remain stationary for a period of time.", + "interval": { + "label": "Stationary interval", + "description": "How often (in frames) to run a detection check to confirm a stationary object." + }, + "threshold": { + "label": "Stationary threshold", + "description": "Number of frames with no position change required to mark an object as stationary." + }, + "max_frames": { + "label": "Max frames", + "description": "Limits how long stationary objects are tracked before being discarded.", + "default": { + "label": "Default max frames", + "description": "Default maximum frames to track a stationary object before stopping." + }, + "objects": { + "label": "Object max frames", + "description": "Per-object overrides for maximum frames to track stationary objects." + } + }, + "classifier": { + "label": "Enable visual classifier", + "description": "Use a visual classifier to detect truly stationary objects even when bounding boxes jitter." 
+ } + }, + "annotation_offset": { + "label": "Annotation offset", + "description": "Milliseconds to shift detect annotations to better align timeline bounding boxes with recordings; can be positive or negative." + } + }, + "ffmpeg": { + "label": "FFmpeg", + "description": "FFmpeg settings including binary path, args, hwaccel options, and per-role output args.", + "path": { + "label": "FFmpeg path", + "description": "Path to the FFmpeg binary to use or a version alias (\"5.0\" or \"7.0\")." + }, + "global_args": { + "label": "FFmpeg global arguments", + "description": "Global arguments passed to FFmpeg processes." + }, + "hwaccel_args": { + "label": "Hardware acceleration arguments", + "description": "Hardware acceleration arguments for FFmpeg. Provider-specific presets are recommended." + }, + "input_args": { + "label": "Input arguments", + "description": "Input arguments applied to FFmpeg input streams." + }, + "output_args": { + "label": "Output arguments", + "description": "Default output arguments used for different FFmpeg roles such as detect and record.", + "detect": { + "label": "Detect output arguments", + "description": "Default output arguments for detect role streams." + }, + "record": { + "label": "Record output arguments", + "description": "Default output arguments for record role streams." + } + }, + "retry_interval": { + "label": "FFmpeg retry time", + "description": "Seconds to wait before attempting to reconnect a camera stream after failure. Default is 10." + }, + "apple_compatibility": { + "label": "Apple compatibility", + "description": "Enable HEVC tagging for better Apple player compatibility when recording H.265." + }, + "gpu": { + "label": "GPU index", + "description": "Default GPU index used for hardware acceleration if available." 
+ }, + "inputs": { + "label": "Camera inputs", + "description": "List of input stream definitions (paths and roles) for this camera.", + "path": { + "label": "Input path", + "description": "Camera input stream URL or path." + }, + "roles": { + "label": "Input roles", + "description": "Roles for this input stream." + }, + "global_args": { + "label": "FFmpeg global arguments", + "description": "FFmpeg global arguments for this input stream." + }, + "hwaccel_args": { + "label": "Hardware acceleration arguments", + "description": "Hardware acceleration arguments for this input stream." + }, + "input_args": { + "label": "Input arguments", + "description": "Input arguments specific to this stream." + } + } + }, + "live": { + "label": "Live playback", + "description": "Settings used by the Web UI to control live stream resolution and quality.", + "streams": { + "label": "Live stream names", + "description": "Mapping of configured stream names to restream/go2rtc names used for live playback." + }, + "height": { + "label": "Live height", + "description": "Height (pixels) to render the jsmpeg live stream in the Web UI; must be <= detect stream height." + }, + "quality": { + "label": "Live quality", + "description": "Encoding quality for the jsmpeg stream (1 highest, 31 lowest)." + } + }, + "motion": { + "label": "Motion detection", + "description": "Default motion detection settings applied to cameras unless overridden per-camera.", + "enabled": { + "label": "Enable motion detection", + "description": "Enable or disable motion detection for all cameras; can be overridden per-camera." + }, + "threshold": { + "label": "Motion threshold", + "description": "Pixel difference threshold used by the motion detector; higher values reduce sensitivity (range 1-255)." + }, + "lightning_threshold": { + "label": "Lightning threshold", + "description": "Threshold to detect and ignore brief lighting spikes (lower is more sensitive, values between 0.3 and 1.0). 
This does not prevent motion detection entirely; it merely causes the detector to stop analyzing additional frames once the threshold is exceeded. Motion-based recordings are still created during these events."
+ },
+ "skip_motion_threshold": {
+ "label": "Skip motion threshold",
+ "description": "If more than this fraction of the image changes in a single frame, the detector will return no motion boxes and immediately recalibrate. This can save CPU and reduce false positives during lightning, storms, etc., but may miss real events such as a PTZ camera auto-tracking an object. The trade-off is between dropping a few megabytes of recordings versus reviewing a couple short clips. Range 0.0 to 1.0."
+ },
+ "improve_contrast": {
+ "label": "Improve contrast",
+ "description": "Apply contrast improvement to frames before motion analysis to help detection."
+ },
+ "contour_area": {
+ "label": "Contour area",
+ "description": "Minimum contour area in pixels required for a motion contour to be counted."
+ },
+ "delta_alpha": {
+ "label": "Delta alpha",
+ "description": "Alpha blending factor used in frame differencing for motion calculation."
+ },
+ "frame_alpha": {
+ "label": "Frame alpha",
+ "description": "Alpha value used when blending frames for motion preprocessing."
+ },
+ "frame_height": {
+ "label": "Frame height",
+ "description": "Height in pixels to scale frames to when computing motion."
+ },
+ "mask": {
+ "label": "Mask coordinates",
+ "description": "Ordered x,y coordinates defining the motion mask polygon used to include/exclude areas."
+ },
+ "mqtt_off_delay": {
+ "label": "MQTT off delay",
+ "description": "Seconds to wait after last motion before publishing an MQTT 'off' state."
+ },
+ "enabled_in_config": {
+ "label": "Original motion state",
+ "description": "Indicates whether motion detection was enabled in the original static configuration."
+ }, + "raw_mask": { + "label": "Raw Mask" + } + }, + "objects": { + "label": "Objects", + "description": "Object tracking defaults including which labels to track and per-object filters.", + "track": { + "label": "Objects to track", + "description": "List of object labels to track for all cameras; can be overridden per-camera." + }, + "filters": { + "label": "Object filters", + "description": "Filters applied to detected objects to reduce false positives (area, ratio, confidence).", + "min_area": { + "label": "Minimum object area", + "description": "Minimum bounding box area (pixels or percentage) required for this object type. Can be pixels (int) or percentage (float between 0.000001 and 0.99)." + }, + "max_area": { + "label": "Maximum object area", + "description": "Maximum bounding box area (pixels or percentage) allowed for this object type. Can be pixels (int) or percentage (float between 0.000001 and 0.99)." + }, + "min_ratio": { + "label": "Minimum aspect ratio", + "description": "Minimum width/height ratio required for the bounding box to qualify." + }, + "max_ratio": { + "label": "Maximum aspect ratio", + "description": "Maximum width/height ratio allowed for the bounding box to qualify." + }, + "threshold": { + "label": "Confidence threshold", + "description": "Average detection confidence threshold required for the object to be considered a true positive." + }, + "min_score": { + "label": "Minimum confidence", + "description": "Minimum single-frame detection confidence required for the object to be counted." + }, + "mask": { + "label": "Filter mask", + "description": "Polygon coordinates defining where this filter applies within the frame." + }, + "raw_mask": { + "label": "Raw Mask" + } + }, + "mask": { + "label": "Object mask", + "description": "Mask polygon used to prevent object detection in specified areas." 
+ }, + "raw_mask": { + "label": "Raw Mask" + }, + "genai": { + "label": "GenAI object config", + "description": "GenAI options for describing tracked objects and sending frames for generation.", + "enabled": { + "label": "Enable GenAI", + "description": "Enable GenAI generation of descriptions for tracked objects by default." + }, + "use_snapshot": { + "label": "Use snapshots", + "description": "Use object snapshots instead of thumbnails for GenAI description generation." + }, + "prompt": { + "label": "Caption prompt", + "description": "Default prompt template used when generating descriptions with GenAI." + }, + "object_prompts": { + "label": "Object prompts", + "description": "Per-object prompts to customize GenAI outputs for specific labels." + }, + "objects": { + "label": "GenAI objects", + "description": "List of object labels to send to GenAI by default." + }, + "required_zones": { + "label": "Required zones", + "description": "Zones that must be entered for objects to qualify for GenAI description generation." + }, + "debug_save_thumbnails": { + "label": "Save thumbnails", + "description": "Save thumbnails sent to GenAI for debugging and review." + }, + "send_triggers": { + "label": "GenAI triggers", + "description": "Defines when frames should be sent to GenAI (on end, after updates, etc.).", + "tracked_object_end": { + "label": "Send on end", + "description": "Send a request to GenAI when the tracked object ends." + }, + "after_significant_updates": { + "label": "Early GenAI trigger", + "description": "Send a request to GenAI after a specified number of significant updates for the tracked object." + } + }, + "enabled_in_config": { + "label": "Original GenAI state", + "description": "Indicates whether GenAI was enabled in the original static config." 
+ } + } + }, + "record": { + "label": "Recording", + "description": "Recording and retention settings applied to cameras unless overridden per-camera.", + "enabled": { + "label": "Enable recording", + "description": "Enable or disable recording for all cameras; can be overridden per-camera." + }, + "expire_interval": { + "label": "Record cleanup interval", + "description": "Minutes between cleanup passes that remove expired recording segments." + }, + "continuous": { + "label": "Continuous retention", + "description": "Number of days to retain recordings regardless of tracked objects or motion. Set to 0 if you only want to retain recordings of alerts and detections.", + "days": { + "label": "Retention days", + "description": "Days to retain recordings." + } + }, + "motion": { + "label": "Motion retention", + "description": "Number of days to retain recordings triggered by motion regardless of tracked objects. Set to 0 if you only want to retain recordings of alerts and detections.", + "days": { + "label": "Retention days", + "description": "Days to retain recordings." + } + }, + "detections": { + "label": "Detection retention", + "description": "Recording retention settings for detection events including pre/post capture durations.", + "pre_capture": { + "label": "Pre-capture seconds", + "description": "Number of seconds before the detection event to include in the recording." + }, + "post_capture": { + "label": "Post-capture seconds", + "description": "Number of seconds after the detection event to include in the recording." + }, + "retain": { + "label": "Event retention", + "description": "Retention settings for recordings of detection events.", + "days": { + "label": "Retention days", + "description": "Number of days to retain recordings of detection events." + }, + "mode": { + "label": "Retention mode", + "description": "Mode for retention: all (save all segments), motion (save segments with motion), or active_objects (save segments with active objects)." 
+ }
+ }
+ },
+ "alerts": {
+ "label": "Alert retention",
+ "description": "Recording retention settings for alert events including pre/post capture durations.",
+ "pre_capture": {
+ "label": "Pre-capture seconds",
+ "description": "Number of seconds before the alert event to include in the recording."
+ },
+ "post_capture": {
+ "label": "Post-capture seconds",
+ "description": "Number of seconds after the alert event to include in the recording."
+ },
+ "retain": {
+ "label": "Event retention",
+ "description": "Retention settings for recordings of alert events.",
+ "days": {
+ "label": "Retention days",
+ "description": "Number of days to retain recordings of alert events."
+ },
+ "mode": {
+ "label": "Retention mode",
+ "description": "Mode for retention: all (save all segments), motion (save segments with motion), or active_objects (save segments with active objects)."
+ }
+ }
+ },
+ "export": {
+ "label": "Export config",
+ "description": "Settings used when exporting recordings such as timelapse and hardware acceleration.",
+ "hwaccel_args": {
+ "label": "Export hwaccel args",
+ "description": "Hardware acceleration args to use for export/transcode operations."
+ }
+ },
+ "preview": {
+ "label": "Preview config",
+ "description": "Settings controlling the quality of recording previews shown in the UI.",
+ "quality": {
+ "label": "Preview quality",
+ "description": "Preview quality level (very_low, low, medium, high, very_high)."
+ }
+ },
+ "enabled_in_config": {
+ "label": "Original recording state",
+ "description": "Indicates whether recording was enabled in the original static configuration."
+ } + }, + "review": { + "label": "Review", + "description": "Settings that control alerts, detections, and GenAI review summaries used by the UI and storage.", + "alerts": { + "label": "Alerts config", + "description": "Settings for which tracked objects generate alerts and how alerts are retained.", + "enabled": { + "label": "Enable alerts", + "description": "Enable or disable alert generation for all cameras; can be overridden per-camera." + }, + "labels": { + "label": "Alert labels", + "description": "List of object labels that qualify as alerts (for example: car, person)." + }, + "required_zones": { + "label": "Required zones", + "description": "Zones that an object must enter to be considered an alert; leave empty to allow any zone." + }, + "enabled_in_config": { + "label": "Original alerts state", + "description": "Tracks whether alerts were originally enabled in the static configuration." + }, + "cutoff_time": { + "label": "Alerts cutoff time", + "description": "Seconds to wait after no alert-causing activity before cutting off an alert." + } + }, + "detections": { + "label": "Detections config", + "description": "Settings for creating detection events (non-alert) and how long to keep them.", + "enabled": { + "label": "Enable detections", + "description": "Enable or disable detection events for all cameras; can be overridden per-camera." + }, + "labels": { + "label": "Detection labels", + "description": "List of object labels that qualify as detection events." + }, + "required_zones": { + "label": "Required zones", + "description": "Zones that an object must enter to be considered a detection; leave empty to allow any zone." + }, + "cutoff_time": { + "label": "Detections cutoff time", + "description": "Seconds to wait after no detection-causing activity before cutting off a detection." + }, + "enabled_in_config": { + "label": "Original detections state", + "description": "Tracks whether detections were originally enabled in the static configuration." 
+ } + }, + "genai": { + "label": "GenAI config", + "description": "Controls use of generative AI for producing descriptions and summaries of review items.", + "enabled": { + "label": "Enable GenAI descriptions", + "description": "Enable or disable GenAI-generated descriptions and summaries for review items." + }, + "alerts": { + "label": "Enable GenAI for alerts", + "description": "Use GenAI to generate descriptions for alert items." + }, + "detections": { + "label": "Enable GenAI for detections", + "description": "Use GenAI to generate descriptions for detection items." + }, + "image_source": { + "label": "Review image source", + "description": "Source of images sent to GenAI ('preview' or 'recordings'); 'recordings' uses higher quality frames but more tokens." + }, + "additional_concerns": { + "label": "Additional concerns", + "description": "A list of additional concerns or notes the GenAI should consider when evaluating activity on this camera." + }, + "debug_save_thumbnails": { + "label": "Save thumbnails", + "description": "Save thumbnails that are sent to the GenAI provider for debugging and review." + }, + "enabled_in_config": { + "label": "Original GenAI state", + "description": "Tracks whether GenAI review was originally enabled in the static configuration." + }, + "preferred_language": { + "label": "Preferred language", + "description": "Preferred language to request from the GenAI provider for generated responses." + }, + "activity_context_prompt": { + "label": "Activity context prompt", + "description": "Custom prompt describing what is and is not suspicious activity to provide context for GenAI summaries." + } + } + }, + "snapshots": { + "label": "Snapshots", + "description": "Settings for saved JPEG snapshots of tracked objects for all cameras; can be overridden per-camera.", + "enabled": { + "label": "Snapshots enabled", + "description": "Enable or disable saving snapshots for all cameras; can be overridden per-camera." 
+ }, + "clean_copy": { + "label": "Save clean copy", + "description": "Save an unannotated clean copy of snapshots in addition to annotated ones." + }, + "timestamp": { + "label": "Timestamp overlay", + "description": "Overlay a timestamp on saved snapshots." + }, + "bounding_box": { + "label": "Bounding box overlay", + "description": "Draw bounding boxes for tracked objects on saved snapshots." + }, + "crop": { + "label": "Crop snapshot", + "description": "Crop saved snapshots to the detected object's bounding box." + }, + "required_zones": { + "label": "Required zones", + "description": "Zones an object must enter for a snapshot to be saved." + }, + "height": { + "label": "Snapshot height", + "description": "Height (pixels) to resize saved snapshots to; leave empty to preserve original size." + }, + "retain": { + "label": "Snapshot retention", + "description": "Retention settings for saved snapshots including default days and per-object overrides.", + "default": { + "label": "Default retention", + "description": "Default number of days to retain snapshots." + }, + "mode": { + "label": "Retention mode", + "description": "Mode for retention: all (save all segments), motion (save segments with motion), or active_objects (save segments with active objects)." + }, + "objects": { + "label": "Object retention", + "description": "Per-object overrides for snapshot retention days." + } + }, + "quality": { + "label": "JPEG quality", + "description": "JPEG encode quality for saved snapshots (0-100)." + } + }, + "timestamp_style": { + "label": "Timestamp style", + "description": "Styling options for in-feed timestamps applied to debug view and snapshots.", + "position": { + "label": "Timestamp position", + "description": "Position of the timestamp on the image (tl/tr/bl/br)." + }, + "format": { + "label": "Timestamp format", + "description": "Datetime format string used for timestamps (Python datetime format codes)." 
+ }, + "color": { + "label": "Timestamp color", + "description": "RGB color values for the timestamp text (all values 0-255).", + "red": { + "label": "Red", + "description": "Red component (0-255) for timestamp color." + }, + "green": { + "label": "Green", + "description": "Green component (0-255) for timestamp color." + }, + "blue": { + "label": "Blue", + "description": "Blue component (0-255) for timestamp color." + } + }, + "thickness": { + "label": "Timestamp thickness", + "description": "Line thickness of the timestamp text." + }, + "effect": { + "label": "Timestamp effect", + "description": "Visual effect for the timestamp text (none, solid, shadow)." + } + }, + "audio_transcription": { + "label": "Audio transcription", + "description": "Settings for live and speech audio transcription used for events and live captions.", + "enabled": { + "label": "Enable audio transcription", + "description": "Enable or disable automatic audio transcription for all cameras; can be overridden per-camera." + }, + "language": { + "label": "Transcription language", + "description": "Language code used for transcription/translation (for example 'en' for English). See https://whisper-api.com/docs/languages/ for supported language codes." + }, + "device": { + "label": "Transcription device", + "description": "Device key (CPU/GPU) to run the transcription model on. Only NVIDIA CUDA GPUs are currently supported for transcription." + }, + "model_size": { + "label": "Model size", + "description": "Model size to use for offline audio event transcription." + }, + "live_enabled": { + "label": "Live transcription", + "description": "Enable streaming live transcription for audio as it is received." 
+ } + }, + "classification": { + "label": "Object classification", + "description": "Settings for classification models used to refine object labels or state classification.", + "bird": { + "label": "Bird classification config", + "description": "Settings specific to bird classification models.", + "enabled": { + "label": "Bird classification", + "description": "Enable or disable bird classification." + }, + "threshold": { + "label": "Minimum score", + "description": "Minimum classification score required to accept a bird classification." + } + }, + "custom": { + "label": "Custom Classification Models", + "description": "Configuration for custom classification models used for objects or state detection.", + "enabled": { + "label": "Enable model", + "description": "Enable or disable the custom classification model." + }, + "name": { + "label": "Model name", + "description": "Identifier for the custom classification model to use." + }, + "threshold": { + "label": "Score threshold", + "description": "Score threshold used to change the classification state." + }, + "save_attempts": { + "label": "Save attempts", + "description": "How many classification attempts to save for recent classifications UI." + }, + "object_config": { + "objects": { + "label": "Classify objects", + "description": "List of object types to run object classification on." + }, + "classification_type": { + "label": "Classification type", + "description": "Classification type applied: 'sub_label' (adds sub_label) or other supported types." + } + }, + "state_config": { + "cameras": { + "label": "Classification cameras", + "description": "Per-camera crop and settings for running state classification.", + "crop": { + "label": "Classification crop", + "description": "Crop coordinates to use for running classification on this camera." + } + }, + "motion": { + "label": "Run on motion", + "description": "If true, run classification when motion is detected within the specified crop." 
+ }, + "interval": { + "label": "Classification interval", + "description": "Interval (seconds) between periodic classification runs for state classification." + } + } + } + }, + "semantic_search": { + "label": "Semantic Search", + "description": "Settings for Semantic Search which builds and queries object embeddings to find similar items.", + "enabled": { + "label": "Enable semantic search", + "description": "Enable or disable the semantic search feature." + }, + "reindex": { + "label": "Reindex on startup", + "description": "Trigger a full reindex of historical tracked objects into the embeddings database." + }, + "model": { + "label": "Semantic search model", + "description": "The embeddings model to use for semantic search (for example 'jinav1')." + }, + "model_size": { + "label": "Model size", + "description": "Select model size; 'small' runs on CPU and 'large' typically requires GPU." + }, + "device": { + "label": "Device", + "description": "This is an override, to target a specific device. See https://onnxruntime.ai/docs/execution-providers/ for more information" + }, + "triggers": { + "label": "Triggers", + "description": "Actions and matching criteria for camera-specific semantic search triggers.", + "friendly_name": { + "label": "Friendly name", + "description": "Optional friendly name displayed in the UI for this trigger." + }, + "enabled": { + "label": "Enable this trigger", + "description": "Enable or disable this semantic search trigger." + }, + "type": { + "label": "Trigger type", + "description": "Type of trigger: 'thumbnail' (match against image) or 'description' (match against text)." + }, + "data": { + "label": "Trigger content", + "description": "Text phrase or thumbnail ID to match against tracked objects." + }, + "threshold": { + "label": "Trigger threshold", + "description": "Minimum similarity score (0-1) required to activate this trigger." 
+      },
+      "actions": {
+        "label": "Trigger actions",
+        "description": "List of actions to execute when trigger matches (notification, sub_label, attribute)."
+      }
+    }
+  },
+  "face_recognition": {
+    "label": "Face recognition",
+    "description": "Settings for face detection and recognition for all cameras; can be overridden per-camera.",
+    "enabled": {
+      "label": "Enable face recognition",
+      "description": "Enable or disable face recognition for all cameras; can be overridden per-camera."
+    },
+    "model_size": {
+      "label": "Model size",
+      "description": "Model size to use for face embeddings (small/large); larger may require GPU."
+    },
+    "unknown_score": {
+      "label": "Unknown score threshold",
+      "description": "Minimum recognition score required to consider a face a potential match; faces scoring below this are marked as unknown (higher = stricter)."
+    },
+    "detection_threshold": {
+      "label": "Detection threshold",
+      "description": "Minimum detection confidence required to consider a face detection valid."
+    },
+    "recognition_threshold": {
+      "label": "Recognition threshold",
+      "description": "Face embedding distance threshold to consider two faces a match."
+    },
+    "min_area": {
+      "label": "Minimum face area",
+      "description": "Minimum area (pixels) of a detected face box required to attempt recognition."
+    },
+    "min_faces": {
+      "label": "Minimum faces",
+      "description": "Minimum number of face recognitions required before applying a recognized sub-label to a person."
+    },
+    "save_attempts": {
+      "label": "Save attempts",
+      "description": "Number of face recognition attempts to retain for recent recognition UI."
+    },
+    "blur_confidence_filter": {
+      "label": "Blur confidence filter",
+      "description": "Adjust confidence scores based on image blur to reduce false positives for poor quality faces."
+    },
+    "device": {
+      "label": "Device",
+      "description": "This is an override, to target a specific device. 
See https://onnxruntime.ai/docs/execution-providers/ for more information"
+    }
+  },
+  "lpr": {
+    "label": "License Plate Recognition",
+    "description": "License plate recognition settings including detection thresholds, formatting, and known plates.",
+    "enabled": {
+      "label": "Enable LPR",
+      "description": "Enable or disable license plate recognition for all cameras; can be overridden per-camera."
+    },
+    "model_size": {
+      "label": "Model size",
+      "description": "Model size used for text detection/recognition. Most users should use 'small'."
+    },
+    "detection_threshold": {
+      "label": "Detection threshold",
+      "description": "Detection confidence threshold to begin running OCR on a suspected plate."
+    },
+    "min_area": {
+      "label": "Minimum plate area",
+      "description": "Minimum plate area (pixels) required to attempt recognition."
+    },
+    "recognition_threshold": {
+      "label": "Recognition threshold",
+      "description": "Confidence threshold required for recognized plate text to be attached as a sub-label."
+    },
+    "min_plate_length": {
+      "label": "Min plate length",
+      "description": "Minimum number of characters a recognized plate must contain to be considered valid."
+    },
+    "format": {
+      "label": "Plate format regex",
+      "description": "Optional regex to validate recognized plate strings against an expected format."
+    },
+    "match_distance": {
+      "label": "Match distance",
+      "description": "Number of character mismatches allowed when comparing detected plates to known plates."
+    },
+    "known_plates": {
+      "label": "Known plates",
+      "description": "List of plates or regexes to specially track or alert on."
+    },
+    "enhancement": {
+      "label": "Enhancement level",
+      "description": "Enhancement level (0-10) to apply to plate crops prior to OCR; higher values may not always improve results, and levels above 5 may only work with nighttime plates and should be used with caution."
+ }, + "debug_save_plates": { + "label": "Save debug plates", + "description": "Save plate crop images for debugging LPR performance." + }, + "device": { + "label": "Device", + "description": "This is an override, to target a specific device. See https://onnxruntime.ai/docs/execution-providers/ for more information" + }, + "replace_rules": { + "label": "Replacement rules", + "description": "Regex replacement rules used to normalize detected plate strings before matching.", + "pattern": { + "label": "Regex pattern" + }, + "replacement": { + "label": "Replacement string" + } + }, + "expire_time": { + "label": "Expire seconds", + "description": "Time in seconds after which an unseen plate is expired from the tracker (for dedicated LPR cameras only)." + } + }, + "camera_groups": { + "label": "Camera groups", + "description": "Configuration for named camera groups used to organize cameras in the UI.", + "cameras": { + "label": "Camera list", + "description": "Array of camera names included in this group." + }, + "icon": { + "label": "Group icon", + "description": "Icon used to represent the camera group in the UI." + }, + "order": { + "label": "Sort order", + "description": "Numeric order used to sort camera groups in the UI; larger numbers appear later." + } + }, + "camera_mqtt": { + "label": "MQTT", + "description": "MQTT image publishing settings.", + "enabled": { + "label": "Send image", + "description": "Enable publishing image snapshots for objects to MQTT topics for this camera." + }, + "timestamp": { + "label": "Add timestamp", + "description": "Overlay a timestamp on images published to MQTT." + }, + "bounding_box": { + "label": "Add bounding box", + "description": "Draw bounding boxes on images published over MQTT." + }, + "crop": { + "label": "Crop image", + "description": "Crop images published to MQTT to the detected object's bounding box." + }, + "height": { + "label": "Image height", + "description": "Height (pixels) to resize images published over MQTT." 
+ }, + "required_zones": { + "label": "Required zones", + "description": "Zones that an object must enter for an MQTT image to be published." + }, + "quality": { + "label": "JPEG quality", + "description": "JPEG quality for images published to MQTT (0-100)." + } + }, + "camera_ui": { + "label": "Camera UI", + "description": "Display ordering and visibility for this camera in the UI. Ordering affects the default dashboard. For more granular control, use camera groups.", + "order": { + "label": "UI order", + "description": "Numeric order used to sort the camera in the UI (default dashboard and lists); larger numbers appear later." + }, + "dashboard": { + "label": "Show in UI", + "description": "Toggle whether this camera is visible everywhere in the Frigate UI. Disabling this will require manually editing the config to view this camera in the UI again." + } + }, + "onvif": { + "label": "ONVIF", + "description": "ONVIF connection and PTZ autotracking settings for this camera.", + "host": { + "label": "ONVIF host", + "description": "Host (and optional scheme) for the ONVIF service for this camera." + }, + "port": { + "label": "ONVIF port", + "description": "Port number for the ONVIF service." + }, + "user": { + "label": "ONVIF username", + "description": "Username for ONVIF authentication; some devices require admin user for ONVIF." + }, + "password": { + "label": "ONVIF password", + "description": "Password for ONVIF authentication." + }, + "tls_insecure": { + "label": "Disable TLS verify", + "description": "Skip TLS verification and disable digest auth for ONVIF (unsafe; use in safe networks only)." + }, + "autotracking": { + "label": "Autotracking", + "description": "Automatically track moving objects and keep them centered in the frame using PTZ camera movements.", + "enabled": { + "label": "Enable Autotracking", + "description": "Enable or disable automatic PTZ camera tracking of detected objects." 
+ }, + "calibrate_on_startup": { + "label": "Calibrate on start", + "description": "Measure PTZ motor speeds on startup to improve tracking accuracy. Frigate will update config with movement_weights after calibration." + }, + "zooming": { + "label": "Zoom mode", + "description": "Control zoom behavior: disabled (pan/tilt only), absolute (most compatible), or relative (concurrent pan/tilt/zoom)." + }, + "zoom_factor": { + "label": "Zoom factor", + "description": "Control zoom level on tracked objects. Lower values keep more scene in view; higher values zoom in closer but may lose tracking. Values between 0.1 and 0.75." + }, + "track": { + "label": "Tracked objects", + "description": "List of object types that should trigger autotracking." + }, + "required_zones": { + "label": "Required zones", + "description": "Objects must enter one of these zones before autotracking begins." + }, + "return_preset": { + "label": "Return preset", + "description": "ONVIF preset name configured in camera firmware to return to after tracking ends." + }, + "timeout": { + "label": "Return timeout", + "description": "Wait this many seconds after losing tracking before returning camera to preset position." + }, + "movement_weights": { + "label": "Movement weights", + "description": "Calibration values automatically generated by camera calibration. Do not modify manually." + }, + "enabled_in_config": { + "label": "Original autotrack state", + "description": "Internal field to track whether autotracking was enabled in configuration." + } + }, + "ignore_time_mismatch": { + "label": "Ignore time mismatch", + "description": "Ignore time synchronization differences between camera and Frigate server for ONVIF communication." 
+ } + } +} diff --git a/web/public/locales/en/config/go2rtc.json b/web/public/locales/en/config/go2rtc.json deleted file mode 100644 index 76ec33020..000000000 --- a/web/public/locales/en/config/go2rtc.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "label": "Global restream configuration." -} \ No newline at end of file diff --git a/web/public/locales/en/config/groups.json b/web/public/locales/en/config/groups.json new file mode 100644 index 000000000..1663ad169 --- /dev/null +++ b/web/public/locales/en/config/groups.json @@ -0,0 +1,73 @@ +{ + "audio": { + "global": { + "detection": "Global Detection", + "sensitivity": "Global Sensitivity" + }, + "cameras": { + "detection": "Detection", + "sensitivity": "Sensitivity" + } + }, + "timestamp_style": { + "global": { + "appearance": "Global Appearance" + }, + "cameras": { + "appearance": "Appearance" + } + }, + "motion": { + "global": { + "sensitivity": "Global Sensitivity", + "algorithm": "Global Algorithm" + }, + "cameras": { + "sensitivity": "Sensitivity", + "algorithm": "Algorithm" + } + }, + "snapshots": { + "global": { + "display": "Global Display" + }, + "cameras": { + "display": "Display" + } + }, + "detect": { + "global": { + "resolution": "Global Resolution", + "tracking": "Global Tracking" + }, + "cameras": { + "resolution": "Resolution", + "tracking": "Tracking" + } + }, + "objects": { + "global": { + "tracking": "Global Tracking", + "filtering": "Global Filtering" + }, + "cameras": { + "tracking": "Tracking", + "filtering": "Filtering" + } + }, + "record": { + "global": { + "retention": "Global Retention", + "events": "Global Events" + }, + "cameras": { + "retention": "Retention", + "events": "Events" + } + }, + "ffmpeg": { + "cameras": { + "cameraFfmpeg": "Camera-specific FFmpeg arguments" + } + } +} diff --git a/web/public/locales/en/config/live.json b/web/public/locales/en/config/live.json deleted file mode 100644 index 362170137..000000000 --- a/web/public/locales/en/config/live.json +++ /dev/null @@ -1,14 
+0,0 @@ -{ - "label": "Live playback settings.", - "properties": { - "streams": { - "label": "Friendly names and restream names to use for live view." - }, - "height": { - "label": "Live camera view height" - }, - "quality": { - "label": "Live camera view quality" - } - } -} \ No newline at end of file diff --git a/web/public/locales/en/config/logger.json b/web/public/locales/en/config/logger.json deleted file mode 100644 index 3d51786a7..000000000 --- a/web/public/locales/en/config/logger.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "label": "Logging configuration.", - "properties": { - "default": { - "label": "Default logging level." - }, - "logs": { - "label": "Log level for specified processes." - } - } -} \ No newline at end of file diff --git a/web/public/locales/en/config/lpr.json b/web/public/locales/en/config/lpr.json deleted file mode 100644 index 951d1f8f6..000000000 --- a/web/public/locales/en/config/lpr.json +++ /dev/null @@ -1,45 +0,0 @@ -{ - "label": "License Plate recognition config.", - "properties": { - "enabled": { - "label": "Enable license plate recognition." - }, - "model_size": { - "label": "The size of the embeddings model used." - }, - "detection_threshold": { - "label": "License plate object confidence score required to begin running recognition." - }, - "min_area": { - "label": "Minimum area of license plate to begin running recognition." - }, - "recognition_threshold": { - "label": "Recognition confidence score required to add the plate to the object as a sub label." - }, - "min_plate_length": { - "label": "Minimum number of characters a license plate must have to be added to the object as a sub label." - }, - "format": { - "label": "Regular expression for the expected format of license plate." - }, - "match_distance": { - "label": "Allow this number of missing/incorrect characters to still cause a detected plate to match a known plate." - }, - "known_plates": { - "label": "Known plates to track (strings or regular expressions)." 
- }, - "enhancement": { - "label": "Amount of contrast adjustment and denoising to apply to license plate images before recognition." - }, - "debug_save_plates": { - "label": "Save plates captured for LPR for debugging purposes." - }, - "device": { - "label": "The device key to use for LPR.", - "description": "This is an override, to target a specific device. See https://onnxruntime.ai/docs/execution-providers/ for more information" - }, - "replace_rules": { - "label": "List of regex replacement rules for normalizing detected plates. Each rule has 'pattern' and 'replacement'." - } - } -} \ No newline at end of file diff --git a/web/public/locales/en/config/model.json b/web/public/locales/en/config/model.json deleted file mode 100644 index 0bc2c1ddf..000000000 --- a/web/public/locales/en/config/model.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "label": "Detection model configuration.", - "properties": { - "path": { - "label": "Custom Object detection model path." - }, - "labelmap_path": { - "label": "Label map for custom object detector." - }, - "width": { - "label": "Object detection model input width." - }, - "height": { - "label": "Object detection model input height." - }, - "labelmap": { - "label": "Labelmap customization." - }, - "attributes_map": { - "label": "Map of object labels to their attribute labels." - }, - "input_tensor": { - "label": "Model Input Tensor Shape" - }, - "input_pixel_format": { - "label": "Model Input Pixel Color Format" - }, - "input_dtype": { - "label": "Model Input D Type" - }, - "model_type": { - "label": "Object Detection Model Type" - } - } -} \ No newline at end of file diff --git a/web/public/locales/en/config/motion.json b/web/public/locales/en/config/motion.json deleted file mode 100644 index 183bfdf34..000000000 --- a/web/public/locales/en/config/motion.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "label": "Global motion detection configuration." 
-} \ No newline at end of file diff --git a/web/public/locales/en/config/mqtt.json b/web/public/locales/en/config/mqtt.json deleted file mode 100644 index d2625ac83..000000000 --- a/web/public/locales/en/config/mqtt.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "label": "MQTT configuration.", - "properties": { - "enabled": { - "label": "Enable MQTT Communication." - }, - "host": { - "label": "MQTT Host" - }, - "port": { - "label": "MQTT Port" - }, - "topic_prefix": { - "label": "MQTT Topic Prefix" - }, - "client_id": { - "label": "MQTT Client ID" - }, - "stats_interval": { - "label": "MQTT Camera Stats Interval" - }, - "user": { - "label": "MQTT Username" - }, - "password": { - "label": "MQTT Password" - }, - "tls_ca_certs": { - "label": "MQTT TLS CA Certificates" - }, - "tls_client_cert": { - "label": "MQTT TLS Client Certificate" - }, - "tls_client_key": { - "label": "MQTT TLS Client Key" - }, - "tls_insecure": { - "label": "MQTT TLS Insecure" - }, - "qos": { - "label": "MQTT QoS" - } - } -} \ No newline at end of file diff --git a/web/public/locales/en/config/networking.json b/web/public/locales/en/config/networking.json deleted file mode 100644 index 0f8d9cc54..000000000 --- a/web/public/locales/en/config/networking.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "label": "Networking configuration", - "properties": { - "ipv6": { - "label": "Network configuration", - "properties": { - "enabled": { - "label": "Enable IPv6 for port 5000 and/or 8971" - } - } - } - } -} \ No newline at end of file diff --git a/web/public/locales/en/config/notifications.json b/web/public/locales/en/config/notifications.json deleted file mode 100644 index b529f10e0..000000000 --- a/web/public/locales/en/config/notifications.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "label": "Global notification configuration.", - "properties": { - "enabled": { - "label": "Enable notifications" - }, - "email": { - "label": "Email required for push." 
- }, - "cooldown": { - "label": "Cooldown period for notifications (time in seconds)." - }, - "enabled_in_config": { - "label": "Keep track of original state of notifications." - } - } -} \ No newline at end of file diff --git a/web/public/locales/en/config/objects.json b/web/public/locales/en/config/objects.json deleted file mode 100644 index f041672a0..000000000 --- a/web/public/locales/en/config/objects.json +++ /dev/null @@ -1,77 +0,0 @@ -{ - "label": "Global object configuration.", - "properties": { - "track": { - "label": "Objects to track." - }, - "filters": { - "label": "Object filters.", - "properties": { - "min_area": { - "label": "Minimum area of bounding box for object to be counted. Can be pixels (int) or percentage (float between 0.000001 and 0.99)." - }, - "max_area": { - "label": "Maximum area of bounding box for object to be counted. Can be pixels (int) or percentage (float between 0.000001 and 0.99)." - }, - "min_ratio": { - "label": "Minimum ratio of bounding box's width/height for object to be counted." - }, - "max_ratio": { - "label": "Maximum ratio of bounding box's width/height for object to be counted." - }, - "threshold": { - "label": "Average detection confidence threshold for object to be counted." - }, - "min_score": { - "label": "Minimum detection confidence for object to be counted." - }, - "mask": { - "label": "Detection area polygon mask for this filter configuration." - } - } - }, - "mask": { - "label": "Object mask." - }, - "genai": { - "label": "Config for using genai to analyze objects.", - "properties": { - "enabled": { - "label": "Enable GenAI for camera." - }, - "use_snapshot": { - "label": "Use snapshots for generating descriptions." - }, - "prompt": { - "label": "Default caption prompt." - }, - "object_prompts": { - "label": "Object specific prompts." - }, - "objects": { - "label": "List of objects to run generative AI for." 
- }, - "required_zones": { - "label": "List of required zones to be entered in order to run generative AI." - }, - "debug_save_thumbnails": { - "label": "Save thumbnails sent to generative AI for debugging purposes." - }, - "send_triggers": { - "label": "What triggers to use to send frames to generative AI for a tracked object.", - "properties": { - "tracked_object_end": { - "label": "Send once the object is no longer tracked." - }, - "after_significant_updates": { - "label": "Send an early request to generative AI when X frames accumulated." - } - } - }, - "enabled_in_config": { - "label": "Keep track of original state of generative AI." - } - } - } - } -} \ No newline at end of file diff --git a/web/public/locales/en/config/proxy.json b/web/public/locales/en/config/proxy.json deleted file mode 100644 index 732d6fafd..000000000 --- a/web/public/locales/en/config/proxy.json +++ /dev/null @@ -1,31 +0,0 @@ -{ - "label": "Proxy configuration.", - "properties": { - "header_map": { - "label": "Header mapping definitions for proxy user passing.", - "properties": { - "user": { - "label": "Header name from upstream proxy to identify user." - }, - "role": { - "label": "Header name from upstream proxy to identify user role." - }, - "role_map": { - "label": "Mapping of Frigate roles to upstream group values. " - } - } - }, - "logout_url": { - "label": "Redirect url for logging out with proxy." - }, - "auth_secret": { - "label": "Secret value for proxy authentication." - }, - "default_role": { - "label": "Default role for proxy users." - }, - "separator": { - "label": "The character used to separate values in a mapped header." 
- } - } -} \ No newline at end of file diff --git a/web/public/locales/en/config/record.json b/web/public/locales/en/config/record.json deleted file mode 100644 index 81139084e..000000000 --- a/web/public/locales/en/config/record.json +++ /dev/null @@ -1,93 +0,0 @@ -{ - "label": "Global record configuration.", - "properties": { - "enabled": { - "label": "Enable record on all cameras." - }, - "sync_recordings": { - "label": "Sync recordings with disk on startup and once a day." - }, - "expire_interval": { - "label": "Number of minutes to wait between cleanup runs." - }, - "continuous": { - "label": "Continuous recording retention settings.", - "properties": { - "days": { - "label": "Default retention period." - } - } - }, - "motion": { - "label": "Motion recording retention settings.", - "properties": { - "days": { - "label": "Default retention period." - } - } - }, - "detections": { - "label": "Detection specific retention settings.", - "properties": { - "pre_capture": { - "label": "Seconds to retain before event starts." - }, - "post_capture": { - "label": "Seconds to retain after event ends." - }, - "retain": { - "label": "Event retention settings.", - "properties": { - "days": { - "label": "Default retention period." - }, - "mode": { - "label": "Retain mode." - } - } - } - } - }, - "alerts": { - "label": "Alert specific retention settings.", - "properties": { - "pre_capture": { - "label": "Seconds to retain before event starts." - }, - "post_capture": { - "label": "Seconds to retain after event ends." - }, - "retain": { - "label": "Event retention settings.", - "properties": { - "days": { - "label": "Default retention period." - }, - "mode": { - "label": "Retain mode." - } - } - } - } - }, - "export": { - "label": "Recording Export Config", - "properties": { - "timelapse_args": { - "label": "Timelapse Args" - } - } - }, - "preview": { - "label": "Recording Preview Config", - "properties": { - "quality": { - "label": "Quality of recording preview." 
- } - } - }, - "enabled_in_config": { - "label": "Keep track of original state of recording." - } - } -} \ No newline at end of file diff --git a/web/public/locales/en/config/review.json b/web/public/locales/en/config/review.json deleted file mode 100644 index dba83ee1c..000000000 --- a/web/public/locales/en/config/review.json +++ /dev/null @@ -1,74 +0,0 @@ -{ - "label": "Review configuration.", - "properties": { - "alerts": { - "label": "Review alerts config.", - "properties": { - "enabled": { - "label": "Enable alerts." - }, - "labels": { - "label": "Labels to create alerts for." - }, - "required_zones": { - "label": "List of required zones to be entered in order to save the event as an alert." - }, - "enabled_in_config": { - "label": "Keep track of original state of alerts." - }, - "cutoff_time": { - "label": "Time to cutoff alerts after no alert-causing activity has occurred." - } - } - }, - "detections": { - "label": "Review detections config.", - "properties": { - "enabled": { - "label": "Enable detections." - }, - "labels": { - "label": "Labels to create detections for." - }, - "required_zones": { - "label": "List of required zones to be entered in order to save the event as a detection." - }, - "cutoff_time": { - "label": "Time to cutoff detection after no detection-causing activity has occurred." - }, - "enabled_in_config": { - "label": "Keep track of original state of detections." - } - } - }, - "genai": { - "label": "Review description genai config.", - "properties": { - "enabled": { - "label": "Enable GenAI descriptions for review items." - }, - "alerts": { - "label": "Enable GenAI for alerts." - }, - "detections": { - "label": "Enable GenAI for detections." - }, - "additional_concerns": { - "label": "Additional concerns that GenAI should make note of on this camera." - }, - "debug_save_thumbnails": { - "label": "Save thumbnails sent to generative AI for debugging purposes." 
- }, - "enabled_in_config": { - "label": "Keep track of original state of generative AI." - }, - "preferred_language": { - "label": "Preferred language for GenAI Response" - }, - "activity_context_prompt": { - "label": "Custom activity context prompt defining normal activity patterns for this property." - } - } - } - } -} diff --git a/web/public/locales/en/config/safe_mode.json b/web/public/locales/en/config/safe_mode.json deleted file mode 100644 index 352f78b29..000000000 --- a/web/public/locales/en/config/safe_mode.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "label": "If Frigate should be started in safe mode." -} \ No newline at end of file diff --git a/web/public/locales/en/config/semantic_search.json b/web/public/locales/en/config/semantic_search.json deleted file mode 100644 index 2c46640bb..000000000 --- a/web/public/locales/en/config/semantic_search.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "label": "Semantic search configuration.", - "properties": { - "enabled": { - "label": "Enable semantic search." - }, - "reindex": { - "label": "Reindex all tracked objects on startup." - }, - "model": { - "label": "The CLIP model to use for semantic search." - }, - "model_size": { - "label": "The size of the embeddings model used." - }, - "device": { - "label": "The device key to use for semantic search.", - "description": "This is an override, to target a specific device. See https://onnxruntime.ai/docs/execution-providers/ for more information" - } - } -} \ No newline at end of file diff --git a/web/public/locales/en/config/snapshots.json b/web/public/locales/en/config/snapshots.json deleted file mode 100644 index a6336140e..000000000 --- a/web/public/locales/en/config/snapshots.json +++ /dev/null @@ -1,43 +0,0 @@ -{ - "label": "Global snapshots configuration.", - "properties": { - "enabled": { - "label": "Snapshots enabled." - }, - "clean_copy": { - "label": "Create a clean copy of the snapshot image." 
- }, - "timestamp": { - "label": "Add a timestamp overlay on the snapshot." - }, - "bounding_box": { - "label": "Add a bounding box overlay on the snapshot." - }, - "crop": { - "label": "Crop the snapshot to the detected object." - }, - "required_zones": { - "label": "List of required zones to be entered in order to save a snapshot." - }, - "height": { - "label": "Snapshot image height." - }, - "retain": { - "label": "Snapshot retention.", - "properties": { - "default": { - "label": "Default retention period." - }, - "mode": { - "label": "Retain mode." - }, - "objects": { - "label": "Object retention period." - } - } - }, - "quality": { - "label": "Quality of the encoded jpeg (0-100)." - } - } -} \ No newline at end of file diff --git a/web/public/locales/en/config/telemetry.json b/web/public/locales/en/config/telemetry.json deleted file mode 100644 index 802ced2a0..000000000 --- a/web/public/locales/en/config/telemetry.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "label": "Telemetry configuration.", - "properties": { - "network_interfaces": { - "label": "Enabled network interfaces for bandwidth calculation." - }, - "stats": { - "label": "System Stats Configuration", - "properties": { - "amd_gpu_stats": { - "label": "Enable AMD GPU stats." - }, - "intel_gpu_stats": { - "label": "Enable Intel GPU stats." - }, - "network_bandwidth": { - "label": "Enable network bandwidth for ffmpeg processes." - }, - "intel_gpu_device": { - "label": "Define the device to use when gathering SR-IOV stats." - } - } - }, - "version_check": { - "label": "Enable latest version check." - } - } -} \ No newline at end of file diff --git a/web/public/locales/en/config/timestamp_style.json b/web/public/locales/en/config/timestamp_style.json deleted file mode 100644 index 6a3119423..000000000 --- a/web/public/locales/en/config/timestamp_style.json +++ /dev/null @@ -1,31 +0,0 @@ -{ - "label": "Global timestamp style configuration.", - "properties": { - "position": { - "label": "Timestamp position." 
- }, - "format": { - "label": "Timestamp format." - }, - "color": { - "label": "Timestamp color.", - "properties": { - "red": { - "label": "Red" - }, - "green": { - "label": "Green" - }, - "blue": { - "label": "Blue" - } - } - }, - "thickness": { - "label": "Timestamp thickness." - }, - "effect": { - "label": "Timestamp effect." - } - } -} \ No newline at end of file diff --git a/web/public/locales/en/config/tls.json b/web/public/locales/en/config/tls.json deleted file mode 100644 index 58493ff40..000000000 --- a/web/public/locales/en/config/tls.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "label": "TLS configuration.", - "properties": { - "enabled": { - "label": "Enable TLS for port 8971" - } - } -} \ No newline at end of file diff --git a/web/public/locales/en/config/ui.json b/web/public/locales/en/config/ui.json deleted file mode 100644 index cdd91cb53..000000000 --- a/web/public/locales/en/config/ui.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "label": "UI configuration.", - "properties": { - "timezone": { - "label": "Override UI timezone." - }, - "time_format": { - "label": "Override UI time format." - }, - "date_style": { - "label": "Override UI dateStyle." - }, - "time_style": { - "label": "Override UI timeStyle." - }, - "unit_system": { - "label": "The unit system to use for measurements." 
- } - } -} diff --git a/web/public/locales/en/config/validation.json b/web/public/locales/en/config/validation.json new file mode 100644 index 000000000..6f3b5f686 --- /dev/null +++ b/web/public/locales/en/config/validation.json @@ -0,0 +1,32 @@ +{ + "minimum": "Must be at least {{limit}}", + "maximum": "Must be at most {{limit}}", + "exclusiveMinimum": "Must be greater than {{limit}}", + "exclusiveMaximum": "Must be less than {{limit}}", + "minLength": "Must be at least {{limit}} character(s)", + "maxLength": "Must be at most {{limit}} character(s)", + "minItems": "Must have at least {{limit}} items", + "maxItems": "Must have at most {{limit}} items", + "pattern": "Invalid format", + "required": "This field is required", + "type": "Invalid value type", + "enum": "Must be one of the allowed values", + "const": "Value does not match expected constant", + "uniqueItems": "All items must be unique", + "format": "Invalid format", + "additionalProperties": "Unknown property is not allowed", + "oneOf": "Must match exactly one of the allowed schemas", + "anyOf": "Must match at least one of the allowed schemas", + "proxy": { + "header_map": { + "roleHeaderRequired": "Role header is required when role mappings are configured." + } + }, + "ffmpeg": { + "inputs": { + "rolesUnique": "Each role can only be assigned to one input stream.", + "detectRequired": "At least one input stream must be assigned the 'detect' role.", + "hwaccelDetectOnly": "Only the input stream with the detect role can define hardware acceleration arguments." + } + } +} diff --git a/web/public/locales/en/config/version.json b/web/public/locales/en/config/version.json deleted file mode 100644 index e777d7573..000000000 --- a/web/public/locales/en/config/version.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "label": "Current config version." 
-} \ No newline at end of file diff --git a/web/public/locales/en/views/chat.json b/web/public/locales/en/views/chat.json new file mode 100644 index 000000000..ec9e65e6e --- /dev/null +++ b/web/public/locales/en/views/chat.json @@ -0,0 +1,24 @@ +{ + "title": "Frigate Chat", + "subtitle": "Your AI assistant for camera management and insights", + "placeholder": "Ask anything...", + "error": "Something went wrong. Please try again.", + "processing": "Processing...", + "toolsUsed": "Used: {{tools}}", + "showTools": "Show tools ({{count}})", + "hideTools": "Hide tools", + "call": "Call", + "result": "Result", + "arguments": "Arguments:", + "response": "Response:", + "send": "Send", + "suggested_requests": "Try asking:", + "starting_requests": { + "show_recent_events": "Show recent events", + "show_camera_status": "Show camera status" + }, + "starting_requests_prompts": { + "show_recent_events": "Show me the recent events from the last hour", + "show_camera_status": "What is the current status of my cameras?" 
+ } +} diff --git a/web/public/locales/en/views/events.json b/web/public/locales/en/views/events.json index ea3ee853d..ec0b29116 100644 --- a/web/public/locales/en/views/events.json +++ b/web/public/locales/en/views/events.json @@ -61,5 +61,25 @@ "detected": "detected", "normalActivity": "Normal", "needsReview": "Needs review", - "securityConcern": "Security concern" + "securityConcern": "Security concern", + "motionSearch": { + "menuItem": "Motion search", + "openMenu": "Camera options" + }, + "motionPreviews": { + "menuItem": "View motion previews", + "title": "Motion previews: {{camera}}", + "mobileSettingsTitle": "Motion Preview Settings", + "mobileSettingsDesc": "Adjust playback speed and dimming, and choose a date to review motion-only clips.", + "dim": "Dim", + "dimAria": "Adjust dimming intensity", + "dimDesc": "Increase dimming to increase motion area visibility.", + "speed": "Speed", + "speedAria": "Select preview playback speed", + "speedDesc": "Choose how quickly preview clips play.", + "back": "Back", + "empty": "No previews available", + "noPreview": "Preview unavailable", + "seekAria": "Seek {{camera}} player to {{time}}" + } } diff --git a/web/public/locales/en/views/explore.json b/web/public/locales/en/views/explore.json index 53b04e6c4..661a9a5e9 100644 --- a/web/public/locales/en/views/explore.json +++ b/web/public/locales/en/views/explore.json @@ -216,6 +216,10 @@ }, "hideObjectDetails": { "label": "Hide object path" + }, + "debugReplay": { + "label": "Debug replay", + "aria": "View this tracked object in the debug replay view" } }, "dialog": { diff --git a/web/public/locales/en/views/exports.json b/web/public/locales/en/views/exports.json index 4a79d20e1..8f9e8205e 100644 --- a/web/public/locales/en/views/exports.json +++ b/web/public/locales/en/views/exports.json @@ -2,6 +2,10 @@ "documentTitle": "Export - Frigate", "search": "Search", "noExports": "No exports found", + "headings": { + "cases": "Cases", + "uncategorizedExports": "Uncategorized 
Exports" + }, "deleteExport": "Delete Export", "deleteExport.desc": "Are you sure you want to delete {{exportName}}?", "editExport": { @@ -13,11 +17,21 @@ "shareExport": "Share export", "downloadVideo": "Download video", "editName": "Edit name", - "deleteExport": "Delete export" + "deleteExport": "Delete export", + "assignToCase": "Add to case" }, "toast": { "error": { - "renameExportFailed": "Failed to rename export: {{errorMessage}}" + "renameExportFailed": "Failed to rename export: {{errorMessage}}", + "assignCaseFailed": "Failed to update case assignment: {{errorMessage}}" } + }, + "caseDialog": { + "title": "Add to case", + "description": "Choose an existing case or create a new one.", + "selectLabel": "Case", + "newCaseOption": "Create new case", + "nameLabel": "Case name", + "descriptionLabel": "Description" } } diff --git a/web/public/locales/en/views/motionSearch.json b/web/public/locales/en/views/motionSearch.json new file mode 100644 index 000000000..6e22c3203 --- /dev/null +++ b/web/public/locales/en/views/motionSearch.json @@ -0,0 +1,75 @@ +{ + "documentTitle": "Motion Search - Frigate", + "title": "Motion Search", + "description": "Draw a polygon to define the region of interest, and specify a time range to search for motion changes within that region.", + "selectCamera": "Motion Search is loading", + "startSearch": "Start Search", + "searchStarted": "Search started", + "searchCancelled": "Search cancelled", + "cancelSearch": "Cancel", + "searching": "Search in progress.", + "searchComplete": "Search complete", + "noResultsYet": "Run a search to find motion changes in the selected region", + "noChangesFound": "No pixel changes detected in the selected region", + "changesFound_one": "Found {{count}} motion change", + "changesFound_other": "Found {{count}} motion changes", + "framesProcessed": "{{count}} frames processed", + "jumpToTime": "Jump to this time", + "results": "Results", + "showSegmentHeatmap": "Heatmap", + "newSearch": "New Search", + 
"clearResults": "Clear Results", + "clearROI": "Clear polygon", + "polygonControls": { + "points_one": "{{count}} point", + "points_other": "{{count}} points", + "undo": "Undo last point", + "reset": "Reset polygon" + }, + "motionHeatmapLabel": "Motion Heatmap", + "dialog": { + "title": "Motion Search", + "cameraLabel": "Camera", + "previewAlt": "Camera preview for {{camera}}" + }, + "timeRange": { + "title": "Search Range", + "start": "Start time", + "end": "End time" + }, + "settings": { + "title": "Search Settings", + "parallelMode": "Parallel mode", + "parallelModeDesc": "Scan multiple recording segments at the same time (faster, but significantly more CPU intensive)", + "threshold": "Sensitivity Threshold", + "thresholdDesc": "Lower values detect smaller changes (1-255)", + "minArea": "Minimum Change Area", + "minAreaDesc": "Minimum percentage of the region of interest that must change to be considered significant", + "frameSkip": "Frame Skip", + "frameSkipDesc": "Process every Nth frame. Set this to your camera's frame rate to process one frame per second (e.g. 5 for a 5 FPS camera, 30 for a 30 FPS camera). 
Higher values will be faster, but may miss short motion events.", + "maxResults": "Maximum Results", + "maxResultsDesc": "Stop after this many matching timestamps" + }, + "errors": { + "noCamera": "Please select a camera", + "noROI": "Please draw a region of interest", + "noTimeRange": "Please select a time range", + "invalidTimeRange": "End time must be after start time", + "searchFailed": "Search failed: {{message}}", + "polygonTooSmall": "Polygon must have at least 3 points", + "unknown": "Unknown error" + }, + "changePercentage": "{{percentage}}% changed", + "metrics": { + "title": "Search Metrics", + "segmentsScanned": "Segments scanned", + "segmentsProcessed": "Processed", + "segmentsSkippedInactive": "Skipped (no activity)", + "segmentsSkippedHeatmap": "Skipped (no ROI overlap)", + "fallbackFullRange": "Fallback full-range scan", + "framesDecoded": "Frames decoded", + "wallTime": "Search time", + "segmentErrors": "Segment errors", + "seconds": "{{seconds}}s" + } +} diff --git a/web/public/locales/en/views/replay.json b/web/public/locales/en/views/replay.json new file mode 100644 index 000000000..a966626f5 --- /dev/null +++ b/web/public/locales/en/views/replay.json @@ -0,0 +1,54 @@ +{ + "title": "Debug Replay", + "description": "Replay camera recordings for debugging. The object list shows a time-delayed summary of detected objects and the Messages tab shows a stream of Frigate's internal messages from the replay footage.", + "websocket_messages": "Messages", + "dialog": { + "title": "Start Debug Replay", + "description": "Create a temporary replay camera that loops historical footage for debugging object detection and tracking issues. The replay camera will have the same detection configuration as the source camera. 
Choose a time range to begin.", + "camera": "Source Camera", + "timeRange": "Time Range", + "preset": { + "1m": "Last 1 Minute", + "5m": "Last 5 Minutes", + "timeline": "From Timeline", + "custom": "Custom" + }, + "startButton": "Start Replay", + "selectFromTimeline": "Select", + "starting": "Starting replay...", + "startLabel": "Start", + "endLabel": "End", + "toast": { + "success": "Debug replay started successfully", + "error": "Failed to start debug replay: {{error}}", + "alreadyActive": "A replay session is already active", + "stopped": "Debug replay stopped", + "stopError": "Failed to stop debug replay: {{error}}", + "goToReplay": "Go to Replay" + } + }, + "page": { + "noSession": "No Active Replay Session", + "noSessionDesc": "Start a debug replay from the History view by clicking the Debug Replay button in the toolbar.", + "goToRecordings": "Go to History", + "sourceCamera": "Source Camera", + "replayCamera": "Replay Camera", + "initializingReplay": "Initializing replay...", + "stoppingReplay": "Stopping replay...", + "stopReplay": "Stop Replay", + "confirmStop": { + "title": "Stop Debug Replay?", + "description": "This will stop the replay session and clean up all temporary data. Are you sure?", + "confirm": "Stop Replay", + "cancel": "Cancel" + }, + "activity": "Activity", + "objects": "Object List", + "audioDetections": "Audio Detections", + "noActivity": "No activity detected", + "activeTracking": "Active tracking", + "noActiveTracking": "No active tracking", + "configuration": "Configuration", + "configurationDesc": "Fine tune motion detection and object tracking settings for the debug replay camera. No changes are saved to your Frigate configuration file." 
+ } +} diff --git a/web/public/locales/en/views/settings.json b/web/public/locales/en/views/settings.json index ea2869986..81c9b8075 100644 --- a/web/public/locales/en/views/settings.json +++ b/web/public/locales/en/views/settings.json @@ -8,23 +8,83 @@ "masksAndZones": "Mask and Zone Editor - Frigate", "motionTuner": "Motion Tuner - Frigate", "object": "Debug - Frigate", - "general": "UI Settings - Frigate", + "general": "Profile Settings - Frigate", + "globalConfig": "Global Configuration - Frigate", + "cameraConfig": "Camera Configuration - Frigate", "frigatePlus": "Frigate+ Settings - Frigate", - "notifications": "Notification Settings - Frigate" + "notifications": "Notification Settings - Frigate", + "maintenance": "Maintenance - Frigate" }, "menu": { + "general": "General", + "globalConfig": "Global configuration", + "system": "System", + "integrations": "Integrations", + "cameras": "Camera configuration", "ui": "UI", - "enrichments": "Enrichments", + "profileSettings": "Profile settings", + "globalDetect": "Object detection", + "globalRecording": "Recording", + "globalSnapshots": "Snapshots", + "globalFfmpeg": "FFmpeg", + "globalMotion": "Motion detection", + "globalObjects": "Objects", + "globalReview": "Review", + "globalAudioEvents": "Audio events", + "globalLivePlayback": "Live playback", + "globalTimestampStyle": "Timestamp style", + "systemDatabase": "Database", + "systemTls": "TLS", + "systemAuthentication": "Authentication", + "systemNetworking": "Networking", + "systemProxy": "Proxy", + "systemUi": "UI", + "systemLogging": "Logging", + "systemEnvironmentVariables": "Environment variables", + "systemTelemetry": "Telemetry", + "systemBirdseye": "Birdseye", + "systemFfmpeg": "FFmpeg", + "systemDetectorHardware": "Detector hardware", + "systemDetectionModel": "Detection model", + "systemMqtt": "MQTT", + "integrationSemanticSearch": "Semantic search", + "integrationGenerativeAi": "Generative AI", + "integrationFaceRecognition": "Face recognition", + 
"integrationLpr": "License plate recognition", + "integrationObjectClassification": "Object classification", + "integrationAudioTranscription": "Audio transcription", + "cameraDetect": "Object detection", + "cameraFfmpeg": "FFmpeg", + "cameraRecording": "Recording", + "cameraSnapshots": "Snapshots", + "cameraMotion": "Motion detection", + "cameraObjects": "Objects", + "cameraConfigReview": "Review", + "cameraAudioEvents": "Audio events", + "cameraAudioTranscription": "Audio transcription", + "cameraNotifications": "Notifications", + "cameraLivePlayback": "Live playback", + "cameraBirdseye": "Birdseye", + "cameraFaceRecognition": "Face recognition", + "cameraLpr": "License plate recognition", + "cameraMqttConfig": "MQTT", + "cameraOnvif": "ONVIF", + "cameraUi": "Camera UI", + "cameraTimestampStyle": "Timestamp style", + "cameraMqtt": "Camera MQTT", "cameraManagement": "Management", "cameraReview": "Review", "masksAndZones": "Masks / Zones", - "motionTuner": "Motion Tuner", - "triggers": "Triggers", - "debug": "Debug", + "motionTuner": "Motion tuner", + "enrichments": "Enrichments", "users": "Users", "roles": "Roles", "notifications": "Notifications", - "frigateplus": "Frigate+" + "triggers": "Triggers", + "debug": "Debug", + "frigateplus": "Frigate+", + "mediaSync": "Media sync", + "regionGrid": "Region grid" }, "dialog": { "unsavedChanges": { @@ -32,12 +92,29 @@ "desc": "Do you want to save your changes before continuing?" 
} }, + "saveAllPreview": { + "title": "Changes to be saved", + "triggerLabel": "Review pending changes", + "empty": "No pending changes.", + "scope": { + "label": "Scope", + "global": "Global", + "camera": "Camera: {{cameraName}}" + }, + "field": { + "label": "Field" + }, + "value": { + "label": "New value", + "reset": "Reset" + } + }, "cameraSetting": { "camera": "Camera", "noCamera": "No Camera" }, "general": { - "title": "UI Settings", + "title": "Profile Settings", "liveDashboard": { "title": "Live Dashboard", "automaticLiveView": { @@ -106,7 +183,7 @@ "desc": "Semantic Search in Frigate allows you to find tracked objects within your review items using either the image itself, a user-defined text description, or an automatically generated one.", "reindexNow": { "label": "Reindex Now", - "desc": "Reindexing will regenerate embeddings for all tracked object. This process runs in the background and may max out your CPU and take a fair amount of time depending on the number of tracked objects you have.", + "desc": "Reindexing will regenerate embeddings for all tracked objects. This process runs in the background and may max out your CPU and take a fair amount of time depending on the number of tracked objects you have.", "confirmTitle": "Confirm Reindexing", "confirmDesc": "Are you sure you want to reindex all tracked object embeddings? This process will run in the background but it may max out your CPU and take a fair amount of time. You can watch the progress on the Explore page.", "confirmButton": "Reindex", @@ -350,7 +427,11 @@ "backToSettings": "Back to Camera Settings", "streams": { "title": "Enable / Disable Cameras", - "desc": "Temporarily disable a camera until Frigate restarts. Disabling a camera completely stops Frigate's processing of this camera's streams. Detection, recording, and debugging will be unavailable.
Note: This does not disable go2rtc restreams." + "enableLabel": "Enabled cameras", + "enableDesc": "Temporarily disable an enabled camera until Frigate restarts. Disabling a camera completely stops Frigate's processing of this camera's streams. Detection, recording, and debugging will be unavailable.
Note: This does not disable go2rtc restreams.", + "disableLabel": "Disabled cameras", + "disableDesc": "Enable a camera that is currently not visible in the UI and disabled in the configuration. A restart of Frigate is required after enabling.", + "enableSuccess": "Enabled {{cameraName}} in configuration. Restart Frigate to apply the changes." }, "cameraConfig": { "add": "Add Camera", @@ -425,6 +506,7 @@ "all": "All Masks and Zones" }, "restart_required": "Restart required (masks/zones changed)", + "disabledInConfig": "Item is disabled in the config file", "toast": { "success": { "copyCoordinates": "Copied coordinates for {{polyName}} to clipboard." @@ -434,7 +516,7 @@ } }, "motionMaskLabel": "Motion Mask {{number}}", - "objectMaskLabel": "Object Mask {{number}} ({{label}})", + "objectMaskLabel": "Object Mask {{number}}", "form": { "zoneName": { "error": { @@ -508,6 +590,10 @@ "inputPlaceHolder": "Enter a name…", "tips": "Name must be at least 2 characters, must have at least one letter, and must not be the name of a camera or another zone on this camera." }, + "enabled": { + "title": "Enabled", + "description": "Whether this zone is active and enabled in the config file. If disabled, it cannot be enabled by MQTT. Disabled zones are ignored at runtime." + }, "inertia": { "title": "Inertia", "desc": "Specifies how many frames that an object must be in a zone before they are considered in the zone. Default: 3" @@ -552,12 +638,18 @@ }, "add": "New Motion Mask", "edit": "Edit Motion Mask", + "defaultName": "Motion Mask {{number}}", "context": { "title": "Motion masks are used to prevent unwanted types of motion from triggering detection (example: tree branches, camera timestamps). Motion masks should be used very sparingly, over-masking will make it more difficult for objects to be tracked." 
}, "point_one": "{{count}} point", "point_other": "{{count}} points", "clickDrawPolygon": "Click to draw a polygon on the image.", + "name": { + "title": "Name", + "description": "An optional friendly name for this motion mask.", + "placeholder": "Enter a name..." + }, "polygonAreaTooLarge": { "title": "The motion mask is covering {{polygonArea}}% of the camera frame. Large motion masks are not recommended.", "tips": "Motion masks do not prevent objects from being detected. You should use a required zone instead." @@ -582,6 +674,11 @@ "point_one": "{{count}} point", "point_other": "{{count}} points", "clickDrawPolygon": "Click to draw a polygon on the image.", + "name": { + "title": "Name", + "description": "An optional friendly name for this object mask.", + "placeholder": "Enter a name..." + }, "objects": { "title": "Objects", "desc": "The object type that applies to this object mask.", @@ -593,6 +690,12 @@ "noName": "Object Mask has been saved." } } + }, + "masks": { + "enabled": { + "title": "Enabled", + "description": "Whether this mask is enabled in the config file. If disabled, it cannot be enabled by MQTT. Disabled masks are ignored at runtime." + } } }, "motionDetectionTuner": { @@ -906,6 +1009,13 @@ }, "frigatePlus": { "title": "Frigate+ Settings", + "description": "Frigate+ is a subscription service that provides access to additional features and capabilities for your Frigate instance, including the ability to use custom object detection models trained on your own data. 
You can manage your Frigate+ model settings here.", + "cardTitles": { + "api": "API", + "currentModel": "Current Model", + "otherModels": "Other Models", + "configuration": "Configuration" + }, "apiKey": { "title": "Frigate+ API Key", "validated": "Frigate+ API key is detected and validated", @@ -947,6 +1057,15 @@ "error": "Failed to save config changes: {{errorMessage}}" } }, + "detectionModel": { + "plusActive": { + "title": "Frigate+ model management", + "label": "Current model source", + "description": "This instance is running a Frigate+ model. Select or change your model in Frigate+ settings.", + "goToFrigatePlus": "Go to Frigate+ settings", + "showModelForm": "Manually configure a model" + } + }, "triggers": { "documentTitle": "Triggers", "semanticSearch": { @@ -1067,5 +1186,237 @@ "deleteTriggerFailed": "Failed to delete trigger: {{errorMessage}}" } } - } + }, + "maintenance": { + "title": "Maintenance", + "sync": { + "title": "Media Sync", + "desc": "Frigate will periodically clean up media on a regular schedule according to your retention configuration. It is normal to see a few orphaned files as Frigate runs. Use this feature to remove orphaned media files from disk that are no longer referenced in the database.", + "started": "Media sync started.", + "alreadyRunning": "A sync job is already running", + "error": "Failed to start sync", + "currentStatus": "Status", + "jobId": "Job ID", + "startTime": "Start Time", + "endTime": "End Time", + "statusLabel": "Status", + "results": "Results", + "errorLabel": "Error", + "mediaTypes": "Media Types", + "allMedia": "All Media", + "dryRun": "Dry Run", + "dryRunEnabled": "No files will be deleted", + "dryRunDisabled": "Files will be deleted", + "force": "Force", + "forceDesc": "Bypass safety threshold and complete sync even if more than 50% of the files would be deleted.", + "running": "Sync Running...", + "start": "Start Sync", + "inProgress": "Sync is in progress. 
This page is disabled.", + "status": { + "queued": "Queued", + "running": "Running", + "completed": "Completed", + "failed": "Failed", + "notRunning": "Not Running" + }, + "resultsFields": { + "filesChecked": "Files Checked", + "orphansFound": "Orphans Found", + "orphansDeleted": "Orphans Deleted", + "aborted": "Aborted. Deletion would exceed safety threshold.", + "error": "Error", + "totals": "Totals" + }, + "event_snapshots": "Tracked Object Snapshots", + "event_thumbnails": "Tracked Object Thumbnails", + "review_thumbnails": "Review Thumbnails", + "previews": "Previews", + "exports": "Exports", + "recordings": "Recordings" + }, + "regionGrid": { + "title": "Region Grid", + "desc": "The region grid is an optimization that learns where objects of different sizes typically appear in each camera's field of view. Frigate uses this data to efficiently size detection regions. The grid is automatically built over time from tracked object data.", + "clear": "Clear region grid", + "clearConfirmTitle": "Clear Region Grid", + "clearConfirmDesc": "Clearing the region grid is not recommended unless you have recently changed your detector model size or have changed your camera's physical position and are having object tracking issues. The grid will be automatically rebuilt over time as objects are tracked. A Frigate restart is required for changes to take effect.", + "clearSuccess": "Region grid cleared successfully", + "clearError": "Failed to clear region grid", + "restartRequired": "Restart required for region grid changes to take effect" + } + }, + "configForm": { + "global": { + "title": "Global Settings", + "description": "These settings apply to all cameras unless overridden in the camera-specific settings." + }, + "camera": { + "title": "Camera Settings", + "description": "These settings apply only to this camera and override the global settings." 
+ }, + "advancedSettingsCount": "Advanced Settings ({{count}})", + "advancedCount": "Advanced ({{count}})", + "showAdvanced": "Show Advanced Settings", + "tabs": { + "sharedDefaults": "Shared Defaults", + "system": "System", + "integrations": "Integrations" + }, + "additionalProperties": { + "keyLabel": "Key", + "valueLabel": "Value", + "keyPlaceholder": "New key", + "remove": "Remove" + }, + "timezone": { + "defaultOption": "Use browser timezone" + }, + "roleMap": { + "empty": "No role mappings", + "roleLabel": "Role", + "groupsLabel": "Groups", + "addMapping": "Add role mapping", + "remove": "Remove" + }, + "ffmpegArgs": { + "preset": "Preset", + "manual": "Manual arguments", + "inherit": "Inherit from camera setting", + "selectPreset": "Select preset", + "manualPlaceholder": "Enter FFmpeg arguments" + }, + "cameraInputs": { + "itemTitle": "Stream {{index}}" + }, + "restartRequiredField": "Restart required", + "restartRequiredFooter": "Configuration changed - Restart required", + "sections": { + "detect": "Detection", + "record": "Recording", + "snapshots": "Snapshots", + "motion": "Motion", + "objects": "Objects", + "review": "Review", + "audio": "Audio", + "notifications": "Notifications", + "live": "Live View", + "timestamp_style": "Timestamps", + "mqtt": "MQTT", + "database": "Database", + "telemetry": "Telemetry", + "auth": "Authentication", + "tls": "TLS", + "proxy": "Proxy", + "go2rtc": "go2rtc", + "ffmpeg": "FFmpeg", + "detectors": "Detectors", + "model": "Model", + "semantic_search": "Semantic Search", + "genai": "GenAI", + "face_recognition": "Face Recognition", + "lpr": "License Plate Recognition", + "birdseye": "Birdseye" + }, + "detect": { + "title": "Detection Settings" + }, + "detectors": { + "title": "Detector Settings", + "singleType": "Only one {{type}} detector is allowed.", + "keyRequired": "Detector name is required.", + "keyDuplicate": "Detector name already exists.", + "noSchema": "No detector schemas are available.", + "none": "No detector 
instances configured.", + "add": "Add detector" + }, + "record": { + "title": "Recording Settings" + }, + "snapshots": { + "title": "Snapshot Settings" + }, + "motion": { + "title": "Motion Settings" + }, + "objects": { + "title": "Object Settings" + }, + "audioLabels": { + "summary": "{{count}} audio labels selected", + "empty": "No audio labels available" + }, + "objectLabels": { + "summary": "{{count}} object types selected", + "empty": "No object labels available" + }, + "filters": { + "objectFieldLabel": "{{field}} for {{label}}" + }, + "zoneNames": { + "summary": "{{count}} selected", + "empty": "No zones available" + }, + "inputRoles": { + "summary": "{{count}} roles selected", + "empty": "No roles available", + "options": { + "detect": "Detect", + "record": "Record", + "audio": "Audio" + } + }, + "review": { + "title": "Review Settings" + }, + "audio": { + "title": "Audio Settings" + }, + "notifications": { + "title": "Notification Settings" + }, + "live": { + "title": "Live View Settings" + }, + "timestamp_style": { + "title": "Timestamp Settings" + }, + "searchPlaceholder": "Search..." + }, + "globalConfig": { + "title": "Global Configuration", + "description": "Configure global settings that apply to all cameras unless overridden.", + "toast": { + "success": "Global settings saved successfully", + "error": "Failed to save global settings", + "validationError": "Validation failed" + } + }, + "cameraConfig": { + "title": "Camera Configuration", + "description": "Configure settings for individual cameras. Settings override global defaults.", + "overriddenBadge": "Overridden", + "resetToGlobal": "Reset to Global", + "toast": { + "success": "Camera settings saved successfully", + "error": "Failed to save camera settings" + } + }, + "toast": { + "success": "Settings saved successfully", + "applied": "Settings applied successfully", + "successRestartRequired": "Settings saved successfully. 
Restart Frigate to apply your changes.", + "error": "Failed to save settings", + "validationError": "Validation failed: {{message}}", + "resetSuccess": "Reset to global defaults", + "resetError": "Failed to reset settings", + "saveAllSuccess_one": "Saved {{count}} section successfully.", + "saveAllSuccess_other": "All {{count}} sections saved successfully.", + "saveAllPartial_one": "{{successCount}} of {{totalCount}} section saved. {{failCount}} failed.", + "saveAllPartial_other": "{{successCount}} of {{totalCount}} sections saved. {{failCount}} failed.", + "saveAllFailure": "Failed to save all sections." + }, + "unsavedChanges": "You have unsaved changes", + "confirmReset": "Confirm Reset", + "resetToDefaultDescription": "This will reset all settings in this section to their default values. This action cannot be undone.", + "resetToGlobalDescription": "This will reset the settings in this section to the global defaults. This action cannot be undone." } diff --git a/web/public/locales/en/views/system.json b/web/public/locales/en/views/system.json index da774e302..faaff31c9 100644 --- a/web/public/locales/en/views/system.json +++ b/web/public/locales/en/views/system.json @@ -7,12 +7,39 @@ "logs": { "frigate": "Frigate Logs - Frigate", "go2rtc": "Go2RTC Logs - Frigate", - "nginx": "Nginx Logs - Frigate" + "nginx": "Nginx Logs - Frigate", + "websocket": "Messages Logs - Frigate" } }, "title": "System", "metrics": "System metrics", "logs": { + "websocket": { + "label": "Messages", + "pause": "Pause", + "resume": "Resume", + "clear": "Clear", + "filter": { + "all": "All topics", + "topics": "Topics", + "events": "Events", + "reviews": "Reviews", + "classification": "Classification", + "face_recognition": "Face Recognition", + "lpr": "LPR", + "camera_activity": "Camera activity", + "system": "System", + "camera": "Camera", + "all_cameras": "All cameras", + "cameras_count_one": "{{count}} Camera", + "cameras_count_other": "{{count}} Cameras" + }, + "empty": "No messages 
captured yet", + "count": "{{count}} messages", + "expanded": { + "payload": "Payload" + } + }, "download": { "label": "Download Logs" }, @@ -51,6 +78,7 @@ "gpuMemory": "GPU Memory", "gpuEncoder": "GPU Encoder", "gpuDecoder": "GPU Decoder", + "gpuTemperature": "GPU Temperature", "gpuInfo": { "vainfoOutput": { "title": "Vainfo Output", @@ -77,6 +105,7 @@ }, "npuUsage": "NPU Usage", "npuMemory": "NPU Memory", + "npuTemperature": "NPU Temperature", "intelGpuWarning": { "title": "Intel GPU Stats Warning", "message": "GPU stats unavailable", @@ -158,6 +187,17 @@ "cameraDetectionsPerSecond": "{{camName}} detections per second", "cameraSkippedDetectionsPerSecond": "{{camName}} skipped detections per second" }, + "connectionQuality": { + "title": "Connection Quality", + "excellent": "Excellent", + "fair": "Fair", + "poor": "Poor", + "unusable": "Unusable", + "fps": "FPS", + "expectedFps": "Expected FPS", + "reconnectsLastHour": "Reconnects (last hour)", + "stallsLastHour": "Stalls (last hour)" + }, "toast": { "success": { "copyToClipboard": "Copied probe data to clipboard." @@ -176,7 +216,8 @@ "cameraIsOffline": "{{camera}} is offline", "detectIsSlow": "{{detect}} is slow ({{speed}} ms)", "detectIsVerySlow": "{{detect}} is very slow ({{speed}} ms)", - "shmTooLow": "/dev/shm allocation ({{total}} MB) should be increased to at least {{min}} MB." 
+ "shmTooLow": "/dev/shm allocation ({{total}} MB) should be increased to at least {{min}} MB.", + "debugReplayActive": "Debug replay session is active" }, "enrichments": { "title": "Enrichments", diff --git a/web/src/App.tsx b/web/src/App.tsx index d7a9ec3e9..21babc2b9 100644 --- a/web/src/App.tsx +++ b/web/src/App.tsx @@ -27,8 +27,10 @@ const Settings = lazy(() => import("@/pages/Settings")); const UIPlayground = lazy(() => import("@/pages/UIPlayground")); const FaceLibrary = lazy(() => import("@/pages/FaceLibrary")); const Classification = lazy(() => import("@/pages/ClassificationModel")); +const Chat = lazy(() => import("@/pages/Chat")); const Logs = lazy(() => import("@/pages/Logs")); const AccessDenied = lazy(() => import("@/pages/AccessDenied")); +const Replay = lazy(() => import("@/pages/Replay")); function App() { const { data: config } = useSWR("config", { @@ -106,7 +108,9 @@ function DefaultAppView() { } /> } /> } /> - } /> + } /> + } />{" "} + } />{" "} } /> } /> diff --git a/web/src/api/ws.tsx b/web/src/api/ws.tsx index 44d45ea2f..07d44d67a 100644 --- a/web/src/api/ws.tsx +++ b/web/src/api/ws.tsx @@ -1,5 +1,5 @@ import { baseUrl } from "./baseUrl"; -import { useCallback, useEffect, useState } from "react"; +import { useCallback, useEffect, useRef, useState } from "react"; import useWebSocket, { ReadyState } from "react-use-websocket"; import { EmbeddingsReindexProgressType, @@ -11,11 +11,19 @@ import { TrackedObjectUpdateReturnType, TriggerStatus, FrigateAudioDetections, + Job, } from "@/types/ws"; import { FrigateStats } from "@/types/stats"; import { createContainer } from "react-tracked"; import useDeepMemo from "@/hooks/use-deep-memo"; +export type WsFeedMessage = { + topic: string; + payload: unknown; + timestamp: number; + id: string; +}; + type Update = { topic: string; payload: unknown; @@ -28,6 +36,9 @@ type WsState = { type useValueReturn = [WsState, (update: Update) => void]; +const wsMessageSubscribers = new Set<(msg: WsFeedMessage) => 
void>(); +let wsMessageIdCounter = 0; + function useValue(): useValueReturn { const wsUrl = `${baseUrl.replace(/^http/, "ws")}ws`; @@ -42,8 +53,13 @@ function useValue(): useValueReturn { return; } - const cameraActivity: { [key: string]: FrigateCameraState } = - JSON.parse(activityValue); + let cameraActivity: { [key: string]: Partial }; + + try { + cameraActivity = JSON.parse(activityValue); + } catch { + return; + } if (Object.keys(cameraActivity).length === 0) { return; @@ -52,6 +68,12 @@ function useValue(): useValueReturn { const cameraStates: WsState = {}; Object.entries(cameraActivity).forEach(([name, state]) => { + const cameraConfig = state?.config; + + if (!cameraConfig) { + return; + } + const { record, detect, @@ -66,7 +88,7 @@ function useValue(): useValueReturn { detections, object_descriptions, review_descriptions, - } = state["config"]; + } = cameraConfig; cameraStates[`${name}/recordings/state`] = record ? "ON" : "OFF"; cameraStates[`${name}/enabled/state`] = enabled ? "ON" : "OFF"; cameraStates[`${name}/detect/state`] = detect ? 
"ON" : "OFF"; @@ -114,6 +136,17 @@ function useValue(): useValueReturn { ...prevState, [data.topic]: data.payload, })); + + // Notify feed subscribers + if (wsMessageSubscribers.size > 0) { + const feedMsg: WsFeedMessage = { + topic: data.topic, + payload: data.payload, + timestamp: Date.now(), + id: String(wsMessageIdCounter++), + }; + wsMessageSubscribers.forEach((cb) => cb(feedMsg)); + } } }, onOpen: () => { @@ -303,6 +336,57 @@ export function useReviewDescriptionState(camera: string): { return { payload: payload as ToggleableSetting, send }; } +export function useMotionMaskState( + camera: string, + maskName: string, +): { + payload: ToggleableSetting; + send: (payload: ToggleableSetting, retain?: boolean) => void; +} { + const { + value: { payload }, + send, + } = useWs( + `${camera}/motion_mask/${maskName}/state`, + `${camera}/motion_mask/${maskName}/set`, + ); + return { payload: payload as ToggleableSetting, send }; +} + +export function useObjectMaskState( + camera: string, + maskName: string, +): { + payload: ToggleableSetting; + send: (payload: ToggleableSetting, retain?: boolean) => void; +} { + const { + value: { payload }, + send, + } = useWs( + `${camera}/object_mask/${maskName}/state`, + `${camera}/object_mask/${maskName}/set`, + ); + return { payload: payload as ToggleableSetting, send }; +} + +export function useZoneState( + camera: string, + zoneName: string, +): { + payload: ToggleableSetting; + send: (payload: ToggleableSetting, retain?: boolean) => void; +} { + const { + value: { payload }, + send, + } = useWs( + `${camera}/zone/${zoneName}/state`, + `${camera}/zone/${zoneName}/set`, + ); + return { payload: payload as ToggleableSetting, send }; +} + export function usePtzCommand(camera: string): { payload: string; send: (payload: string, retain?: boolean) => void; @@ -651,3 +735,53 @@ export function useTriggers(): { payload: TriggerStatus } { : { name: "", camera: "", event_id: "", type: "", score: 0 }; return { payload: useDeepMemo(parsed) 
}; } + +export function useJobStatus( + jobType: string, + revalidateOnFocus: boolean = true, +): { payload: Job | null } { + const { + value: { payload }, + send: sendCommand, + } = useWs("job_state", "jobState"); + + const jobData = useDeepMemo( + payload && typeof payload === "string" ? JSON.parse(payload) : {}, + ); + const currentJob = jobData[jobType] || null; + + useEffect(() => { + let listener: (() => void) | undefined; + if (revalidateOnFocus) { + sendCommand("jobState"); + listener = () => { + if (document.visibilityState === "visible") { + sendCommand("jobState"); + } + }; + addEventListener("visibilitychange", listener); + } + + return () => { + if (listener) { + removeEventListener("visibilitychange", listener); + } + }; + // eslint-disable-next-line react-hooks/exhaustive-deps + }, [revalidateOnFocus]); + + return { payload: currentJob as Job | null }; +} + +export function useWsMessageSubscribe(callback: (msg: WsFeedMessage) => void) { + const callbackRef = useRef(callback); + callbackRef.current = callback; + + useEffect(() => { + const handler = (msg: WsFeedMessage) => callbackRef.current(msg); + wsMessageSubscribers.add(handler); + return () => { + wsMessageSubscribers.delete(handler); + }; + }, []); +} diff --git a/web/src/components/auth/ProtectedRoute.tsx b/web/src/components/auth/ProtectedRoute.tsx index cedf5a15a..a7d1b3596 100644 --- a/web/src/components/auth/ProtectedRoute.tsx +++ b/web/src/components/auth/ProtectedRoute.tsx @@ -47,7 +47,7 @@ export default function ProtectedRoute({ return ; } - // Authenticated mode (8971): require login + // Authenticated mode (external port): require login if (!auth.user) { return ( diff --git a/web/src/components/camera/CameraImage.tsx b/web/src/components/camera/CameraImage.tsx index 716e63f57..f0c05995e 100644 --- a/web/src/components/camera/CameraImage.tsx +++ b/web/src/components/camera/CameraImage.tsx @@ -26,7 +26,8 @@ export default function CameraImage({ const containerRef = useRef(null); const 
imgRef = useRef(null); - const { name } = config ? config.cameras[camera] : ""; + const cameraConfig = config?.cameras?.[camera]; + const { name } = cameraConfig ?? { name: camera }; const { payload: enabledState } = useEnabledState(camera); const enabled = enabledState ? enabledState === "ON" : true; @@ -34,15 +35,15 @@ export default function CameraImage({ useResizeObserver(containerRef); const requestHeight = useMemo(() => { - if (!config || containerHeight == 0) { + if (!cameraConfig || containerHeight == 0) { return 360; } return Math.min( - config.cameras[camera].detect.height, + cameraConfig.detect.height, Math.round(containerHeight * (isDesktop ? 1.1 : 1.25)), ); - }, [config, camera, containerHeight]); + }, [cameraConfig, containerHeight]); const [isPortraitImage, setIsPortraitImage] = useState(false); diff --git a/web/src/components/camera/ConnectionQualityIndicator.tsx b/web/src/components/camera/ConnectionQualityIndicator.tsx new file mode 100644 index 000000000..3ea3c4f19 --- /dev/null +++ b/web/src/components/camera/ConnectionQualityIndicator.tsx @@ -0,0 +1,76 @@ +import { useTranslation } from "react-i18next"; +import { + Tooltip, + TooltipContent, + TooltipTrigger, +} from "@/components/ui/tooltip"; +import { cn } from "@/lib/utils"; + +type ConnectionQualityIndicatorProps = { + quality: "excellent" | "fair" | "poor" | "unusable"; + expectedFps: number; + reconnects: number; + stalls: number; +}; + +export function ConnectionQualityIndicator({ + quality, + expectedFps, + reconnects, + stalls, +}: ConnectionQualityIndicatorProps) { + const { t } = useTranslation(["views/system"]); + + const getColorClass = (quality: string): string => { + switch (quality) { + case "excellent": + return "bg-success"; + case "fair": + return "bg-yellow-500"; + case "poor": + return "bg-orange-500"; + case "unusable": + return "bg-destructive"; + default: + return "bg-gray-500"; + } + }; + + const qualityLabel = t(`cameras.connectionQuality.${quality}`); + + return ( + 
+ +
+ + +
+
+ {t("cameras.connectionQuality.title")} +
+
+
{qualityLabel}
+
+
+ {t("cameras.connectionQuality.expectedFps")}:{" "} + {expectedFps.toFixed(1)} {t("cameras.connectionQuality.fps")} +
+
+ {t("cameras.connectionQuality.reconnectsLastHour")}:{" "} + {reconnects} +
+
+ {t("cameras.connectionQuality.stallsLastHour")}: {stalls} +
+
+
+
+
+ + ); +} diff --git a/web/src/components/card/ExportCard.tsx b/web/src/components/card/ExportCard.tsx index 021524532..c8d9c4c65 100644 --- a/web/src/components/card/ExportCard.tsx +++ b/web/src/components/card/ExportCard.tsx @@ -1,9 +1,8 @@ import ActivityIndicator from "../indicators/activity-indicator"; -import { LuTrash } from "react-icons/lu"; import { Button } from "../ui/button"; -import { useCallback, useState } from "react"; -import { isDesktop, isMobile } from "react-device-detect"; -import { FaDownload, FaPlay, FaShareAlt } from "react-icons/fa"; +import { useCallback, useMemo, useState } from "react"; +import { isMobile } from "react-device-detect"; +import { FiMoreVertical } from "react-icons/fi"; import { Skeleton } from "../ui/skeleton"; import { Dialog, @@ -14,35 +13,81 @@ import { } from "../ui/dialog"; import { Input } from "../ui/input"; import useKeyboardListener from "@/hooks/use-keyboard-listener"; -import { DeleteClipType, Export } from "@/types/export"; -import { MdEditSquare } from "react-icons/md"; +import { DeleteClipType, Export, ExportCase } from "@/types/export"; import { baseUrl } from "@/api/baseUrl"; import { cn } from "@/lib/utils"; import { shareOrCopy } from "@/utils/browserUtil"; import { useTranslation } from "react-i18next"; import { ImageShadowOverlay } from "../overlay/ImageShadowOverlay"; import BlurredIconButton from "../button/BlurredIconButton"; -import { Tooltip, TooltipContent, TooltipTrigger } from "../ui/tooltip"; import { useIsAdmin } from "@/hooks/use-is-admin"; +import { + DropdownMenu, + DropdownMenuContent, + DropdownMenuItem, + DropdownMenuTrigger, +} from "../ui/dropdown-menu"; +import { FaFolder } from "react-icons/fa"; -type ExportProps = { +type CaseCardProps = { + className: string; + exportCase: ExportCase; + exports: Export[]; + onSelect: () => void; +}; +export function CaseCard({ + className, + exportCase, + exports, + onSelect, +}: CaseCardProps) { + const firstExport = useMemo( + () => 
exports.find((exp) => exp.thumb_path && exp.thumb_path.length > 0), + [exports], + ); + + return ( +
onSelect()} + > + {firstExport && ( + + )} +
+
+ +
{exportCase.name}
+
+
+ ); +} + +type ExportCardProps = { className: string; exportedRecording: Export; onSelect: (selected: Export) => void; onRename: (original: string, update: string) => void; onDelete: ({ file, exportName }: DeleteClipType) => void; + onAssignToCase?: (selected: Export) => void; }; - -export default function ExportCard({ +export function ExportCard({ className, exportedRecording, onSelect, onRename, onDelete, -}: ExportProps) { + onAssignToCase, +}: ExportCardProps) { const { t } = useTranslation(["views/exports"]); const isAdmin = useIsAdmin(); - const [hovered, setHovered] = useState(false); const [loading, setLoading] = useState( exportedRecording.thumb_path.length > 0, ); @@ -136,12 +181,14 @@ export default function ExportCard({
setHovered(true) : undefined} - onMouseLeave={isDesktop ? () => setHovered(false) : undefined} - onClick={isDesktop ? undefined : () => setHovered(!hovered)} + onClick={() => { + if (!exportedRecording.in_progress) { + onSelect(exportedRecording); + } + }} > {exportedRecording.in_progress ? ( @@ -158,95 +205,88 @@ export default function ExportCard({ )} )} - {hovered && ( - <> -
-
-
- {!exportedRecording.in_progress && ( - - - - shareOrCopy( - `${baseUrl}export?id=${exportedRecording.id}`, - exportedRecording.name.replaceAll("_", " "), - ) - } - > - - - - {t("tooltip.shareExport")} - - )} - {!exportedRecording.in_progress && ( + {!exportedRecording.in_progress && ( +
+ + + e.stopPropagation()} + > + + + + + { + e.stopPropagation(); + shareOrCopy( + `${baseUrl}export?id=${exportedRecording.id}`, + exportedRecording.name.replaceAll("_", " "), + ); + }} + > + {t("tooltip.shareExport")} + + e.stopPropagation()} > - - - - - - - - {t("tooltip.downloadVideo")} - - + {t("tooltip.downloadVideo")} - )} - {isAdmin && !exportedRecording.in_progress && ( - - - - setEditName({ - original: exportedRecording.name, - update: undefined, - }) - } - > - - - - {t("tooltip.editName")} - + + {isAdmin && onAssignToCase && ( + { + e.stopPropagation(); + onAssignToCase(exportedRecording); + }} + > + {t("tooltip.assignToCase")} + )} {isAdmin && ( - - - - onDelete({ - file: exportedRecording.id, - exportName: exportedRecording.name, - }) - } - > - - - - {t("tooltip.deleteExport")} - + { + e.stopPropagation(); + setEditName({ + original: exportedRecording.name, + update: undefined, + }); + }} + > + {t("tooltip.editName")} + )} -
-
- - {!exportedRecording.in_progress && ( - - )} - + {isAdmin && ( + { + e.stopPropagation(); + onDelete({ + file: exportedRecording.id, + exportName: exportedRecording.name, + }); + }} + > + {t("tooltip.deleteExport")} + + )} + + +
)} {loading && ( diff --git a/web/src/components/card/SettingsGroupCard.tsx b/web/src/components/card/SettingsGroupCard.tsx new file mode 100644 index 000000000..4bfaa1402 --- /dev/null +++ b/web/src/components/card/SettingsGroupCard.tsx @@ -0,0 +1,56 @@ +import { ReactNode } from "react"; +import { Label } from "../ui/label"; + +export const SPLIT_ROW_CLASS_NAME = + "space-y-2 md:grid md:grid-cols-[minmax(14rem,24rem)_minmax(0,1fr)] md:items-start md:gap-x-6 md:space-y-0"; +export const DESCRIPTION_CLASS_NAME = "text-sm text-muted-foreground"; +export const CONTROL_COLUMN_CLASS_NAME = "w-full md:max-w-2xl"; + +type SettingsGroupCardProps = { + title: string | ReactNode; + children: ReactNode; +}; + +export function SettingsGroupCard({ title, children }: SettingsGroupCardProps) { + return ( +
+
+ {title} +
+ {children} +
+ ); +} + +type SplitCardRowProps = { + label: ReactNode; + description?: ReactNode; + content: ReactNode; +}; + +export function SplitCardRow({ + label, + description, + content, +}: SplitCardRowProps) { + return ( +
+
+ + {description && ( +
+ {description} +
+ )} +
+
+ {content} + {description && ( +
+ {description} +
+ )} +
+
+ ); +} diff --git a/web/src/components/chat/ChatEventThumbnailsRow.tsx b/web/src/components/chat/ChatEventThumbnailsRow.tsx new file mode 100644 index 000000000..bf2c5e88f --- /dev/null +++ b/web/src/components/chat/ChatEventThumbnailsRow.tsx @@ -0,0 +1,42 @@ +import { useApiHost } from "@/api"; + +type ChatEventThumbnailsRowProps = { + events: { id: string }[]; +}; + +/** + * Horizontal scroll row of event thumbnail images for chat (e.g. after search_objects). + * Renders nothing when events is empty. + */ +export function ChatEventThumbnailsRow({ + events, +}: ChatEventThumbnailsRowProps) { + const apiHost = useApiHost(); + + if (events.length === 0) return null; + + return ( +
+
+
+ {events.map((event) => ( + + + + ))} +
+
+
+ ); +} diff --git a/web/src/components/chat/ChatMessage.tsx b/web/src/components/chat/ChatMessage.tsx new file mode 100644 index 000000000..a644a9d7d --- /dev/null +++ b/web/src/components/chat/ChatMessage.tsx @@ -0,0 +1,208 @@ +import { useState, useEffect, useRef } from "react"; +import ReactMarkdown from "react-markdown"; +import remarkGfm from "remark-gfm"; +import { useTranslation } from "react-i18next"; +import copy from "copy-to-clipboard"; +import { toast } from "sonner"; +import { FaCopy, FaPencilAlt } from "react-icons/fa"; +import { FaArrowUpLong } from "react-icons/fa6"; +import { Button } from "@/components/ui/button"; +import { Textarea } from "@/components/ui/textarea"; +import { + Tooltip, + TooltipContent, + TooltipTrigger, +} from "@/components/ui/tooltip"; +import { cn } from "@/lib/utils"; + +type MessageBubbleProps = { + role: "user" | "assistant"; + content: string; + messageIndex?: number; + onEditSubmit?: (messageIndex: number, newContent: string) => void; + isComplete?: boolean; +}; + +export function MessageBubble({ + role, + content, + messageIndex = 0, + onEditSubmit, + isComplete = true, +}: MessageBubbleProps) { + const { t } = useTranslation(["views/chat", "common"]); + const isUser = role === "user"; + const [isEditing, setIsEditing] = useState(false); + const [draftContent, setDraftContent] = useState(content); + const editInputRef = useRef(null); + + useEffect(() => { + setDraftContent(content); + }, [content]); + + useEffect(() => { + if (isEditing) { + editInputRef.current?.focus(); + editInputRef.current?.setSelectionRange( + editInputRef.current.value.length, + editInputRef.current.value.length, + ); + } + }, [isEditing]); + + const handleCopy = () => { + const text = content?.trim() || ""; + if (!text) return; + if (copy(text)) { + toast.success(t("button.copiedToClipboard", { ns: "common" })); + } + }; + + const handleEditClick = () => { + setDraftContent(content); + setIsEditing(true); + }; + + const handleEditSubmit = () 
=> { + const trimmed = draftContent.trim(); + if (!trimmed || onEditSubmit == null) return; + onEditSubmit(messageIndex, trimmed); + setIsEditing(false); + }; + + const handleEditCancel = () => { + setDraftContent(content); + setIsEditing(false); + }; + + const handleEditKeyDown = (e: React.KeyboardEvent) => { + if (e.key === "Enter" && !e.shiftKey) { + e.preventDefault(); + handleEditSubmit(); + } + if (e.key === "Escape") { + handleEditCancel(); + } + }; + + if (isUser && isEditing) { + return ( +
+