Compare commits


No commits in common. "dev" and "v0.17.0-rc2" have entirely different histories.

593 changed files with 9654 additions and 50865 deletions

View File

@ -229,7 +229,6 @@ Reolink
restream
restreamed
restreaming
RJSF
rkmpp
rknn
rkrga

View File

@ -324,12 +324,6 @@ try:
    value = await sensor.read()
except Exception:  # ❌ Too broad
    logger.error("Failed")

# Returning exceptions in JSON responses
except ValueError as e:
    return JSONResponse(
        content={"success": False, "message": str(e)},
    )
```
### ✅ Use These Instead
@ -359,16 +353,6 @@ try:
    value = await sensor.read()
except SensorException as err:  # ✅ Specific
    logger.exception("Failed to read sensor")

# Safe error responses
except ValueError:
    logger.exception("Invalid parameters for API request")
    return JSONResponse(
        content={
            "success": False,
            "message": "Invalid request parameters",
        },
    )
```
## Project-Specific Conventions

4
.gitignore vendored
View File

@ -3,8 +3,6 @@ __pycache__
.mypy_cache
*.swp
debug
.claude/*
.mcp.json
.vscode/*
!.vscode/launch.json
config/*
@ -21,4 +19,4 @@ web/.env
core
!/web/**/*.ts
.idea/*
.ipynb_checkpoints
.ipynb_checkpoints

View File

@ -1,7 +1,7 @@
default_target: local
COMMIT_HASH := $(shell git log -1 --pretty=format:"%h"|tail -1)
VERSION = 0.18.0
VERSION = 0.17.0
IMAGE_REPO ?= ghcr.io/blakeblackshear/frigate
GITHUB_REF_NAME ?= $(shell git rev-parse --abbrev-ref HEAD)
BOARDS= #Initialized empty
@ -49,8 +49,7 @@ push: push-boards
	--push
run: local
	docker run --rm --publish=5000:5000 --publish=8971:8971 \
		--volume=${PWD}/config:/config frigate:latest
	docker run --rm --publish=5000:5000 --volume=${PWD}/config:/config frigate:latest
run_tests: local
	docker run --rm --workdir=/opt/frigate --entrypoint= frigate:latest \

View File

@ -55,7 +55,7 @@ RUN --mount=type=tmpfs,target=/tmp --mount=type=tmpfs,target=/var/cache/apt \
FROM scratch AS go2rtc
ARG TARGETARCH
WORKDIR /rootfs/usr/local/go2rtc/bin
ADD --link --chmod=755 "https://github.com/AlexxIT/go2rtc/releases/download/v1.9.13/go2rtc_linux_${TARGETARCH}" go2rtc
ADD --link --chmod=755 "https://github.com/AlexxIT/go2rtc/releases/download/v1.9.10/go2rtc_linux_${TARGETARCH}" go2rtc
FROM wget AS tempio
ARG TARGETARCH

View File

@ -105,9 +105,9 @@ if [[ "${TARGETARCH}" == "amd64" ]]; then
# install legacy and standard intel icd and level-zero-gpu
# see https://github.com/intel/compute-runtime/blob/master/LEGACY_PLATFORMS.md for more info
# needed core package
wget https://github.com/intel/compute-runtime/releases/download/25.13.33276.19/libigdgmm12_22.7.0_amd64.deb
dpkg -i libigdgmm12_22.7.0_amd64.deb
rm libigdgmm12_22.7.0_amd64.deb
wget https://github.com/intel/compute-runtime/releases/download/24.52.32224.5/libigdgmm12_22.5.5_amd64.deb
dpkg -i libigdgmm12_22.5.5_amd64.deb
rm libigdgmm12_22.5.5_amd64.deb
# legacy packages
wget https://github.com/intel/compute-runtime/releases/download/24.35.30872.36/intel-opencl-icd-legacy1_24.35.30872.36_amd64.deb
@ -115,19 +115,18 @@ if [[ "${TARGETARCH}" == "amd64" ]]; then
wget https://github.com/intel/intel-graphics-compiler/releases/download/igc-1.0.17537.24/intel-igc-opencl_1.0.17537.24_amd64.deb
wget https://github.com/intel/intel-graphics-compiler/releases/download/igc-1.0.17537.24/intel-igc-core_1.0.17537.24_amd64.deb
# standard packages
wget https://github.com/intel/compute-runtime/releases/download/25.13.33276.19/intel-opencl-icd_25.13.33276.19_amd64.deb
wget https://github.com/intel/compute-runtime/releases/download/25.13.33276.19/intel-level-zero-gpu_1.6.33276.19_amd64.deb
wget https://github.com/intel/intel-graphics-compiler/releases/download/v2.10.10/intel-igc-opencl-2_2.10.10+18926_amd64.deb
wget https://github.com/intel/intel-graphics-compiler/releases/download/v2.10.10/intel-igc-core-2_2.10.10+18926_amd64.deb
wget https://github.com/intel/compute-runtime/releases/download/24.52.32224.5/intel-opencl-icd_24.52.32224.5_amd64.deb
wget https://github.com/intel/compute-runtime/releases/download/24.52.32224.5/intel-level-zero-gpu_1.6.32224.5_amd64.deb
wget https://github.com/intel/intel-graphics-compiler/releases/download/v2.5.6/intel-igc-opencl-2_2.5.6+18417_amd64.deb
wget https://github.com/intel/intel-graphics-compiler/releases/download/v2.5.6/intel-igc-core-2_2.5.6+18417_amd64.deb
# npu packages
wget https://github.com/oneapi-src/level-zero/releases/download/v1.28.2/level-zero_1.28.2+u22.04_amd64.deb
wget https://github.com/intel/linux-npu-driver/releases/download/v1.19.0/intel-driver-compiler-npu_1.19.0.20250707-16111289554_ubuntu22.04_amd64.deb
wget https://github.com/intel/linux-npu-driver/releases/download/v1.19.0/intel-fw-npu_1.19.0.20250707-16111289554_ubuntu22.04_amd64.deb
wget https://github.com/intel/linux-npu-driver/releases/download/v1.19.0/intel-level-zero-npu_1.19.0.20250707-16111289554_ubuntu22.04_amd64.deb
wget https://github.com/oneapi-src/level-zero/releases/download/v1.21.9/level-zero_1.21.9+u22.04_amd64.deb
wget https://github.com/intel/linux-npu-driver/releases/download/v1.17.0/intel-driver-compiler-npu_1.17.0.20250508-14912879441_ubuntu22.04_amd64.deb
wget https://github.com/intel/linux-npu-driver/releases/download/v1.17.0/intel-fw-npu_1.17.0.20250508-14912879441_ubuntu22.04_amd64.deb
wget https://github.com/intel/linux-npu-driver/releases/download/v1.17.0/intel-level-zero-npu_1.17.0.20250508-14912879441_ubuntu22.04_amd64.deb
dpkg -i *.deb
rm *.deb
apt-get -qq install -f -y
fi
if [[ "${TARGETARCH}" == "arm64" ]]; then

View File

@ -10,8 +10,7 @@ echo "[INFO] Starting certsync..."
lefile="/etc/letsencrypt/live/frigate/fullchain.pem"
tls_enabled=`python3 /usr/local/nginx/get_nginx_settings.py | jq -r .tls.enabled`
listen_external_port=`python3 /usr/local/nginx/get_nginx_settings.py | jq -r .listen.external_port`
tls_enabled=`python3 /usr/local/nginx/get_listen_settings.py | jq -r .tls.enabled`
while true
do
@ -35,7 +34,7 @@ do
;;
esac
liveprint=`echo | openssl s_client -showcerts -connect 127.0.0.1:$listen_external_port 2>&1 | openssl x509 -fingerprint 2>&1 | grep -i fingerprint || echo 'failed'`
liveprint=`echo | openssl s_client -showcerts -connect 127.0.0.1:8971 2>&1 | openssl x509 -fingerprint 2>&1 | grep -i fingerprint || echo 'failed'`
case "$liveprint" in
*Fingerprint*)
@ -56,4 +55,4 @@ do
done
exit 0
exit 0

View File

@ -80,14 +80,14 @@ if [ ! \( -f "$letsencrypt_path/privkey.pem" -a -f "$letsencrypt_path/fullchain.
fi
# build templates for optional FRIGATE_BASE_PATH environment variable
python3 /usr/local/nginx/get_nginx_settings.py | \
python3 /usr/local/nginx/get_base_path.py | \
tempio -template /usr/local/nginx/templates/base_path.gotmpl \
-out /usr/local/nginx/conf/base_path.conf
-out /usr/local/nginx/conf/base_path.conf
# build templates for additional network settings
python3 /usr/local/nginx/get_nginx_settings.py | \
tempio -template /usr/local/nginx/templates/listen.gotmpl \
-out /usr/local/nginx/conf/listen.conf
# build templates for optional TLS support
python3 /usr/local/nginx/get_listen_settings.py | \
tempio -template /usr/local/nginx/templates/listen.gotmpl \
-out /usr/local/nginx/conf/listen.conf
# Replace the bash process with the NGINX process, redirecting stderr to stdout
exec 2>&1

View File

@ -0,0 +1,11 @@
"""Prints the base path as json to stdout."""
import json
import os
from typing import Any
base_path = os.environ.get("FRIGATE_BASE_PATH", "")
result: dict[str, Any] = {"base_path": base_path}
print(json.dumps(result))

View File

@ -0,0 +1,35 @@
"""Prints the tls config as json to stdout."""
import json
import sys
from typing import Any
from ruamel.yaml import YAML
sys.path.insert(0, "/opt/frigate")
from frigate.util.config import find_config_file
sys.path.remove("/opt/frigate")
yaml = YAML()
config_file = find_config_file()
try:
    with open(config_file) as f:
        raw_config = f.read()

    if config_file.endswith((".yaml", ".yml")):
        config: dict[str, Any] = yaml.load(raw_config)
    elif config_file.endswith(".json"):
        config: dict[str, Any] = json.loads(raw_config)
except FileNotFoundError:
    config: dict[str, Any] = {}

tls_config: dict[str, Any] = config.get("tls", {"enabled": True})
networking_config = config.get("networking", {})
ipv6_config = networking_config.get("ipv6", {"enabled": False})
output = {"tls": tls_config, "ipv6": ipv6_config}
print(json.dumps(output))
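With no `tls` or `networking` section in the config file, the defaults above print output of roughly this shape (illustrative):

```json
{"tls": {"enabled": true}, "ipv6": {"enabled": false}}
```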

View File

@ -1,62 +0,0 @@
"""Prints the nginx settings as json to stdout."""
import json
import os
import sys
from typing import Any
from ruamel.yaml import YAML
sys.path.insert(0, "/opt/frigate")
from frigate.util.config import find_config_file
sys.path.remove("/opt/frigate")
yaml = YAML()
config_file = find_config_file()
try:
    with open(config_file) as f:
        raw_config = f.read()

    if config_file.endswith((".yaml", ".yml")):
        config: dict[str, Any] = yaml.load(raw_config)
    elif config_file.endswith(".json"):
        config: dict[str, Any] = json.loads(raw_config)
except FileNotFoundError:
    config: dict[str, Any] = {}
tls_config: dict[str, Any] = config.get("tls", {})
tls_config.setdefault("enabled", True)
networking_config: dict[str, Any] = config.get("networking", {})
ipv6_config: dict[str, Any] = networking_config.get("ipv6", {})
ipv6_config.setdefault("enabled", False)
listen_config: dict[str, Any] = networking_config.get("listen", {})
listen_config.setdefault("internal", 5000)
listen_config.setdefault("external", 8971)
# handle case where internal port is a string with ip:port
internal_port = listen_config["internal"]
if type(internal_port) is str:
    internal_port = int(internal_port.split(":")[-1])
listen_config["internal_port"] = internal_port

# handle case where external port is a string with ip:port
external_port = listen_config["external"]
if type(external_port) is str:
    external_port = int(external_port.split(":")[-1])
listen_config["external_port"] = external_port

base_path = os.environ.get("FRIGATE_BASE_PATH", "")

result: dict[str, Any] = {
    "tls": tls_config,
    "ipv6": ipv6_config,
    "listen": listen_config,
    "base_path": base_path,
}
print(json.dumps(result))
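With no `tls` or `networking` section and `FRIGATE_BASE_PATH` unset, the defaults above print output along these lines (illustrative):

```json
{"tls": {"enabled": true}, "ipv6": {"enabled": false}, "listen": {"internal": 5000, "external": 8971, "internal_port": 5000, "external_port": 8971}, "base_path": ""}
```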

View File

@ -7,7 +7,7 @@ location ^~ {{ .base_path }}/ {
# remove base_url from the path before passing upstream
rewrite ^{{ .base_path }}/(.*) /$1 break;
proxy_pass $scheme://127.0.0.1:{{ .listen.external_port }};
proxy_pass $scheme://127.0.0.1:8971;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";

View File

@ -1,36 +1,45 @@
# Internal (IPv4 always; IPv6 optional)
listen {{ .listen.internal }};
{{ if .ipv6.enabled }}listen [::]:{{ .listen.internal_port }};{{ end }}
listen 5000;
{{ if .ipv6 }}{{ if .ipv6.enabled }}listen [::]:5000;{{ end }}{{ end }}
# intended for external traffic, protected by auth
{{ if .tls.enabled }}
# external HTTPS (IPv4 always; IPv6 optional)
listen {{ .listen.external }} ssl;
{{ if .ipv6.enabled }}listen [::]:{{ .listen.external_port }} ssl;{{ end }}
{{ if .tls }}
{{ if .tls.enabled }}
# external HTTPS (IPv4 always; IPv6 optional)
listen 8971 ssl;
{{ if .ipv6 }}{{ if .ipv6.enabled }}listen [::]:8971 ssl;{{ end }}{{ end }}
ssl_certificate /etc/letsencrypt/live/frigate/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/frigate/privkey.pem;
ssl_certificate /etc/letsencrypt/live/frigate/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/frigate/privkey.pem;
# generated 2024-06-01, Mozilla Guideline v5.7, nginx 1.25.3, OpenSSL 1.1.1w, modern configuration, no OCSP
# https://ssl-config.mozilla.org/#server=nginx&version=1.25.3&config=modern&openssl=1.1.1w&ocsp=false&guideline=5.7
ssl_session_timeout 1d;
ssl_session_cache shared:MozSSL:10m; # about 40000 sessions
ssl_session_tickets off;
# generated 2024-06-01, Mozilla Guideline v5.7, nginx 1.25.3, OpenSSL 1.1.1w, modern configuration, no OCSP
# https://ssl-config.mozilla.org/#server=nginx&version=1.25.3&config=modern&openssl=1.1.1w&ocsp=false&guideline=5.7
ssl_session_timeout 1d;
ssl_session_cache shared:MozSSL:10m; # about 40000 sessions
ssl_session_tickets off;
# modern configuration
ssl_protocols TLSv1.3;
ssl_prefer_server_ciphers off;
# modern configuration
ssl_protocols TLSv1.3;
ssl_prefer_server_ciphers off;
# HSTS (ngx_http_headers_module is required) (63072000 seconds)
add_header Strict-Transport-Security "max-age=63072000" always;
# HSTS (ngx_http_headers_module is required) (63072000 seconds)
add_header Strict-Transport-Security "max-age=63072000" always;
# ACME challenge location
location /.well-known/acme-challenge/ {
default_type "text/plain";
root /etc/letsencrypt/www;
}
# ACME challenge location
location /.well-known/acme-challenge/ {
default_type "text/plain";
root /etc/letsencrypt/www;
}
{{ else }}
# external HTTP (IPv4 always; IPv6 optional)
listen 8971;
{{ if .ipv6 }}{{ if .ipv6.enabled }}listen [::]:8971;{{ end }}{{ end }}
{{ end }}
{{ else }}
# (No tls) default to HTTP (IPv4 always; IPv6 optional)
listen {{ .listen.external }};
{{ if .ipv6.enabled }}listen [::]:{{ .listen.external_port }};{{ end }}
# (No tls section) default to HTTP (IPv4 always; IPv6 optional)
listen 8971;
{{ if .ipv6 }}{{ if .ipv6.enabled }}listen [::]:8971;{{ end }}{{ end }}
{{ end }}

View File

@ -13,7 +13,7 @@ ARG ROCM
RUN apt update -qq && \
apt install -y wget gpg && \
wget -O rocm.deb https://repo.radeon.com/amdgpu-install/7.2/ubuntu/jammy/amdgpu-install_7.2.70200-1_all.deb && \
wget -O rocm.deb https://repo.radeon.com/amdgpu-install/7.1.1/ubuntu/jammy/amdgpu-install_7.1.1.70101-1_all.deb && \
apt install -y ./rocm.deb && \
apt update && \
apt install -qq -y rocm
@ -56,8 +56,6 @@ FROM scratch AS rocm-dist
ARG ROCM
# Copy HIP headers required for MIOpen JIT (BuildHip) / HIPRTC at runtime
COPY --from=rocm /opt/rocm-${ROCM}/include/ /opt/rocm-${ROCM}/include/
COPY --from=rocm /opt/rocm-$ROCM/bin/rocminfo /opt/rocm-$ROCM/bin/migraphx-driver /opt/rocm-$ROCM/bin/
# Copy MIOpen database files for gfx10xx and gfx11xx only (RDNA2/RDNA3)
COPY --from=rocm /opt/rocm-$ROCM/share/miopen/db/*gfx10* /opt/rocm-$ROCM/share/miopen/db/

View File

@ -1 +1 @@
onnxruntime-migraphx @ https://github.com/NickM-27/frigate-onnxruntime-rocm/releases/download/v7.2.0/onnxruntime_migraphx-1.23.1-cp311-cp311-linux_x86_64.whl
onnxruntime-migraphx @ https://github.com/NickM-27/frigate-onnxruntime-rocm/releases/download/v7.1.0/onnxruntime_migraphx-1.23.1-cp311-cp311-linux_x86_64.whl

View File

@ -1,5 +1,5 @@
variable "ROCM" {
default = "7.2.0"
default = "7.1.1"
}
variable "HSA_OVERRIDE_GFX_VERSION" {
default = ""

View File

@ -1,18 +1,18 @@
# Nvidia ONNX Runtime GPU Support
# NVidia TensorRT Support (amd64 only)
--extra-index-url 'https://pypi.nvidia.com'
cython==3.0.*; platform_machine == 'x86_64'
nvidia-cuda-cupti-cu12==12.9.79; platform_machine == 'x86_64'
nvidia-cublas-cu12==12.9.1.*; platform_machine == 'x86_64'
nvidia-cudnn-cu12==9.19.0.*; platform_machine == 'x86_64'
nvidia-cufft-cu12==11.4.1.*; platform_machine == 'x86_64'
nvidia-curand-cu12==10.3.10.*; platform_machine == 'x86_64'
nvidia-cuda-nvcc-cu12==12.9.86; platform_machine == 'x86_64'
nvidia-cuda-nvrtc-cu12==12.9.86; platform_machine == 'x86_64'
nvidia-cuda-runtime-cu12==12.9.79; platform_machine == 'x86_64'
nvidia-cusolver-cu12==11.7.5.*; platform_machine == 'x86_64'
nvidia-cusparse-cu12==12.5.10.*; platform_machine == 'x86_64'
nvidia-nccl-cu12==2.29.7; platform_machine == 'x86_64'
nvidia-nvjitlink-cu12==12.9.86; platform_machine == 'x86_64'
nvidia_cuda_cupti_cu12==12.5.82; platform_machine == 'x86_64'
nvidia-cublas-cu12==12.5.3.*; platform_machine == 'x86_64'
nvidia-cudnn-cu12==9.3.0.*; platform_machine == 'x86_64'
nvidia-cufft-cu12==11.2.3.*; platform_machine == 'x86_64'
nvidia-curand-cu12==10.3.6.*; platform_machine == 'x86_64'
nvidia_cuda_nvcc_cu12==12.5.82; platform_machine == 'x86_64'
nvidia-cuda-nvrtc-cu12==12.5.82; platform_machine == 'x86_64'
nvidia_cuda_runtime_cu12==12.5.82; platform_machine == 'x86_64'
nvidia_cusolver_cu12==11.6.3.*; platform_machine == 'x86_64'
nvidia_cusparse_cu12==12.5.1.*; platform_machine == 'x86_64'
nvidia_nccl_cu12==2.23.4; platform_machine == 'x86_64'
nvidia_nvjitlink_cu12==12.5.82; platform_machine == 'x86_64'
onnx==1.16.*; platform_machine == 'x86_64'
onnxruntime-gpu==1.24.*; platform_machine == 'x86_64'
onnxruntime-gpu==1.22.*; platform_machine == 'x86_64'
protobuf==3.20.3; platform_machine == 'x86_64'

View File

@ -155,32 +155,33 @@ services:
### Enabling IPv6
IPv6 is disabled by default. To enable IPv6, modify your Frigate configuration as follows:
IPv6 is disabled by default. To enable IPv6, `listen.gotmpl` needs to be bind mounted with IPv6 enabled. For example:
```yaml
networking:
  ipv6:
    enabled: True
```
{{ if not .enabled }}
# intended for external traffic, protected by auth
listen 8971;
{{ else }}
# intended for external traffic, protected by auth
listen 8971 ssl;
# intended for internal traffic, not protected by auth
listen 5000;
```
### Listen on different ports
becomes
You can change the ports Nginx listens on using Frigate's configuration file. The internal port (unauthenticated) and external port (authenticated) can be changed independently. You can also specify an IP address using the format `ip:port` if you wish to bind the port to a specific interface. This may be useful, for example, to avoid exposing the internal port outside the container.
For example:
```yaml
networking:
  listen:
    internal: 127.0.0.1:5000
    external: 8971
```
{{ if not .enabled }}
# intended for external traffic, protected by auth
listen [::]:8971 ipv6only=off;
{{ else }}
# intended for external traffic, protected by auth
listen [::]:8971 ipv6only=off ssl;
:::warning
This setting is for advanced users. For the majority of use cases it's recommended to change the `ports` section of your Docker compose file or use the Docker `run` `--publish` option instead, e.g. `-p 443:8971`. Changing Frigate's ports may break some integrations.
:::
# intended for internal traffic, not protected by auth
listen [::]:5000 ipv6only=off;
```
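As a sketch of the recommended alternative, remapping the published port in Docker compose (rather than changing Frigate's listen configuration) might look like:

```yaml
services:
  frigate:
    ports:
      - "443:8971" # host port 443 -> Frigate's authenticated port
```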
## Base path
@ -233,7 +234,7 @@ To do this:
### Custom go2rtc version
Frigate currently includes go2rtc v1.9.13; there may be certain cases where you want to run a different version of go2rtc.
Frigate currently includes go2rtc v1.9.10; there may be certain cases where you want to run a different version of go2rtc.
To do this:

View File

@ -244,7 +244,7 @@ go2rtc:
- rtspx://192.168.1.1:7441/abcdefghijk
```
[See the go2rtc docs for more information](https://github.com/AlexxIT/go2rtc/tree/v1.9.13#source-rtsp)
[See the go2rtc docs for more information](https://github.com/AlexxIT/go2rtc/tree/v1.9.10#source-rtsp)
In the Unifi 2.0 update, Unifi Protect cameras had a change in audio sample rate which causes issues for ffmpeg. The input rate needs to be set for record if used directly with Unifi Protect.

View File

@ -5,167 +5,89 @@ title: Configuring Generative AI
## Configuration
A Generative AI provider can be configured in the global config, which will make the Generative AI features available for use. There are currently 4 native providers available to integrate with Frigate. Other providers that support the OpenAI standard API can also be used. See the OpenAI-Compatible section below.
A Generative AI provider can be configured in the global config, which will make the Generative AI features available for use. There are currently 3 native providers available to integrate with Frigate. Other providers that support the OpenAI standard API can also be used. See the OpenAI section below.
To use Generative AI, you must define a single provider at the global level of your Frigate configuration. If the provider you choose requires an API key, you may either directly paste it in your configuration, or store it in an environment variable prefixed with `FRIGATE_`.
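For illustration, a provider configuration that reads its key from such a variable might look like this sketch (the `FRIGATE_GENAI_API_KEY` variable name and the provider/model values are placeholders):

```yaml
genai:
  provider: gemini
  api_key: "{FRIGATE_GENAI_API_KEY}" # substituted from the environment at startup
  model: gemini-1.5-flash
```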
## Local Providers
Local providers run on your own hardware and keep all data processing private. These require a GPU or dedicated hardware for best performance.
## Ollama
:::warning
Running Generative AI models on CPU is not recommended, as high inference times make using Generative AI impractical.
Using Ollama on CPU is not recommended; high inference times make using Generative AI impractical.
:::
### Recommended Local Models
You must use a vision-capable model with Frigate. The following models are recommended for local deployment:
| Model | Notes |
| ------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `qwen3-vl`    | Strong visual and situational understanding, with a strong ability to identify smaller objects and interactions with objects.                                           |
| `qwen3.5`     | Strong situational understanding, but missing DeepStack from qwen3-vl, leading to worse performance when identifying objects in people's hands and other small details. |
| `Intern3.5VL` | Relatively fast with good vision comprehension |
| `gemma3` | Slower model with good vision and temporal understanding |
| `qwen2.5-vl` | Fast but capable model with good vision comprehension |
:::info
Each model is available in multiple parameter sizes (3b, 4b, 8b, etc.). Larger sizes are more capable of complex tasks and understanding of situations, but require more memory and computational resources. It is recommended to try multiple models and experiment to see which performs best.
:::
:::note
You should have at least 8 GB of RAM available (or VRAM if running on GPU) to run the 7B models, 16 GB to run the 13B models, and 24 GB to run the 33B models.
:::
### Model Types: Instruct vs Thinking
Most vision-language models are available as **instruct** models, which are fine-tuned to follow instructions and respond concisely to prompts. However, some models (such as certain Qwen-VL or minigpt variants) offer both **instruct** and **thinking** versions.
- **Instruct models** are always recommended for use with Frigate. These models generate direct, relevant, actionable descriptions that best fit Frigate's object and event summary use case.
- **Reasoning / Thinking models** are fine-tuned for more free-form, open-ended, and speculative outputs, which are typically not concise and may not provide the practical summaries Frigate expects. For this reason, Frigate does **not** recommend or support using thinking models.
Some models are labeled as **hybrid** (capable of both thinking and instruct tasks). In these cases, it is recommended to disable reasoning / thinking, which is generally model-specific (see your model's documentation).
**Recommendation:**
Always select the `-instruct` or documented instruct/tagged variant of any model you use in your Frigate configuration. If in doubt, refer to your model provider's documentation or model library for guidance on the correct model variant to use.
### llama.cpp
[llama.cpp](https://github.com/ggml-org/llama.cpp) is a C++ implementation of LLaMA that provides a high-performance inference server.
It is highly recommended to host the llama.cpp server on a machine with a discrete graphics card, or on an Apple silicon Mac for best performance.
#### Supported Models
You must use a vision capable model with Frigate. The llama.cpp server supports various vision models in GGUF format.
#### Configuration
All llama.cpp native options can be passed through `provider_options`, including `temperature`, `top_k`, `top_p`, `min_p`, `repeat_penalty`, `repeat_last_n`, `seed`, `grammar`, and more. See the [llama.cpp server documentation](https://github.com/ggml-org/llama.cpp/blob/master/tools/server/README.md) for a complete list of available parameters.
```yaml
genai:
  provider: llamacpp
  base_url: http://localhost:8080
  model: your-model-name
  provider_options:
    context_size: 16000 # Tell Frigate your context size so it can send the appropriate amount of information.
```
### Ollama
[Ollama](https://ollama.com/) allows you to self-host large language models and keep everything running locally. It is highly recommended to host this server on a machine with an Nvidia graphics card, or on an Apple silicon Mac for best performance.
Most of the 7b parameter 4-bit vision models will fit inside 8GB of VRAM. There is also a [Docker container](https://hub.docker.com/r/ollama/ollama) available.
Parallel requests also come with some caveats. You will need to set `OLLAMA_NUM_PARALLEL=1` and choose `OLLAMA_MAX_QUEUE` and `OLLAMA_MAX_LOADED_MODELS` values that are appropriate for your hardware and preferences. See the [Ollama documentation](https://docs.ollama.com/faq#how-does-ollama-handle-concurrent-requests).
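As a sketch, assuming Ollama runs via the Docker container mentioned above, these variables could be set like so (the queue and model-count values are examples to tune for your hardware):

```yaml
services:
  ollama:
    image: ollama/ollama
    environment:
      - OLLAMA_NUM_PARALLEL=1 # handle one request at a time
      - OLLAMA_MAX_QUEUE=64 # example: pending requests to hold
      - OLLAMA_MAX_LOADED_MODELS=1 # example: models kept in memory
```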
### Model Types: Instruct vs Thinking
Most vision-language models are available as **instruct** models, which are fine-tuned to follow instructions and respond concisely to prompts. However, some models (such as certain Qwen-VL or minigpt variants) offer both **instruct** and **thinking** versions.
- **Instruct models** are always recommended for use with Frigate. These models generate direct, relevant, actionable descriptions that best fit Frigate's object and event summary use case.
- **Thinking models** are fine-tuned for more free-form, open-ended, and speculative outputs, which are typically not concise and may not provide the practical summaries Frigate expects. For this reason, Frigate does **not** recommend or support using thinking models.
Some models are labeled as **hybrid** (capable of both thinking and instruct tasks). In these cases, Frigate will always use instruct-style prompts and specifically disables thinking-mode behaviors to ensure concise, useful responses.
**Recommendation:**
Always select the `-instruct` or documented instruct/tagged variant of any model you use in your Frigate configuration. If in doubt, refer to your model provider's documentation or model library for guidance on the correct model variant to use.
### Supported Models
You must use a vision capable model with Frigate. Current model variants can be found [in their model library](https://ollama.com/library). Note that Frigate will not automatically download the model you specify in your config. Ollama will try to download the model, but it may take longer than the timeout, so it is recommended to pull the model beforehand by running `ollama pull your_model` on your Ollama server/Docker container. Note that the model specified in Frigate's config must match the downloaded model tag.
:::info
Each model is available in multiple parameter sizes (3b, 4b, 8b, etc.). Larger sizes are more capable of complex tasks and understanding of situations, but require more memory and computational resources. It is recommended to try multiple models and experiment to see which performs best.
:::
:::tip
If you are trying to use a single model for Frigate and Home Assistant, it will need to support vision and tool calling. qwen3-VL supports vision and tools simultaneously in Ollama.
:::
Note that Frigate will not automatically download the model you specify in your config. Ollama will try to download the model but it may take longer than the timeout, so it is recommended to pull the model beforehand by running `ollama pull your_model` on your Ollama server/Docker container. The model specified in Frigate's config must match the downloaded model tag.
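For example, pre-pulling the tag used in the configuration example below (hedged; substitute your own model tag):

```sh
# download the model ahead of time so Frigate's first request doesn't hit the timeout
ollama pull qwen3-vl:4b
```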
The following models are recommended:
#### Configuration
| Model | Notes |
| ------------- | -------------------------------------------------------------------- |
| `qwen3-vl`    | Strong visual and situational understanding, higher VRAM requirement |
| `Intern3.5VL` | Relatively fast with good vision comprehension |
| `gemma3` | Strong frame-to-frame understanding, slower inference times |
| `qwen2.5-vl` | Fast but capable model with good vision comprehension |
:::note
You should have at least 8 GB of RAM available (or VRAM if running on GPU) to run the 7B models, 16 GB to run the 13B models, and 32 GB to run the 33B models.
:::
#### Ollama Cloud models
Ollama also supports [cloud models](https://ollama.com/cloud), where your local Ollama instance handles requests from Frigate, but model inference is performed in the cloud. Set up Ollama locally, sign in with your Ollama account, and specify the cloud model name in your Frigate config. For more details, see the Ollama cloud model [docs](https://docs.ollama.com/cloud).
### Configuration
```yaml
genai:
  provider: ollama
  base_url: http://localhost:11434
  model: qwen3-vl:4b
  provider_options: # other Ollama client options can be defined
    keep_alive: -1
    options:
      num_ctx: 8192 # make sure the context matches other services that are using ollama
```
### OpenAI-Compatible
Frigate supports any provider that implements the OpenAI API standard. This includes self-hosted solutions like [vLLM](https://docs.vllm.ai/), [LocalAI](https://localai.io/), and other OpenAI-compatible servers.
:::tip
For OpenAI-compatible servers (such as llama.cpp) that don't expose the configured context size in the API response, you can manually specify the context size in `provider_options`:
```yaml
genai:
  provider: openai
  base_url: http://your-llama-server
  model: your-model-name
  provider_options:
    context_size: 8192 # Specify the configured context size
```
This ensures Frigate uses the correct context window size when generating prompts.
:::
#### Configuration
```yaml
genai:
  provider: openai
  base_url: http://your-server:port
  api_key: your-api-key # May not be required for local servers
  model: your-model-name
```
To use a different OpenAI-compatible API endpoint, set the `OPENAI_BASE_URL` environment variable to your provider's API URL.
## Cloud Providers
Cloud providers run on remote infrastructure and require an API key for authentication. These services handle all model inference on their servers.
### Ollama Cloud
Ollama also supports [cloud models](https://ollama.com/cloud), where your local Ollama instance handles requests from Frigate, but model inference is performed in the cloud. Set up Ollama locally, sign in with your Ollama account, and specify the cloud model name in your Frigate config. For more details, see the Ollama cloud model [docs](https://docs.ollama.com/cloud).
#### Configuration
```yaml
genai:
  provider: ollama
  base_url: http://localhost:11434
  model: cloud-model-name
```
### Google Gemini
## Google Gemini
Google Gemini has a [free tier](https://ai.google.dev/pricing) for the API; however, the limits may not be sufficient for standard Frigate usage. Choose a plan appropriate for your installation.
#### Supported Models
### Supported Models
You must use a vision capable model with Frigate. Current model variants can be found [in their documentation](https://ai.google.dev/gemini-api/docs/models/gemini).
#### Get API Key
### Get API Key
To start using Gemini, you must first get an API key from [Google AI Studio](https://aistudio.google.com).
@ -174,7 +96,7 @@ To start using Gemini, you must first get an API key from [Google AI Studio](htt
3. Click "Create API key in new project"
4. Copy the API key for use in your config
#### Configuration
### Configuration
```yaml
genai:
@ -199,19 +121,19 @@ Other HTTP options are available, see the [python-genai documentation](https://g
:::
### OpenAI
## OpenAI
OpenAI does not have a free tier for their API. With the release of gpt-4o, pricing has been reduced and each generation should cost fractions of a cent if you choose to go this route.
#### Supported Models
### Supported Models
You must use a vision capable model with Frigate. Current model variants can be found [in their documentation](https://platform.openai.com/docs/models).
#### Get API Key
### Get API Key
To start using OpenAI, you must first [create an API key](https://platform.openai.com/api-keys) and [configure billing](https://platform.openai.com/settings/organization/billing/overview).
#### Configuration
### Configuration
```yaml
genai:
@ -220,19 +142,42 @@ genai:
  model: gpt-4o
```
### Azure OpenAI
:::note
To use a different OpenAI-compatible API endpoint, set the `OPENAI_BASE_URL` environment variable to your provider's API URL.
:::
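As an illustrative sketch, that variable can be passed to the Frigate container via compose (the endpoint URL is a placeholder):

```yaml
services:
  frigate:
    environment:
      - OPENAI_BASE_URL=https://llm.example.com/v1 # your provider's API URL
```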
:::tip
For OpenAI-compatible servers (such as llama.cpp) that don't expose the configured context size in the API response, you can manually specify the context size in `provider_options`:
```yaml
genai:
  provider: openai
  base_url: http://your-llama-server
  model: your-model-name
  provider_options:
    context_size: 8192 # Specify the configured context size
```
This ensures Frigate uses the correct context window size when generating prompts.
:::
## Azure OpenAI
Microsoft offers several vision models through Azure OpenAI. A subscription is required.
#### Supported Models
### Supported Models
You must use a vision capable model with Frigate. Current model variants can be found [in their documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models).
#### Create Resource and Get API Key
### Create Resource and Get API Key
To start using Azure OpenAI, you must first [create a resource](https://learn.microsoft.com/azure/cognitive-services/openai/how-to/create-resource?pivots=web-portal#create-a-resource). You'll need your API key, model name, and resource URL, which must include the `api-version` parameter (see the example below).
#### Configuration
### Configuration
```yaml
genai:

View File

@ -11,7 +11,7 @@ By default, descriptions will be generated for all tracked objects and all zones
Optionally, you can generate the description using a snapshot (if enabled) by setting `use_snapshot` to `True`. By default, this is set to `False`, which sends the uncompressed images from the `detect` stream collected over the object's lifetime to the model. Once the object lifecycle ends, only a single compressed and cropped thumbnail is saved with the tracked object. Using a snapshot might be useful when you want to _regenerate_ a tracked object's description as it will provide the AI with a higher-quality image (typically downscaled by the AI itself) than the cropped/compressed thumbnail. Using a snapshot otherwise has a trade-off in that only a single image is sent to your provider, which will limit the model's ability to determine object movement or direction.
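For illustration, enabling snapshot-based descriptions for a single camera might look like the following sketch (the camera name is a placeholder, and the `objects.genai` placement assumes the section shown in the reference config):

```yaml
cameras:
  front_door:
    objects:
      genai:
        use_snapshot: True # regenerate descriptions from the higher-quality snapshot
```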
Generative AI object descriptions can also be toggled dynamically for a camera via MQTT with the topic `frigate/<camera_name>/object_descriptions/set`. See the [MQTT documentation](/integrations/mqtt#frigatecamera_nameobject_descriptionsset).
Generative AI object descriptions can also be toggled dynamically for a camera via MQTT with the topic `frigate/<camera_name>/object_descriptions/set`. See the [MQTT documentation](/integrations/mqtt/#frigatecamera_nameobjectdescriptionsset).
## Usage and Best Practices

View File

@ -7,7 +7,7 @@ Generative AI can be used to automatically generate structured summaries of revi
Requests for a summary are sent automatically to your AI provider for alert review items when the activity has ended; they can also optionally be enabled for detections.
Generative AI review summaries can also be toggled dynamically for a [camera via MQTT](/integrations/mqtt#frigatecamera_namereview_descriptionsset).
Generative AI review summaries can also be toggled dynamically for a [camera via MQTT](/integrations/mqtt/#frigatecamera_namereviewdescriptionsset).
## Review Summary Usage and Best Practices

View File

@ -12,20 +12,23 @@ Some of Frigate's enrichments can use a discrete GPU or integrated GPU for accel
Object detection and enrichments (like Semantic Search, Face Recognition, and License Plate Recognition) are independent features. To use a GPU / NPU for object detection, see the [Object Detectors](/configuration/object_detectors.md) documentation. If you want to use your GPU for any supported enrichments, you must choose the appropriate Frigate Docker image for your GPU / NPU and configure the enrichment according to its specific documentation.
- **AMD**
- ROCm support in the `-rocm` Frigate image is automatically detected for enrichments, but only some enrichment models are available due to ROCm's focus on LLMs and limited stability with certain neural network models. Frigate disables models that perform poorly or are unstable to ensure reliable operation, so only compatible enrichments may be active.
- **Intel**
- OpenVINO will automatically be detected and used for enrichments in the default Frigate image.
- **Note:** Intel NPUs have limited model support for enrichments. GPU is recommended for enrichments when available.
- **Nvidia**
- Nvidia GPUs will automatically be detected and used for enrichments in the `-tensorrt` Frigate image.
- Jetson devices will automatically be detected and used for enrichments in the `-tensorrt-jp6` Frigate image.
- **RockChip**
- RockChip NPU will automatically be detected and used for semantic search v1 and face recognition in the `-rk` Frigate image.
Utilizing a GPU for enrichments does not require you to use the same GPU for object detection. For example, you can run the `tensorrt` Docker image to run enrichments on an Nvidia GPU and still use other dedicated hardware like a Coral or Hailo for object detection. However, one combination that is not supported is the `tensorrt` image for object detection on an Nvidia GPU and Intel iGPU for enrichments.
Utilizing a GPU for enrichments does not require you to use the same GPU for object detection. For example, you can run the `tensorrt` Docker image for enrichments and still use other dedicated hardware like a Coral or Hailo for object detection. However, one combination that is not supported is TensorRT for object detection and OpenVINO for enrichments.
:::note

View File

@ -29,12 +29,12 @@ cameras:
When running Frigate through the HA Add-on, the Frigate `/config` directory is mapped to `/addon_configs/<addon_directory>` in the host, where `<addon_directory>` is specific to the variant of the Frigate Add-on you are running.
| Add-on Variant | Configuration directory |
| -------------------------- | ----------------------------------------- |
| Frigate | `/addon_configs/ccab4aaf_frigate` |
| Frigate (Full Access) | `/addon_configs/ccab4aaf_frigate-fa` |
| Frigate Beta | `/addon_configs/ccab4aaf_frigate-beta` |
| Frigate Beta (Full Access) | `/addon_configs/ccab4aaf_frigate-fa-beta` |
| Add-on Variant | Configuration directory |
| -------------------------- | -------------------------------------------- |
| Frigate | `/addon_configs/ccab4aaf_frigate` |
| Frigate (Full Access) | `/addon_configs/ccab4aaf_frigate-fa` |
| Frigate Beta | `/addon_configs/ccab4aaf_frigate-beta` |
| Frigate Beta (Full Access) | `/addon_configs/ccab4aaf_frigate-fa-beta` |
**Whenever you see `/config` in the documentation, it refers to this directory.**
@ -109,16 +109,15 @@ detectors:
record:
  enabled: True
  motion:
    retain:
      days: 7
      mode: motion
  alerts:
    retain:
      days: 30
      mode: motion
  detections:
    retain:
      days: 30
      mode: motion
snapshots:
  enabled: True
@ -138,10 +137,7 @@ cameras:
          - detect
    motion:
      mask:
        timestamp:
          friendly_name: "Camera timestamp"
          enabled: true
          coordinates: "0.000,0.427,0.002,0.000,0.999,0.000,0.999,0.781,0.885,0.456,0.700,0.424,0.701,0.311,0.507,0.294,0.453,0.347,0.451,0.400"
        - 0.000,0.427,0.002,0.000,0.999,0.000,0.999,0.781,0.885,0.456,0.700,0.424,0.701,0.311,0.507,0.294,0.453,0.347,0.451,0.400
```
### Standalone Intel Mini PC with USB Coral
@ -169,16 +165,15 @@ detectors:
record:
  enabled: True
  motion:
    retain:
      days: 7
      mode: motion
  alerts:
    retain:
      days: 30
      mode: motion
  detections:
    retain:
      days: 30
      mode: motion
snapshots:
  enabled: True
@ -198,10 +193,7 @@ cameras:
          - detect
    motion:
      mask:
        timestamp:
          friendly_name: "Camera timestamp"
          enabled: true
          coordinates: "0.000,0.427,0.002,0.000,0.999,0.000,0.999,0.781,0.885,0.456,0.700,0.424,0.701,0.311,0.507,0.294,0.453,0.347,0.451,0.400"
        - 0.000,0.427,0.002,0.000,0.999,0.000,0.999,0.781,0.885,0.456,0.700,0.424,0.701,0.311,0.507,0.294,0.453,0.347,0.451,0.400
```
### Home Assistant integrated Intel Mini PC with OpenVino
@ -239,16 +231,15 @@ model:
record:
  enabled: True
  motion:
    retain:
      days: 7
      mode: motion
  alerts:
    retain:
      days: 30
      mode: motion
  detections:
    retain:
      days: 30
      mode: motion
snapshots:
  enabled: True
@ -268,8 +259,5 @@ cameras:
          - detect
    motion:
      mask:
        timestamp:
          friendly_name: "Camera timestamp"
          enabled: true
          coordinates: "0.000,0.427,0.002,0.000,0.999,0.000,0.999,0.781,0.885,0.456,0.700,0.424,0.701,0.311,0.507,0.294,0.453,0.347,0.451,0.400"
        - 0.000,0.427,0.002,0.000,0.999,0.000,0.999,0.781,0.885,0.456,0.700,0.424,0.701,0.311,0.507,0.294,0.453,0.347,0.451,0.400
```

View File

@ -33,55 +33,18 @@ Your config file will be updated with the relative coordinates of the mask/zone:
```yaml
motion:
  mask:
    # Motion mask name (required)
    mask1:
      # Optional: A friendly name for the mask
      friendly_name: "Timestamp area"
      # Optional: Whether this mask is active (default: true)
      enabled: true
      # Required: Coordinates polygon for the mask
      coordinates: "0.000,0.427,0.002,0.000,0.999,0.000,0.999,0.781,0.885,0.456,0.700,0.424,0.701,0.311,0.507,0.294,0.453,0.347,0.451,0.400"
  mask: "0.000,0.427,0.002,0.000,0.999,0.000,0.999,0.781,0.885,0.456,0.700,0.424,0.701,0.311,0.507,0.294,0.453,0.347,0.451,0.400"
```
Multiple motion masks can be listed in your config:
Multiple masks can be listed in your config.
```yaml
motion:
  mask:
    mask1:
      friendly_name: "Timestamp area"
      enabled: true
      coordinates: "0.239,1.246,0.175,0.901,0.165,0.805,0.195,0.802"
    mask2:
      friendly_name: "Tree area"
      enabled: true
      coordinates: "0.000,0.427,0.002,0.000,0.999,0.000,0.999,0.781,0.885,0.456"
    - 0.239,1.246,0.175,0.901,0.165,0.805,0.195,0.802
    - 0.000,0.427,0.002,0.000,0.999,0.000,0.999,0.781,0.885,0.456
```
Object filter masks can also be created through the UI or manually in the config. They are configured under the object filters section for each object type:
```yaml
objects:
  filters:
    person:
      mask:
        person_filter1:
          friendly_name: "Roof area"
          enabled: true
          coordinates: "0.000,0.000,1.000,0.000,1.000,0.400,0.000,0.400"
    car:
      mask:
        car_filter1:
          friendly_name: "Sidewalk area"
          enabled: true
          coordinates: "0.000,0.700,1.000,0.700,1.000,1.000,0.000,1.000"
```
## Enabling/Disabling Masks
Both motion masks and object filter masks can be toggled on or off without removing them from the configuration. Disabled masks are completely ignored at runtime - they will not affect motion detection or object filtering. This is useful for temporarily disabling a mask during certain seasons or times of day without modifying the configuration.
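For example, a mask kept in the config but turned off might look like this (mask name and coordinates reused from the example above):

```yaml
motion:
  mask:
    mask2:
      friendly_name: "Tree area"
      enabled: false # ignored at runtime until re-enabled
      coordinates: "0.000,0.427,0.002,0.000,0.999,0.000,0.999,0.781,0.885,0.456"
```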
### Further Clarification
This is a response to a [question posed on reddit](https://www.reddit.com/r/homeautomation/comments/ppxdve/replacing_my_doorbell_with_a_security_camera_a_6/hd876w4?utm_source=share&utm_medium=web2x&context=3):

View File

@ -38,6 +38,7 @@ Remember that motion detection is just used to determine when object detection s
The threshold value dictates how much of a change in a pixel's luminance is required to be considered motion.
```yaml
# default threshold value
motion:
  # Optional: The threshold passed to cv2.threshold to determine if a pixel is different enough to be counted as motion. (default: shown below)
  # Increasing this value will make motion detection less sensitive and decreasing it will make motion detection more sensitive.
@ -52,6 +53,7 @@ Watching the motion boxes in the debug view, increase the threshold until you on
### Contour Area
```yaml
# default contour_area value
motion:
  # Optional: Minimum size in pixels in the resized motion image that counts as motion (default: shown below)
  # Increasing this value will prevent smaller areas of motion from being detected. Decreasing will
@ -79,49 +81,27 @@ However, if the preferred day settings do not work well at night it is recommend
## Tuning For Large Changes In Motion
### Lightning Threshold
```yaml
# default lightning_threshold:
motion:
  # Optional: The percentage of the image used to detect lightning or
  # other substantial changes where motion detection needs to
  # recalibrate. (default: shown below)
  # Increasing this value will make motion detection more likely
  # to consider lightning or IR mode changes as valid motion.
  # Decreasing this value will make motion detection more likely
  # to ignore large amounts of motion such as a person
  # approaching a doorbell camera.
  # Optional: The percentage of the image used to detect lightning or other substantial changes where motion detection
  # needs to recalibrate. (default: shown below)
  # Increasing this value will make motion detection more likely to consider lightning or ir mode changes as valid motion.
  # Decreasing this value will make motion detection more likely to ignore large amounts of motion such as a person approaching
  # a doorbell camera.
  lightning_threshold: 0.8
```
Large changes in motion like PTZ moves and camera switches between Color and IR mode should result in a pause in object detection. `lightning_threshold` defines the percentage of the image used to detect these substantial changes. Increasing this value makes motion detection more likely to treat large changes (like IR mode switches) as valid motion. Decreasing it makes motion detection more likely to ignore large amounts of motion, such as a person approaching a doorbell camera.
Note that `lightning_threshold` does **not** stop motion-based recordings from being saved — it only prevents additional motion analysis after the threshold is exceeded, reducing false positive object detections during high-motion periods (e.g. storms or PTZ sweeps) without interfering with recordings.
:::warning
Some cameras, like doorbell cameras, may have missed detections when someone walks directly in front of the camera and the `lightning_threshold` causes motion detection to recalibrate. In this case, it may be desirable to increase the `lightning_threshold` to ensure these objects are not missed.
Some cameras, like doorbell cameras, may have missed detections when someone walks directly in front of the camera and the `lightning_threshold` causes motion detection to be recalibrated. In this case, it may be desirable to increase the `lightning_threshold` to ensure these objects are not missed.
:::
### Skip Motion On Large Scene Changes
:::note
```yaml
motion:
  # Optional: Fraction of the frame that must change in a single update
  # before Frigate will completely ignore any motion in that frame.
  # Values range between 0.0 and 1.0, leave unset (null) to disable.
  # Setting this to 0.7 would cause Frigate to **skip** reporting
  # motion boxes when more than 70% of the image appears to change
  # (e.g. during lightning storms, IR/color mode switches, or other
  # sudden lighting events).
  skip_motion_threshold: 0.7
```
This option is handy when you want to prevent large transient changes from triggering recordings or object detection. It differs from `lightning_threshold` because it completely suppresses motion instead of just forcing a recalibration.
:::warning
When the skip threshold is exceeded, **no motion is reported** for that frame, meaning **nothing is recorded** for that frame. That means you can miss something important, like a PTZ camera auto-tracking an object or activity while the camera is moving. If you prefer to guarantee that every frame is saved, leave this unset and accept occasional recordings containing scene noise — they typically only take up a few megabytes and are quick to scan in the timeline UI.
Lightning threshold does not stop motion-based recordings from being saved.
:::
Large changes in motion like PTZ moves and camera switches between Color and IR mode should result in a pause in object detection. This is done via the `lightning_threshold` configuration. It is defined as the percentage of the image used to detect lightning or other substantial changes where motion detection needs to recalibrate. Increasing this value will make motion detection more likely to consider lightning or IR mode changes as valid motion. Decreasing this value will make motion detection more likely to ignore large amounts of motion such as a person approaching a doorbell camera.

View File

@ -34,7 +34,7 @@ Frigate supports multiple different detectors that work on different types of ha
**Nvidia GPU**
- [ONNX](#onnx): Nvidia GPUs will automatically be detected and used as a detector in the `-tensorrt` Frigate image when a supported ONNX model is configured.
- [ONNX](#onnx): TensorRT will automatically be detected and used as a detector in the `-tensorrt` Frigate image when a supported ONNX model is configured.
**Nvidia Jetson** <CommunityBadge />
@ -65,7 +65,7 @@ This does not affect using hardware for accelerating other tasks such as [semant
# Officially Supported Detectors
Frigate provides a number of builtin detector types. By default, Frigate will use a single CPU detector. Other detectors may require additional configuration as described below. When using multiple detectors they will run in dedicated processes, but pull from a common queue of detection requests from across all cameras.
Frigate provides the following builtin detector types: `cpu`, `edgetpu`, `hailo8l`, `memryx`, `onnx`, `openvino`, `rknn`, and `tensorrt`. By default, Frigate will use a single CPU detector. Other detectors may require additional configuration as described below. When using multiple detectors they will run in dedicated processes, but pull from a common queue of detection requests from across all cameras.
## Edge TPU Detector
@ -157,13 +157,7 @@ A TensorFlow Lite model is provided in the container at `/edgetpu_model.tflite`
#### YOLOv9
YOLOv9 models that are compiled for TensorFlow Lite and properly quantized are supported, but not included by default. See the [instructions](#yolov9-for-google-coral-support) for downloading a model with support for the Google Coral.
:::tip
**Frigate+ Users:** Follow the [instructions](../integrations/plus#use-models) to set a model ID in your config file.
:::
YOLOv9 models that are compiled for TensorFlow Lite and properly quantized are supported, but not included by default. [Download the model](https://github.com/dbro/frigate-detector-edgetpu-yolo9/releases/download/v1.0/yolov9-s-relu6-best_320_int8_edgetpu.tflite), bind mount the file into the container, and provide the path with `model.path`. Note that the linked model requires a 17-label [labelmap file](https://raw.githubusercontent.com/dbro/frigate-detector-edgetpu-yolo9/refs/heads/main/labels-coco17.txt) that includes only 17 COCO classes.
<details>
<summary>YOLOv9 Setup & Config</summary>
@ -660,9 +654,11 @@ ONNX is an open format for building machine learning models, Frigate supports ru
If the correct build is used for your GPU then the GPU will be detected and used automatically.
- **AMD**
- ROCm will automatically be detected and used with the ONNX detector in the `-rocm` Frigate image.
- **Intel**
- OpenVINO will automatically be detected and used with the ONNX detector in the default Frigate image.
- **Nvidia**
@ -1518,11 +1514,11 @@ RF-DETR can be exported as ONNX by running the command below. You can copy and p
```sh
docker build . --build-arg MODEL_SIZE=Nano --rm --output . -f- <<'EOF'
FROM python:3.12 AS build
FROM python:3.11 AS build
RUN apt-get update && apt-get install --no-install-recommends -y libgl1 && rm -rf /var/lib/apt/lists/*
COPY --from=ghcr.io/astral-sh/uv:0.10.4 /uv /bin/
COPY --from=ghcr.io/astral-sh/uv:0.8.0 /uv /bin/
WORKDIR /rfdetr
RUN uv pip install --system rfdetr[onnxexport] torch==2.8.0 onnx==1.19.1 transformers==4.57.6 onnxscript
RUN uv pip install --system rfdetr[onnxexport] torch==2.8.0 onnx==1.19.1 onnxscript
ARG MODEL_SIZE
RUN python3 -c "from rfdetr import RFDETR${MODEL_SIZE}; x = RFDETR${MODEL_SIZE}(resolution=320); x.export(simplify=True)"
FROM scratch
@ -1560,11 +1556,7 @@ cd tensorrt_demos/yolo
python3 yolo_to_onnx.py -m yolov7-320
```
#### YOLOv9 for Google Coral Support
[Download the model](https://github.com/dbro/frigate-detector-edgetpu-yolo9/releases/download/v1.0/yolov9-s-relu6-best_320_int8_edgetpu.tflite), bind mount the file into the container, and provide the path with `model.path`. Note that the linked model requires a 17-label [labelmap file](https://raw.githubusercontent.com/dbro/frigate-detector-edgetpu-yolo9/refs/heads/main/labels-coco17.txt) that includes only 17 COCO classes.
#### YOLOv9 for other detectors
#### YOLOv9
YOLOv9 models can be exported as ONNX using the command below. You can copy and paste the whole thing to your terminal and execute, altering `MODEL_SIZE=t` and `IMG_SIZE=320` in the first line to the [model size](https://github.com/WongKinYiu/yolov9#performance) you would like to convert (available model sizes are `t`, `s`, `m`, `c`, and `e`, common image sizes are `320` and `640`).

View File

@ -139,13 +139,7 @@ record:
:::tip
When using `hwaccel_args`, hardware encoding is used for timelapse generation. This setting can be overridden for a specific camera (e.g., when camera resolution exceeds hardware encoder limits); set `cameras.<camera>.record.export.hwaccel_args` with the appropriate settings. Using an unrecognized value or empty string will fall back to software encoding (libx264).
:::
:::tip
The encoder determines its own behavior so the resulting file size may be undesirably large.
When using `hwaccel_args` globally, hardware encoding is used for timelapse generation. The encoder determines its own behavior, so the resulting file size may be undesirably large.
To reduce the output file size, the ffmpeg parameter `-qp n` can be utilized (where `n` stands for the value of the quantisation parameter). The value can be adjusted to get an acceptable tradeoff between quality and file size for the given scenario.
:::
@ -154,16 +148,19 @@ To reduce the output file size the ffmpeg parameter `-qp n` can be utilized (whe
Apple devices running the Safari browser may fail to playback h.265 recordings. The [apple compatibility option](../configuration/camera_specific.md#h265-cameras-via-safari) should be used to ensure seamless playback on Apple devices.
## Syncing Media Files With Disk
## Syncing Recordings With Disk
Media files (event snapshots, event thumbnails, review thumbnails, previews, exports, and recordings) can become orphaned when database entries are deleted but the corresponding files remain on disk.
In some cases recording files may be deleted, but Frigate will not know this has happened. Recordings sync can be enabled, which will tell Frigate to check the file system and delete any database entries for files which don't exist.
Normal operation may leave small numbers of orphaned files until Frigate's scheduled cleanup, but crashes, configuration changes, or upgrades may cause more orphaned files that Frigate does not clean up. This feature checks the file system for media files and removes any that are not referenced in the database.
```yaml
record:
  sync_recordings: True
```
The Maintenance pane in the Frigate UI or an API endpoint `POST /api/media/sync` can be used to trigger a media sync. When using the API, a job ID is returned and the operation continues on the server. Status can be checked with the `/api/media/sync/status/{job_id}` endpoint.
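A hedged sketch of driving this from the command line, assuming the documented endpoints on the unauthenticated internal port (replace `<job_id>` with the ID returned by the first call):

```sh
# start a media sync; the response contains a job ID
curl -X POST http://127.0.0.1:5000/api/media/sync

# poll the status of the running job
curl http://127.0.0.1:5000/api/media/sync/status/<job_id>
```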
This feature is meant to fix variations in files, not completely delete entries in the database. If you delete all of your media, don't use `sync_recordings`; instead, stop Frigate, delete the `frigate.db` database, and restart.
:::warning
This operation uses considerable CPU resources and includes a safety threshold that aborts if more than 50% of files would be deleted. Only run when necessary. If you set `force: true` the safety threshold will be bypassed; do not use `force` unless you are certain the deletions are intended.
The sync operation uses considerable CPU resources and in most cases is not needed; only enable it when necessary.
:::

View File

@ -73,19 +73,11 @@ tls:
  # Optional: Enable TLS for port 8971 (default: shown below)
  enabled: True

# Optional: Networking configuration
# Optional: IPv6 configuration
networking:
  # Optional: Enable IPv6 on 5000, and 8971 if tls is configured (default: shown below)
  ipv6:
    enabled: False
  # Optional: Override ports Frigate uses for listening (defaults: shown below)
  # An IP address may also be provided to bind to a specific interface, e.g. ip:port
  # NOTE: This setting is for advanced users and may break some integrations. The majority
  # of users should change ports in the docker compose file
  # or use the docker run `--publish` option to select a different port.
  listen:
    internal: 5000
    external: 8971
# Optional: Proxy configuration
proxy:
@ -345,15 +337,7 @@ objects:
  # Optional: mask to prevent all object types from being detected in certain areas (default: no mask)
  # Checks based on the bottom center of the bounding box of the object.
  # NOTE: This mask is COMBINED with the object type specific mask below
  mask:
    # Object filter mask name (required)
    mask1:
      # Optional: A friendly name for the mask
      friendly_name: "Object filter mask area"
      # Optional: Whether this mask is active (default: true)
      enabled: true
      # Required: Coordinates polygon for the mask
      coordinates: "0.000,0.000,0.781,0.000,0.781,0.278,0.000,0.278"
  mask: 0.000,0.000,0.781,0.000,0.781,0.278,0.000,0.278
  # Optional: filters to reduce false positives for specific object types
  filters:
    person:
@ -373,15 +357,7 @@ objects:
      threshold: 0.7
      # Optional: mask to prevent this object type from being detected in certain areas (default: no mask)
      # Checks based on the bottom center of the bounding box of the object
      mask:
        # Object filter mask name (required)
        mask1:
          # Optional: A friendly name for the mask
          friendly_name: "Object filter mask area"
          # Optional: Whether this mask is active (default: true)
          enabled: true
          # Required: Coordinates polygon for the mask
          coordinates: "0.000,0.000,0.781,0.000,0.781,0.278,0.000,0.278"
      mask: 0.000,0.000,0.781,0.000,0.781,0.278,0.000,0.278
  # Optional: Configuration for AI generated tracked object descriptions
  genai:
    # Optional: Enable AI object description generation (default: shown below)
@ -480,16 +456,12 @@ motion:
# Increasing this value will make motion detection less sensitive and decreasing it will make motion detection more sensitive.
# The value should be between 1 and 255.
threshold: 30
# Optional: The percentage of the image used to detect lightning or other substantial changes where motion detection needs
# to recalibrate and motion checks stop for that frame. Recordings are unaffected. (default: shown below)
# Optional: The percentage of the image used to detect lightning or other substantial changes where motion detection
# needs to recalibrate. (default: shown below)
# Increasing this value will make motion detection more likely to consider lightning or IR mode changes as valid motion.
# Decreasing this value will make motion detection more likely to ignore large amounts of motion such as a person approaching a doorbell camera.
# Decreasing this value will make motion detection more likely to ignore large amounts of motion such as a person approaching
# a doorbell camera.
lightning_threshold: 0.8
# Optional: Fraction of the frame that must change in a single update before motion boxes are completely
# ignored. Values range between 0.0 and 1.0. When exceeded, no motion boxes are reported and **no motion
# recording** is created for that frame. Leave unset (null) to disable this feature. Use with care on PTZ
# cameras or in other situations where you require guaranteed frame capture.
skip_motion_threshold: None
# Optional: Minimum size in pixels in the resized motion image that counts as motion (default: shown below)
# Increasing this value will prevent smaller areas of motion from being detected. Decreasing will
# make motion detection more sensitive to smaller moving objects.
@ -509,15 +481,7 @@ motion:
frame_height: 100
# Optional: motion mask
# NOTE: see docs for more detailed info on creating masks
mask:
# Motion mask name (required)
mask1:
# Optional: A friendly name for the mask
friendly_name: "Motion mask area"
# Optional: Whether this mask is active (default: true)
enabled: true
# Required: Coordinates polygon for the mask
coordinates: "0.000,0.469,1.000,0.469,1.000,1.000,0.000,1.000"
mask: 0.000,0.469,1.000,0.469,1.000,1.000,0.000,1.000
# Optional: improve contrast (default: shown below)
# Enables dynamic contrast improvement. This should help improve night detections at the cost of making motion detection more sensitive
# for daytime.
@ -546,6 +510,8 @@ record:
# Optional: Number of minutes to wait between cleanup runs (default: shown below)
# This can be used to reduce the frequency of deleting recording segments from disk if you want to minimize i/o
expire_interval: 60
# Optional: Two-way sync recordings database with disk on startup and once a day (default: shown below).
sync_recordings: False
# Optional: Continuous retention settings
continuous:
# Optional: Number of days to retain recordings regardless of tracked objects or motion (default: shown below)
@ -568,8 +534,6 @@ record:
# The -r (framerate) dictates how smooth the output video is.
# So the args would be -vf setpts=0.02*PTS -r 30 in that case.
timelapse_args: "-vf setpts=0.04*PTS -r 30"
# Optional: Global hardware acceleration settings for timelapse exports. (default: inherit)
hwaccel_args: auto
# Optional: Recording Preview Settings
preview:
# Optional: Quality of recording preview (default: shown below).
@ -788,7 +752,7 @@ classification:
interval: None
# Optional: Restream configuration
# Uses https://github.com/AlexxIT/go2rtc (v1.9.13)
# Uses https://github.com/AlexxIT/go2rtc (v1.9.10)
# NOTE: The default go2rtc API port (1984) must be used,
# changing this port for the integrated go2rtc instance is not supported.
go2rtc:
@ -874,11 +838,6 @@ cameras:
# Optional: camera specific output args (default: inherit)
# output_args:
# Optional: camera specific hwaccel args for timelapse export (default: inherit)
# record:
# export:
# hwaccel_args:
# Optional: timeout for highest scoring image before allowing it
# to be replaced by a newer image. (default: shown below)
best_image_timeout: 60
@ -894,9 +853,6 @@ cameras:
front_steps:
# Optional: A friendly name or descriptive text for the zones
friendly_name: ""
# Optional: Whether this zone is active (default: shown below)
# Disabled zones are completely ignored at runtime - no object tracking or debug drawing
enabled: True
# Required: List of x,y coordinates to define the polygon of the zone.
# NOTE: Presence in a zone is evaluated only based on the bottom center of the objects bounding box.
coordinates: 0.033,0.306,0.324,0.138,0.439,0.185,0.042,0.428

View File

@ -7,7 +7,7 @@ title: Restream
Frigate can restream your video feed as an RTSP feed for other applications such as Home Assistant to utilize it at `rtsp://<frigate_host>:8554/<camera_name>`. Port 8554 must be open. [This allows you to use a video feed for detection in Frigate and Home Assistant live view at the same time without having to make two separate connections to the camera](#reduce-connections-to-camera). The video feed is copied from the original video feed directly to avoid re-encoding. This feed does not include any annotation by Frigate.
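For example, the restreamed feed can be checked with any RTSP-capable player. A minimal sketch, assuming `ffplay` is installed and using placeholder host and camera names:

```bash
# Play the restreamed feed for a camera named front_door
ffplay rtsp://frigate.local:8554/front_door
```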
Frigate uses [go2rtc](https://github.com/AlexxIT/go2rtc/tree/v1.9.13) to provide its restream and MSE/WebRTC capabilities. The go2rtc config is defined under the `go2rtc` key in the Frigate config; see the [go2rtc docs](https://github.com/AlexxIT/go2rtc/tree/v1.9.13#configuration) for more advanced configurations and features.
Frigate uses [go2rtc](https://github.com/AlexxIT/go2rtc/tree/v1.9.10) to provide its restream and MSE/WebRTC capabilities. The go2rtc config is defined under the `go2rtc` key in the Frigate config; see the [go2rtc docs](https://github.com/AlexxIT/go2rtc/tree/v1.9.10#configuration) for more advanced configurations and features.
:::note
@ -206,7 +206,7 @@ Enabling arbitrary exec sources allows execution of arbitrary commands through g
## Advanced Restream Configurations
The [exec](https://github.com/AlexxIT/go2rtc/tree/v1.9.13#source-exec) source in go2rtc can be used for custom ffmpeg commands. An example is below:
The [exec](https://github.com/AlexxIT/go2rtc/tree/v1.9.10#source-exec) source in go2rtc can be used for custom ffmpeg commands. An example is below:
:::warning

View File

@ -76,40 +76,6 @@ Switching between V1 and V2 requires reindexing your embeddings. The embeddings
:::
### GenAI Provider
Frigate can use a GenAI provider for semantic search embeddings when that provider has the `embeddings` role. Currently, only **llama.cpp** supports multimodal embeddings (both text and images).
To use llama.cpp for semantic search:
1. Configure a GenAI provider in your config with `embeddings` in its `roles`.
2. Set `semantic_search.model` to the GenAI config key (e.g. `default`).
3. Start the llama.cpp server with `--embeddings` and `--mmproj` for image support:
```yaml
genai:
default:
provider: llamacpp
base_url: http://localhost:8080
model: your-model-name
roles:
- embeddings
- vision
- tools
semantic_search:
enabled: True
model: default
```
The llama.cpp server must be started with `--embeddings` to enable the embeddings API, and with a multimodal embeddings model loaded. See the [llama.cpp server documentation](https://github.com/ggml-org/llama.cpp/blob/master/tools/server/README.md) for details.
:::note
Switching between Jina models and a GenAI provider requires reindexing. Embeddings from different backends are incompatible.
:::
### GPU Acceleration
The CLIP models are downloaded in ONNX format, and the `large` model can be accelerated using GPU hardware, when available. This depends on the Docker build that is used. You can also target a specific device in a multi-GPU installation.
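A minimal sketch of selecting the `large` model so a GPU can be used, assuming a Docker build with GPU support:

```yaml
semantic_search:
  enabled: True
  model_size: large # runs on GPU when the build supports it; small runs on CPU
```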

View File

@ -9,25 +9,4 @@ Snapshots are accessible in the UI in the Explore pane. This allows for quick su
To only save snapshots for objects that enter a specific zone, [see the zone docs](./zones.md#restricting-snapshots-to-specific-zones)
Snapshots sent via MQTT are configured in the [config file](/configuration) under `cameras -> your_camera -> mqtt`
## Frame Selection
Frigate does not save every frame — it picks a single "best" frame for each tracked object and uses it for both the snapshot and clean copy. As the object is tracked across frames, Frigate continuously evaluates whether the current frame is better than the previous best based on detection confidence, object size, and the presence of key attributes like faces or license plates. Frames where the object touches the edge of the frame are deprioritized. The snapshot is written to disk once tracking ends using whichever frame was determined to be the best.
MQTT snapshots are published more frequently — each time a better thumbnail frame is found during tracking, or when the current best image is older than `best_image_timeout` (default: 60s). These use their own annotation settings configured under `cameras -> your_camera -> mqtt`.
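For example, the latest snapshot JPEG can be captured from MQTT. A minimal sketch, assuming the mosquitto clients are installed and using placeholder broker, camera, and label names:

```bash
# Save the next person snapshot published for the front_door camera
mosquitto_sub -h mqtt.server.com -t "frigate/front_door/person/snapshot" -C 1 > snapshot.jpg
```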
## Clean Copy
Frigate can produce up to two snapshot files per event, each used in different places:
| Version | File | Annotations | Used by |
| --- | --- | --- | --- |
| **Regular snapshot** | `<camera>-<id>.jpg` | Respects your `timestamp`, `bounding_box`, `crop`, and `height` settings | API (`/api/events/<id>/snapshot.jpg`), MQTT (`<camera>/<label>/snapshot`), Explore pane in the UI |
| **Clean copy** | `<camera>-<id>-clean.webp` | Always unannotated — no bounding box, no timestamp, no crop, full resolution | API (`/api/events/<id>/snapshot-clean.webp`), [Frigate+](/plus/first_model) submissions, "Download Clean Snapshot" in the UI |
MQTT snapshots are configured separately under `cameras -> your_camera -> mqtt` and are unrelated to the clean copy.
The clean copy is required for submitting events to [Frigate+](/plus/first_model) — if you plan to use Frigate+, keep `clean_copy` enabled regardless of your other snapshot settings.
If you are not using Frigate+ and `timestamp`, `bounding_box`, and `crop` are all disabled, the regular snapshot is already effectively clean, so `clean_copy` provides no benefit and only uses additional disk space. You can safely set `clean_copy: False` in this case.
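A minimal sketch of that case, with all annotations disabled and the clean copy turned off:

```yaml
snapshots:
  enabled: True
  timestamp: False
  bounding_box: False
  crop: False
  clean_copy: False # regular snapshot is already unannotated
```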
Snapshots sent via MQTT are configured in the [config file](https://docs.frigate.video/configuration/) under `cameras -> your_camera -> mqtt`

View File

@ -10,10 +10,6 @@ For example, the cat in this image is currently in Zone 1, but **not** Zone 2.
Zones cannot have the same name as a camera. If you have multiple cameras covering the same area, a single zone can span all of them by configuring a zone with the same name on each camera.
## Enabling/Disabling Zones
Zones can be toggled on or off without removing them from the configuration. Disabled zones are completely ignored at runtime - objects will not be tracked for zone presence, and zones will not appear in the debug view. This is useful for temporarily disabling a zone during certain seasons or times of day without modifying the configuration.
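A minimal sketch of a disabled zone, using placeholder camera and zone names and example coordinates:

```yaml
cameras:
  front_door:
    zones:
      porch:
        enabled: False # temporarily ignore this zone
        coordinates: 0.033,0.306,0.324,0.138,0.439,0.185
```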
During testing, enable the Zones option for the Debug view of your camera (Settings --> Debug) so you can adjust as needed. The zone line will increase in thickness when any object enters the zone.
To create a zone, follow [the steps for a "Motion mask"](masks.md), but use the section of the web UI for creating a zone instead.
@ -90,6 +86,7 @@ cameras:
Only `car` objects can trigger the `front_yard_street` zone and only `person` objects can trigger the `entire_yard`. Objects will be tracked for any `person` that enters anywhere in the yard, and for cars only if they enter the street.
### Zone Loitering
Sometimes objects are expected to pass through a zone, but an object loitering in an area is unexpected. Zones can be configured with a minimum loitering time, after which the object will be considered in the zone.
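A minimal sketch, assuming the zone option is `loitering_time` (in seconds) and using placeholder names:

```yaml
cameras:
  front_door:
    zones:
      porch:
        coordinates: 0.033,0.306,0.324,0.138,0.439,0.185
        loitering_time: 5 # object must remain for 5 seconds to be considered in the zone
```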
@ -97,7 +94,6 @@ Sometimes objects are expected to be passing through a zone, but an object loite
:::note
When using loitering zones, a review item will behave in the following way:
- When a person is in a loitering zone, the review item will remain active until the person leaves the loitering zone, regardless of whether they are stationary.
- When any other object is in a loitering zone, the review item will remain active until the loitering time is met. Then, if the object is stationary, the review item will end.

View File

@ -41,8 +41,8 @@ If the EQ13 is out of stock, the link below may take you to a suggested alternat
| Name | Capabilities | Notes |
| ------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------- | --------------------------------------------------- |
| Beelink EQ13 (<a href="https://amzn.to/4jn2qVr" target="_blank" rel="nofollow noopener sponsored">Amazon</a>) | Can run object detection on several 1080p cameras with low-medium activity | Dual gigabit NICs for easy isolated camera network. |
| Intel 1120p ([Amazon](https://www.amazon.com/Beelink-i3-1220P-Computer-Display-Gigabit/dp/B0DDCKT9YP)) | Can handle a large number of 1080p cameras with high activity | |
| Intel 125H ([Amazon](https://www.amazon.com/MINISFORUM-Pro-125H-Barebone-Computer-HDMI2-1/dp/B0FH21FSZM)) | Can handle a significant number of 1080p cameras with high activity | Includes NPU for more efficient detection in 0.17+ |
| Intel 1120p ([Amazon](https://www.amazon.com/Beelink-i3-1220P-Computer-Display-Gigabit/dp/B0DDCKT9YP) | Can handle a large number of 1080p cameras with high activity | |
| Intel 125H ([Amazon](https://www.amazon.com/MINISFORUM-Pro-125H-Barebone-Computer-HDMI2-1/dp/B0FH21FSZM) | Can handle a significant number of 1080p cameras with high activity | Includes NPU for more efficient detection in 0.17+ |
## Detectors
@ -86,7 +86,7 @@ Frigate supports multiple different detectors that work on different types of ha
**Nvidia**
- [Nvidia GPU](#nvidia-gpus): Nvidia GPUs can provide efficient object detection.
- [TensorRT](#tensorrt---nvidia-gpu): TensorRT can run on Nvidia GPUs to provide efficient object detection.
- [Supports majority of model architectures via ONNX](../../configuration/object_detectors#onnx-supported-models)
- Runs well with any size models including large
@ -172,7 +172,7 @@ Inference speeds vary greatly depending on the CPU or GPU used, some known examp
| Intel Arc A380 | ~ 6 ms | | 320: ~ 10 ms 640: ~ 22 ms | 336: 20 ms 448: 27 ms | |
| Intel Arc A750 | ~ 4 ms | | 320: ~ 8 ms | | |
### Nvidia GPUs
### TensorRT - Nvidia GPU
Frigate is able to utilize an Nvidia GPU which supports the 12.x series of CUDA libraries.
@ -182,15 +182,17 @@ Frigate is able to utilize an Nvidia GPU which supports the 12.x series of CUDA
Make sure your host system has the [nvidia-container-runtime](https://docs.docker.com/config/containers/resource_constraints/#access-an-nvidia-gpu) installed to pass through the GPU to the container and the host system has a compatible driver installed for your GPU.
There are improved capabilities in newer GPU architectures that TensorRT can benefit from, such as INT8 operations and Tensor cores. The features compatible with your hardware will be optimized when the model is converted to a trt file. The model generation script currently provides a switch to enable or disable FP16 operations. If you wish to use newer features such as INT8 optimization, more work is required.
#### Compatibility References:
[NVIDIA TensorRT Support Matrix](https://docs.nvidia.com/deeplearning/tensorrt-rtx/latest/getting-started/support-matrix.html)
[NVIDIA TensorRT Support Matrix](https://docs.nvidia.com/deeplearning/tensorrt/archives/tensorrt-841/support-matrix/index.html)
[NVIDIA CUDA Compatibility](https://docs.nvidia.com/deploy/cuda-compatibility/index.html)
[NVIDIA GPU Compute Capability](https://developer.nvidia.com/cuda-gpus)
Inference is done with the `onnx` detector type. Speeds will vary greatly depending on the GPU and the model used.
Inference speeds will vary greatly depending on the GPU and the model used.
`tiny (t)` variants are faster than the equivalent non-tiny model; some known examples are below:
✅ - Accelerated with CUDA Graphs
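A minimal sketch of enabling the `onnx` detector; the model section is omitted here because it depends on the model used:

```yaml
detectors:
  onnx:
    type: onnx
```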

View File

@ -56,7 +56,7 @@ services:
volumes:
- /path/to/your/config:/config
- /path/to/your/storage:/media/frigate
- type: tmpfs # 1GB In-memory filesystem for recording segment storage
- type: tmpfs # Recommended: 1GB of memory
target: /tmp/cache
tmpfs:
size: 1000000000
@ -123,7 +123,7 @@ On Raspberry Pi OS **Trixie**, the Hailo driver is no longer shipped with the ke
:::note
If you are **not** using a Raspberry Pi with **Bookworm OS**, skip this step and proceed directly to step 2.
If you are using a Raspberry Pi with **Trixie OS**, also skip this step and proceed directly to step 2.
:::
@ -133,13 +133,13 @@ On Raspberry Pi OS **Trixie**, the Hailo driver is no longer shipped with the ke
```bash
lsmod | grep hailo
```
If it shows `hailo_pci`, unload it:
```bash
sudo modprobe -r hailo_pci
```
Then locate the built-in kernel driver and rename it so it cannot be loaded.
Renaming allows the original driver to be restored later if needed.
First, locate the currently installed kernel module:
@ -149,29 +149,28 @@ On Raspberry Pi OS **Trixie**, the Hailo driver is no longer shipped with the ke
```
Example output:
```
/lib/modules/6.6.31+rpt-rpi-2712/kernel/drivers/media/pci/hailo/hailo_pci.ko.xz
```
Save the module path to a variable:
```bash
BUILTIN=$(modinfo -n hailo_pci)
```
Then rename the module by appending `.bak`:
```bash
sudo mv "$BUILTIN" "${BUILTIN}.bak"
```
Now refresh the kernel module map so the system recognizes the change:
```bash
sudo depmod -a
```
Reboot your Raspberry Pi:
```bash
@ -186,7 +185,7 @@ On Raspberry Pi OS **Trixie**, the Hailo driver is no longer shipped with the ke
This command should return no results.
2. **Run the installation script**:
3. **Run the installation script**:
Download the installation script:
@ -207,13 +206,14 @@ On Raspberry Pi OS **Trixie**, the Hailo driver is no longer shipped with the ke
```
The script will:
- Install necessary build dependencies
- Clone and build the Hailo driver from the official repository
- Install the driver
- Download and install the required firmware
- Set up udev rules
3. **Reboot your system**:
4. **Reboot your system**:
After the script completes successfully, reboot to load the firmware:
@ -221,7 +221,7 @@ On Raspberry Pi OS **Trixie**, the Hailo driver is no longer shipped with the ke
sudo reboot
```
4. **Verify the installation**:
5. **Verify the installation**:
After rebooting, verify that the Hailo device is available:
@ -236,18 +236,18 @@ On Raspberry Pi OS **Trixie**, the Hailo driver is no longer shipped with the ke
```
Verify the driver version:
```bash
cat /sys/module/hailo_pci/version
```
Verify that the firmware was installed correctly:
```bash
ls -l /lib/firmware/hailo/hailo8_fw.bin
```
**Optional: Fix PCIe descriptor page size error**
**Optional: Fix PCIe descriptor page size error**
If you encounter the following error:
@ -462,7 +462,7 @@ services:
- /etc/localtime:/etc/localtime:ro
- /path/to/your/config:/config
- /path/to/your/storage:/media/frigate
- type: tmpfs # 1GB In-memory filesystem for recording segment storage
- type: tmpfs # Recommended: 1GB of memory
target: /tmp/cache
tmpfs:
size: 1000000000
@ -502,12 +502,12 @@ The official docker image tags for the current stable version are:
- `stable` - Standard Frigate build for amd64 and RPi-optimized Frigate build for arm64. This build includes support for Hailo devices as well.
- `stable-standard-arm64` - Standard Frigate build for arm64
- `stable-tensorrt` - Frigate build specific for amd64 devices running an Nvidia GPU
- `stable-tensorrt` - Frigate build specific for amd64 devices running an nvidia GPU
- `stable-rocm` - Frigate build for [AMD GPUs](../configuration/object_detectors.md#amdrocm-gpu-detector)
The community supported docker image tags for the current stable version are:
- `stable-tensorrt-jp6` - Frigate build optimized for Nvidia Jetson devices running Jetpack 6
- `stable-tensorrt-jp6` - Frigate build optimized for nvidia Jetson devices running Jetpack 6
- `stable-rk` - Frigate build for SBCs with Rockchip SoC
## Home Assistant Add-on
@ -521,7 +521,7 @@ There are important limitations in HA OS to be aware of:
- Separate local storage for media is not yet supported by Home Assistant
- AMD GPUs are not supported because HA OS does not include the mesa driver.
- Intel NPUs are not supported because HA OS does not include the NPU firmware.
- Nvidia GPUs are not supported because addons do not support the Nvidia runtime.
- Nvidia GPUs are not supported because addons do not support the nvidia runtime.
:::
@ -689,43 +689,3 @@ docker run \
```
Log into QNAP and open Container Station. The Frigate docker container should be listed under 'Overview' and running. Visit the Frigate Web UI by clicking the Frigate container, and then clicking the URL shown at the top of the detail page.
## macOS - Apple Silicon
:::warning
macOS uses port 5000 for its AirPlay Receiver service. If you want to expose port 5000 in Frigate for local app and API access, the port will need to be mapped to another port on the host, e.g., 5001.
Failure to remap port 5000 on the host will result in the WebUI and all API endpoints on port 5000 being unreachable, even if port 5000 is exposed correctly in Docker.
:::
Docker containers on macOS can be orchestrated by either [Docker Desktop](https://docs.docker.com/desktop/setup/install/mac-install/) or [OrbStack](https://orbstack.dev) (a native Swift app). The difference in inference speeds is negligible; however, CPU usage, power consumption, and container start times will be lower on OrbStack because it is a native Swift application.
To allow Frigate to use the Apple Silicon Neural Engine (NPU), the host must be running the [Apple Silicon Detector](../configuration/object_detectors.md#apple-silicon-detector) outside Docker.
#### Docker Compose example
```yaml
services:
frigate:
container_name: frigate
image: ghcr.io/blakeblackshear/frigate:stable-standard-arm64
restart: unless-stopped
shm_size: "512mb" # update for your cameras based on calculation above
volumes:
- /etc/localtime:/etc/localtime:ro
- /path/to/your/config:/config
- /path/to/your/recordings:/recordings
ports:
- "8971:8971"
# If exposing on macOS, map to a different host port like 5001 or any other port with no conflicts
# - "5001:5000" # Internal unauthenticated access. Expose carefully.
- "8554:8554" # RTSP feeds
extra_hosts:
# This is very important
# It allows Frigate access to the NPU on Apple Silicon via the Apple Silicon Detector
- "host.docker.internal:host-gateway" # Required to talk to the NPU detector
environment:
- FRIGATE_RTSP_PASSWORD=password
```

View File

@ -20,6 +20,7 @@ Keeping Frigate up to date ensures you benefit from the latest features, perform
If you're running Frigate via Docker (recommended method), follow these steps:
1. **Stop the Container**:
- If using Docker Compose:
```bash
docker compose down frigate
@ -30,8 +31,9 @@ If youre running Frigate via Docker (recommended method), follow these steps:
```
2. **Update and Pull the Latest Image**:
- If using Docker Compose:
- Edit your `docker-compose.yml` file to specify the desired version tag (e.g., `0.17.0` instead of `0.16.4`). For example:
- Edit your `docker-compose.yml` file to specify the desired version tag (e.g., `0.17.0` instead of `0.16.3`). For example:
```yaml
services:
frigate:
@ -49,6 +51,7 @@ If youre running Frigate via Docker (recommended method), follow these steps:
```
3. **Start the Container**:
- If using Docker Compose:
```bash
docker compose up -d
@ -72,15 +75,18 @@ If you're running Frigate via Docker (recommended method), follow these steps:
For users running Frigate as a Home Assistant Addon:
1. **Check for Updates**:
- Navigate to **Settings > Add-ons** in Home Assistant.
- Find your installed Frigate addon (e.g., "Frigate NVR" or "Frigate NVR (Full Access)").
- If an update is available, you'll see an "Update" button.
2. **Update the Addon**:
- Click the "Update" button next to the Frigate addon.
- Wait for the process to complete. Home Assistant will handle downloading and installing the new version.
3. **Restart the Addon**:
- After updating, go to the addons page and click "Restart" to apply the changes.
4. **Verify the Update**:
@ -99,8 +105,8 @@ If an update causes issues:
1. Stop Frigate.
2. Restore your backed-up config file and database.
3. Revert to the previous image version:
- For Docker: Specify an older tag (e.g., `ghcr.io/blakeblackshear/frigate:0.16.4`) in your `docker run` command.
- For Docker Compose: Edit your `docker-compose.yml`, specify the older version tag (e.g., `ghcr.io/blakeblackshear/frigate:0.16.4`), and re-run `docker compose up -d`.
- For Docker: Specify an older tag (e.g., `ghcr.io/blakeblackshear/frigate:0.16.3`) in your `docker run` command.
- For Docker Compose: Edit your `docker-compose.yml`, specify the older version tag (e.g., `ghcr.io/blakeblackshear/frigate:0.16.3`), and re-run `docker compose up -d`.
- For Home Assistant: Reinstall the previous addon version manually via the repository if needed and restart the addon.
4. Verify the old version is running again.

View File

@ -11,7 +11,7 @@ Use of the bundled go2rtc is optional. You can still configure FFmpeg to connect
## Setup a go2rtc stream
First, you will want to configure go2rtc to connect to your camera stream by adding the stream you want to use for live view in your Frigate config file. Avoid changing any other parts of your config at this step. Note that go2rtc supports [many different stream types](https://github.com/AlexxIT/go2rtc/tree/v1.9.13#module-streams), not just rtsp.
First, you will want to configure go2rtc to connect to your camera stream by adding the stream you want to use for live view in your Frigate config file. Avoid changing any other parts of your config at this step. Note that go2rtc supports [many different stream types](https://github.com/AlexxIT/go2rtc/tree/v1.9.10#module-streams), not just rtsp.
:::tip
@ -47,8 +47,8 @@ After adding this to the config, restart Frigate and try to watch the live strea
- Check Video Codec:
- If the camera stream works in go2rtc but not in your browser, the video codec might be unsupported.
- If using H265, switch to H264. Refer to [video codec compatibility](https://github.com/AlexxIT/go2rtc/tree/v1.9.13#codecs-madness) in go2rtc documentation.
- If unable to switch from H265 to H264, or if the stream format is different (e.g., MJPEG), re-encode the video using [FFmpeg parameters](https://github.com/AlexxIT/go2rtc/tree/v1.9.13#source-ffmpeg). It supports rotating and resizing video feeds and hardware acceleration. Keep in mind that transcoding video from one format to another is a resource intensive task and you may be better off using the built-in jsmpeg view.
- If using H265, switch to H264. Refer to [video codec compatibility](https://github.com/AlexxIT/go2rtc/tree/v1.9.10#codecs-madness) in go2rtc documentation.
- If unable to switch from H265 to H264, or if the stream format is different (e.g., MJPEG), re-encode the video using [FFmpeg parameters](https://github.com/AlexxIT/go2rtc/tree/v1.9.10#source-ffmpeg). It supports rotating and resizing video feeds and hardware acceleration. Keep in mind that transcoding video from one format to another is a resource intensive task and you may be better off using the built-in jsmpeg view.
```yaml
go2rtc:
streams:

View File

@ -119,7 +119,7 @@ services:
volumes:
- ./config:/config
- ./storage:/media/frigate
- type: tmpfs # 1GB In-memory filesystem for recording segment storage
- type: tmpfs # Optional: 1GB of memory, reduces SSD/SD Card wear
target: /tmp/cache
tmpfs:
size: 1000000000
@ -240,10 +240,7 @@ cameras:
- detect
motion:
mask:
motion_area:
friendly_name: "Motion mask"
enabled: true
coordinates: "0,461,3,0,1919,0,1919,843,1699,492,1344,458,1346,336,973,317,869,375,866,432"
- 0,461,3,0,1919,0,1919,843,1699,492,1344,458,1346,336,973,317,869,375,866,432
```
### Step 6: Enable recordings

View File

@ -16,15 +16,7 @@ See the [MQTT integration
documentation](https://www.home-assistant.io/integrations/mqtt/) for more
details.
In addition, MQTT must be enabled in your Frigate configuration file and Frigate must be connected to the same MQTT server as Home Assistant for many of the entities created by the integration to function, e.g.:
```yaml
mqtt:
enabled: True
host: mqtt.server.com # the address of your MQTT broker (often the same host as Home Assistant)
user: your_mqtt_broker_username
password: your_mqtt_broker_password
```
In addition, MQTT must be enabled in your Frigate configuration file and Frigate must be connected to the same MQTT server as Home Assistant for many of the entities created by the integration to function.
### Integration installation
@ -103,12 +95,12 @@ services:
If you are using Home Assistant Add-on, the URL should be one of the following depending on which Add-on variant you are using. Note that if you are using the Proxy Add-on, you should NOT point the integration at the proxy URL. Just enter the same URL used to access Frigate directly from your network.
| Add-on Variant | URL |
| -------------------------- | -------------------------------------- |
| Frigate | `http://ccab4aaf-frigate:5000` |
| Frigate (Full Access) | `http://ccab4aaf-frigate-fa:5000` |
| Frigate Beta | `http://ccab4aaf-frigate-beta:5000` |
| Frigate Beta (Full Access) | `http://ccab4aaf-frigate-fa-beta:5000` |
| Add-on Variant | URL |
| -------------------------- | ----------------------------------------- |
| Frigate | `http://ccab4aaf-frigate:5000` |
| Frigate (Full Access) | `http://ccab4aaf-frigate-fa:5000` |
| Frigate Beta | `http://ccab4aaf-frigate-beta:5000` |
| Frigate Beta (Full Access) | `http://ccab4aaf-frigate-fa-beta:5000` |
### Frigate running on a separate machine

View File

@ -120,7 +120,7 @@ Message published for each changed tracked object. The first message is publishe
### `frigate/tracked_object_update`
Message published for updates to tracked object metadata. All messages include an `id` field which is the tracked object's event ID, and can be used to look up the event via the API or match it to items in the UI.
Message published for updates to tracked object metadata, for example:
#### Generative AI Description Update
@ -134,14 +134,12 @@ Message published for updates to tracked object metadata. All messages include a
#### Face Recognition Update
Published after each recognition attempt, regardless of whether the score meets `recognition_threshold`. See the [Face Recognition](/configuration/face_recognition) documentation for details on how scoring works.
```json
{
"type": "face",
"id": "1607123955.475377-mxklsc",
"name": "John", // best matching person, or null if no match
"score": 0.95, // running weighted average across all recognition attempts
"name": "John",
"score": 0.95,
"camera": "front_door_cam",
"timestamp": 1607123958.748393
}
@ -149,18 +147,15 @@ Published after each recognition attempt, regardless of whether the score meets
#### License Plate Recognition Update
Published when a license plate is recognized on a car object. See the [License Plate Recognition](/configuration/license_plate_recognition) documentation for details.
```json
{
"type": "lpr",
"id": "1607123955.475377-mxklsc",
"name": "John's Car", // known name for the plate, or null
"name": "John's Car",
"plate": "123ABC",
"score": 0.95,
"camera": "driveway_cam",
"timestamp": 1607123958.748393,
"plate_box": [917, 487, 1029, 529] // box coordinates of the detected license plate in the frame
"timestamp": 1607123958.748393
}
```
@ -430,30 +425,6 @@ Topic to adjust motion contour area for a camera. Expected value is an integer.
Topic with current motion contour area for a camera. Published value is an integer.
### `frigate/<camera_name>/motion_mask/<mask_name>/set`
Topic to turn a specific motion mask for a camera on and off. Expected values are `ON` and `OFF`.
### `frigate/<camera_name>/motion_mask/<mask_name>/state`
Topic with current state of a specific motion mask for a camera. Published values are `ON` and `OFF`.
### `frigate/<camera_name>/object_mask/<mask_name>/set`
Topic to turn a specific object mask for a camera on and off. Expected values are `ON` and `OFF`.
### `frigate/<camera_name>/object_mask/<mask_name>/state`
Topic with current state of a specific object mask for a camera. Published values are `ON` and `OFF`.
### `frigate/<camera_name>/zone/<zone_name>/set`
Topic to turn a specific zone for a camera on and off. Expected values are `ON` and `OFF`.
### `frigate/<camera_name>/zone/<zone_name>/state`
Topic with current state of a specific zone for a camera. Published values are `ON` and `OFF`.
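For example, a zone can be toggled and verified from the command line. A minimal sketch, assuming the mosquitto clients are installed and using placeholder broker, camera, and zone names:

```bash
# Disable the porch zone on the front_door camera, then read back its state
mosquitto_pub -h mqtt.server.com -t "frigate/front_door/zone/porch/set" -m "OFF"
mosquitto_sub -h mqtt.server.com -t "frigate/front_door/zone/porch/state" -C 1
```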
### `frigate/<camera_name>/review_status`
Topic with current activity status of the camera. Possible values are `NONE`, `DETECTION`, or `ALERT`.

View File

@ -54,8 +54,6 @@ Once you have [requested your first model](../plus/first_model.md) and gotten yo
You can either choose the new model from the Frigate+ pane in the Settings page of the Frigate UI, or manually set the model at the root level in your config:
```yaml
detectors: ...
model:
path: plus://<your_model_id>
```

View File

@ -24,8 +24,6 @@ You will receive an email notification when your Frigate+ model is ready.
Models available in Frigate+ can be used with a special model path. No other information needs to be configured because it fetches the remaining config from Frigate+ automatically.
```yaml
detectors: ...
model:
path: plus://<your_model_id>
```

View File

@ -15,15 +15,15 @@ There are three model types offered in Frigate+, `mobiledet`, `yolonas`, and `yo
Not all model types are supported by all detectors, so it's important to choose a model type to match your detector as shown in the table under [supported detector types](#supported-detector-types). You can test model types for compatibility and speed on your hardware by using the base models.
| Model Type | Description |
| ----------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `mobiledet` | Based on the same architecture as the default model included with Frigate. Runs on Google Coral devices and CPUs. |
| `yolonas` | A newer architecture that offers slightly higher accuracy and improved detection of small objects. Runs on Intel, NVidia GPUs, and AMD GPUs. |
| `yolov9` | A leading SOTA (state of the art) object detection model with similar performance to yolonas, but on a wider range of hardware options. Runs on most hardware. |
| Model Type | Description |
| ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
| `mobiledet` | Based on the same architecture as the default model included with Frigate. Runs on Google Coral devices and CPUs. |
| `yolonas` | A newer architecture that offers slightly higher accuracy and improved detection of small objects. Runs on Intel, NVidia GPUs, and AMD GPUs. |
| `yolov9` | A leading SOTA (state of the art) object detection model with similar performance to yolonas, but on a wider range of hardware options. Runs on Intel, NVidia GPUs, AMD GPUs, Hailo, MemryX, Apple Silicon, and Rockchip NPUs. |
### YOLOv9 Details
YOLOv9 models are available in `s`, `t`, and `edgetpu` variants. When requesting a `yolov9` model, you will be prompted to choose a variant. If you want the model to be compatible with a Google Coral, you will need to choose the `edgetpu` variant. If you are unsure what variant to choose, you should perform some tests with the base models to find the performance level that suits you. The `s` size is most similar to the current `yolonas` models in terms of inference times and accuracy, and a good place to start is the `320x320` resolution model for `yolov9s`.
YOLOv9 models are available in `s` and `t` sizes. When requesting a `yolov9` model, you will be prompted to choose a size. If you are unsure what size to choose, you should perform some tests with the base models to find the performance level that suits you. The `s` size is most similar to the current `yolonas` models in terms of inference times and accuracy, and a good place to start is the `320x320` resolution model for `yolov9s`.
:::info
@ -37,21 +37,23 @@ If you have a Hailo device, you will need to specify the hardware you have when
#### Rockchip (RKNN) Support
Rockchip models are automatically converted as of 0.17. For 0.16, YOLOv9 onnx models will need to be manually converted. First, you will need to configure Frigate to use the model id for your YOLOv9 onnx model so it downloads the model to your `model_cache` directory. From there, you can follow the [documentation](/configuration/object_detectors.md#converting-your-own-onnx-model-to-rknn-format) to convert it.
For 0.16, YOLOv9 onnx models will need to be manually converted. First, you will need to configure Frigate to use the model id for your YOLOv9 onnx model so it downloads the model to your `model_cache` directory. From there, you can follow the [documentation](/configuration/object_detectors.md#converting-your-own-onnx-model-to-rknn-format) to convert it. Automatic conversion is available in 0.17 and later.
## Supported detector types
Currently, Frigate+ models support CPU (`cpu`), Google Coral (`edgetpu`), OpenVino (`openvino`), ONNX (`onnx`), Hailo (`hailo8l`), and Rockchip (`rknn`) detectors.
Currently, Frigate+ models support CPU (`cpu`), Google Coral (`edgetpu`), OpenVino (`openvino`), ONNX (`onnx`), Hailo (`hailo8l`), and Rockchip\* (`rknn`) detectors.
| Hardware | Recommended Detector Type | Recommended Model Type |
| -------------------------------------------------------------------------------- | ------------------------- | ---------------------- |
| [CPU](/configuration/object_detectors.md#cpu-detector-not-recommended) | `cpu` | `mobiledet` |
| [Coral (all form factors)](/configuration/object_detectors.md#edge-tpu-detector) | `edgetpu` | `yolov9` |
| [Coral (all form factors)](/configuration/object_detectors.md#edge-tpu-detector) | `edgetpu` | `mobiledet` |
| [Intel](/configuration/object_detectors.md#openvino-detector) | `openvino` | `yolov9` |
| [NVidia GPU](/configuration/object_detectors#onnx) | `onnx` | `yolov9` |
| [AMD ROCm GPU](/configuration/object_detectors#amdrocm-gpu-detector) | `onnx` | `yolov9` |
| [Hailo8/Hailo8L/Hailo8R](/configuration/object_detectors#hailo-8) | `hailo8l` | `yolov9` |
| [Rockchip NPU](/configuration/object_detectors#rockchip-platform) | `rknn` | `yolov9` |
| [Rockchip NPU](/configuration/object_detectors#rockchip-platform)\* | `rknn` | `yolov9` |
_\* Requires manual conversion in 0.16. Automatic conversion available in 0.17 and later._
## Improving your model
@ -79,7 +81,7 @@ Candidate labels are also available for annotation. These labels don't have enou
Where possible, these labels are mapped to existing labels during training. For example, any `baby` labels are mapped to `person` until support for new labels is added.
The candidate labels are: `baby`, `bpost`, `badger`, `possum`, `rodent`, `chicken`, `groundhog`, `boar`, `hedgehog`, `tractor`, `golf cart`, `garbage truck`, `bus`, `sports ball`, `la_poste`, `lawnmower`, `heron`, `rickshaw`, `wombat`, `auspost`, `aramex`, `bobcat`, `mustelid`, `transoflex`, `airplane`, `drone`, `mountain_lion`, `crocodile`, `turkey`, `baby_stroller`, `monkey`, `coyote`, `porcupine`, `parcelforce`, `sheep`, `snake`, `helicopter`, `lizard`, `duck`, `hermes`, `cargus`, `fan_courier`, `sameday`
The candidate labels are: `baby`, `bpost`, `badger`, `possum`, `rodent`, `chicken`, `groundhog`, `boar`, `hedgehog`, `tractor`, `golf cart`, `garbage truck`, `bus`, `sports ball`
Candidate labels are not available for automatic suggestions.

View File

@ -3,67 +3,17 @@ id: dummy-camera
title: Analyzing Object Detection
---
Frigate provides several tools for investigating object detection and tracking behavior: reviewing recorded detections through the UI, using the built-in Debug Replay feature, and manually setting up a dummy camera for advanced scenarios.
When investigating object detection or tracking problems, it can be helpful to replay an exported video as a temporary "dummy" camera. This lets you reproduce issues locally, iterate on configuration (detections, zones, enrichment settings), and capture logs and clips for analysis.
## Reviewing Detections in the UI
## When to use
Before setting up a replay, you can often diagnose detection issues by reviewing existing recordings directly in the Frigate UI.
- Replaying an exported clip to reproduce incorrect detections
- Testing configuration changes (model settings, trackers, filters) against a known clip
- Gathering deterministic logs and recordings for debugging or issue reports
### Detail View (History)
## Example Config
The **Detail Stream** view in History shows recorded video with detection overlays (bounding boxes, path points, and zone highlights) drawn on top. Select a review item to see its tracked objects and lifecycle events. Clicking a lifecycle event seeks the video to that point so you can see exactly what the detector saw.
### Tracking Details (Explore)
In **Explore**, clicking a thumbnail opens the **Tracking Details** pane, which shows the full lifecycle of a single tracked object: every detection, zone entry/exit, and attribute change. The video plays back with the bounding box overlaid, letting you step through the object's entire lifecycle.
### Annotation Offset
Both views support an **Annotation Offset** setting (`detect.annotation_offset` in your camera config) that shifts the detection overlay in time relative to the recorded video. This compensates for the timing drift between the `detect` and `record` pipelines.
These streams use fundamentally different clocks with different buffering and latency characteristics, so the detection data and the recorded video are never perfectly synchronized. The annotation offset shifts the overlay to visually align the bounding boxes with the objects in the recorded video.
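A minimal sketch of setting the offset, using a placeholder camera name; the value is in milliseconds, and the sign convention is an assumption, so adjust until the boxes align with the video:

```yaml
cameras:
  front_door:
    detect:
      annotation_offset: 120 # milliseconds; flip the sign if boxes drift the wrong way
```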
#### Why the offset varies between clips
The base timing drift between detect and record is roughly constant for a given camera, so a single offset value works well on average. However, you may notice the alignment is not pixel-perfect in every clip. This is normal and caused by several factors:
- **Keyframe-constrained seeking**: When the browser seeks to a timestamp, it can only land on the nearest keyframe. Each recording segment has keyframes at different positions relative to the detection timestamps, so the same offset may land slightly early in one clip and slightly late in another.
- **Segment boundary trimming**: When a recording range starts mid-segment, the video is trimmed to the requested start point. This trim may not align with a keyframe, shifting the effective reference point.
- **Capture-time jitter**: Network buffering, camera buffer flushes, and ffmpeg's own buffering mean the system-clock timestamp and the corresponding recorded frame are not always offset by exactly the same amount.
The per-clip variation is typically quite low and is mostly an artifact of keyframe granularity rather than a change in the true drift. A "perfect" alignment would require per-frame, keyframe-aware offset compensation, which is not practical. Treat the annotation offset as a best-effort average for your camera.
## Debug Replay
Debug Replay lets you re-run Frigate's detection pipeline against a section of recorded video without manually configuring a dummy camera. It automatically extracts the recording, creates a temporary camera with the same detection settings as the original, and loops the clip through the pipeline so you can observe detections in real time.
### When to use
- Reproducing a detection or tracking issue from a specific time range
- Testing configuration changes (model settings, zones, filters, motion) against a known clip
- Gathering logs and debug overlays for a bug report
:::note
Only one replay session can be active at a time. If a session is already running, you will be prompted to navigate to it or stop it first.
:::
### Variables to consider
- The replay will not always produce identical results to the original run. Different frames may be selected on replay, which can change detections and tracking.
- Motion detection depends on the exact frames used; small frame shifts can change motion regions and therefore what gets passed to the detector.
- Object detection is not fully deterministic: models and post-processing can yield slightly different results across runs.
Treat the replay as a close approximation rather than an exact reproduction. Run multiple loops and examine the debug overlays and logs to understand the behavior.
## Manual Dummy Camera
For advanced scenarios — such as testing with a clip from a different source, debugging ffmpeg behavior, or running a clip through a completely custom configuration — you can set up a dummy camera manually.
### Example config
Place the clip you want to replay in a location accessible to Frigate (for example `/media/frigate/` or the repository `debug/` folder when developing). Then add a temporary camera to your `config/config.yml`:
Place the clip you want to replay in a location accessible to Frigate (for example `/media/frigate/` or the repository `debug/` folder when developing). Then add a temporary camera to your `config/config.yml` like this:
```yaml
cameras:
@ -82,10 +32,10 @@ cameras:
enabled: false
```
- `-re -stream_loop -1` tells ffmpeg to play the file in real time and loop indefinitely.
- `-fflags +genpts` generates presentation timestamps when they are missing in the file.
- `-re -stream_loop -1` tells `ffmpeg` to play the file in real time and loop indefinitely, which is useful for long debugging sessions.
- `-fflags +genpts` helps generate presentation timestamps when they are missing in the file.
### Steps
## Steps
1. Export or copy the clip you want to replay to the Frigate host (e.g., `/media/frigate/` or `debug/clips/`). Depending on what you are looking to debug, it is often helpful to add some "pre-capture" time (where the tracked object is not yet visible) to the clip when exporting.
2. Add the temporary camera to `config/config.yml` (example above). Use a unique name such as `test` or `replay_camera` so it's easy to remove later.
@ -95,8 +45,16 @@ cameras:
5. Iterate on camera or enrichment settings (model, fps, zones, filters) and re-check the replay until the behavior is resolved.
6. Remove the temporary camera from your config after debugging to avoid spurious telemetry or recordings.
### Troubleshooting
## Variables to consider in object tracking
- **No video**: verify the file path is correct and accessible from the Frigate process/container.
- **FFmpeg errors**: check the log output and adjust `input_args` for your file format. You may also need to disable hardware acceleration (`hwaccel_args: ""`) for the dummy camera.
- **No detections**: confirm the camera `roles` include `detect` and that the model/detector configuration is enabled.
- The exported video will not always line up exactly with how it originally ran through Frigate (or even with the last loop). Different frames may be used on replay, which can change detections and tracking.
- Motion detection depends on the frames used; small frame shifts can change motion regions and therefore what gets passed to the detector.
- Object detection is not deterministic: models and post-processing can yield different results across runs, so you may not get identical detections or track IDs every time.
When debugging, treat the replay as a close approximation rather than a byte-for-byte replay. Capture multiple runs, enable recording if helpful, and examine logs and saved event clips to understand variability.
## Troubleshooting
- No video: verify the path is correct and accessible from the Frigate process/container.
- FFmpeg errors: check the log output for ffmpeg-specific flags and adjust `input_args` accordingly for your file/container. You may also need to disable hardware acceleration (`hwaccel_args: ""`) for the dummy camera.
- No detections: confirm the camera `roles` include `detect`, and model/detector configuration is enabled.

View File

@ -28,7 +28,7 @@ const sidebars: SidebarsConfig = {
{
type: "link",
label: "Go2RTC Configuration Reference",
href: "https://github.com/AlexxIT/go2rtc/tree/v1.9.13#configuration",
href: "https://github.com/AlexxIT/go2rtc/tree/v1.9.10#configuration",
} as PropSidebarItemLink,
],
Detectors: [

View File

@ -331,59 +331,6 @@ paths:
application/json:
schema:
$ref: "#/components/schemas/HTTPValidationError"
/media/sync:
post:
tags:
- App
summary: Start media sync job
description: |-
Start an asynchronous media sync job to find and (optionally) remove orphaned media files.
Returns 202 with job details when queued, or 409 if a job is already running.
operationId: sync_media_media_sync_post
requestBody:
required: true
content:
application/json:
responses:
"202":
description: Accepted - Job queued
"409":
description: Conflict - Job already running
"422":
description: Validation Error
/media/sync/current:
get:
tags:
- App
summary: Get current media sync job
description: |-
Retrieve the current running media sync job, if any. Returns the job details or null when no job is active.
operationId: get_media_sync_current_media_sync_current_get
responses:
"200":
description: Successful Response
"422":
description: Validation Error
/media/sync/status/{job_id}:
get:
tags:
- App
summary: Get media sync job status
description: |-
Get status and results for the specified media sync job id. Returns 200 with job details including results, or 404 if the job is not found.
operationId: get_media_sync_status_media_sync_status__job_id__get
parameters:
- name: job_id
in: path
responses:
"200":
description: Successful Response
"404":
description: Not Found - Job not found
"422":
description: Validation Error
/faces/train/{name}/classify:
post:
tags:
@ -3200,7 +3147,6 @@ paths:
duration: 30
include_recording: true
draw: {}
pre_capture: null
responses:
"200":
description: Successful Response
@ -5003,12 +4949,6 @@ components:
- type: "null"
title: Draw
default: {}
pre_capture:
anyOf:
- type: integer
- type: "null"
title: Pre Capture Seconds
default: null
type: object
title: EventsCreateBody
EventsDeleteBody:

View File

@ -19,7 +19,6 @@ from fastapi import APIRouter, Body, Path, Request, Response
from fastapi.encoders import jsonable_encoder
from fastapi.params import Depends
from fastapi.responses import JSONResponse, PlainTextResponse, StreamingResponse
from filelock import FileLock, Timeout
from markupsafe import escape
from peewee import SQL, fn, operator
from pydantic import ValidationError
@ -31,32 +30,22 @@ from frigate.api.auth import (
require_role,
)
from frigate.api.defs.query.app_query_parameters import AppTimelineHourlyQueryParameters
from frigate.api.defs.request.app_body import AppConfigSetBody, MediaSyncBody
from frigate.api.defs.request.app_body import AppConfigSetBody
from frigate.api.defs.tags import Tags
from frigate.config import FrigateConfig
from frigate.config.camera.updater import (
CameraConfigUpdateEnum,
CameraConfigUpdateTopic,
)
from frigate.ffmpeg_presets import FFMPEG_HWACCEL_VAAPI, _gpu_selector
from frigate.jobs.media_sync import (
get_current_media_sync_job,
get_media_sync_job_by_id,
start_media_sync_job,
)
from frigate.models import Event, Timeline
from frigate.stats.prometheus import get_metrics, update_metrics
from frigate.types import JobStatusTypesEnum
from frigate.util.builtin import (
clean_camera_user_pass,
deep_merge,
flatten_config_data,
load_labels,
process_config_query_string,
update_yaml_file_bulk,
)
from frigate.util.config import apply_section_update, find_config_file
from frigate.util.schema import get_config_schema
from frigate.util.config import find_config_file
from frigate.util.services import (
get_nvidia_driver_info,
process_logs,
@ -81,7 +70,9 @@ def is_healthy():
@router.get("/config/schema.json", dependencies=[Depends(allow_public())])
def config_schema(request: Request):
return JSONResponse(content=get_config_schema(FrigateConfig))
return Response(
content=request.app.frigate_config.schema_json(), media_type="application/json"
)
@router.get(
@ -127,10 +118,6 @@ def config(request: Request):
config: dict[str, dict[str, Any]] = config_obj.model_dump(
mode="json", warnings="none", exclude_none=True
)
config["detectors"] = {
name: detector.model_dump(mode="json", warnings="none", exclude_none=True)
for name, detector in config_obj.detectors.items()
}
# remove the mqtt password
config["mqtt"].pop("password", None)
@ -201,54 +188,6 @@ def config(request: Request):
return JSONResponse(content=config)
@router.get("/ffmpeg/presets", dependencies=[Depends(allow_any_authenticated())])
def ffmpeg_presets():
"""Return available ffmpeg preset keys for config UI usage."""
# Whitelist based on documented presets in ffmpeg_presets.md
hwaccel_presets = [
"preset-rpi-64-h264",
"preset-rpi-64-h265",
"preset-vaapi",
"preset-intel-qsv-h264",
"preset-intel-qsv-h265",
"preset-nvidia",
"preset-jetson-h264",
"preset-jetson-h265",
"preset-rkmpp",
]
input_presets = [
"preset-http-jpeg-generic",
"preset-http-mjpeg-generic",
"preset-http-reolink",
"preset-rtmp-generic",
"preset-rtsp-generic",
"preset-rtsp-restream",
"preset-rtsp-restream-low-latency",
"preset-rtsp-udp",
"preset-rtsp-blue-iris",
]
record_output_presets = [
"preset-record-generic",
"preset-record-generic-audio-copy",
"preset-record-generic-audio-aac",
"preset-record-mjpeg",
"preset-record-jpeg",
"preset-record-ubiquiti",
]
return JSONResponse(
content={
"hwaccel_args": hwaccel_presets,
"input_args": input_presets,
"output_args": {
"record": record_output_presets,
"detect": [],
},
}
)
@router.get("/config/raw_paths", dependencies=[Depends(require_role(["admin"]))])
def config_raw_paths(request: Request):
"""Admin-only endpoint that returns camera paths and go2rtc streams without credential masking."""
@ -423,245 +362,108 @@ def config_save(save_option: str, body: Any = Body(media_type="text/plain")):
)
def _config_set_in_memory(request: Request, body: AppConfigSetBody) -> JSONResponse:
"""Apply config changes in-memory only, without writing to YAML.
Used for temporary config changes like debug replay camera tuning.
Updates the in-memory Pydantic config and publishes ZMQ updates,
bypassing YAML parsing entirely.
"""
try:
updates = {}
if body.config_data:
updates = flatten_config_data(body.config_data)
updates = {k: ("" if v is None else v) for k, v in updates.items()}
if not updates:
return JSONResponse(
content={"success": False, "message": "No configuration data provided"},
status_code=400,
)
config: FrigateConfig = request.app.frigate_config
# Group flat key paths into nested per-camera, per-section dicts
grouped: dict[str, dict[str, dict]] = {}
for key_path, value in updates.items():
parts = key_path.split(".")
if len(parts) < 3 or parts[0] != "cameras":
continue
cam, section = parts[1], parts[2]
grouped.setdefault(cam, {}).setdefault(section, {})
# Build nested dict from remaining path (e.g. "filters.person.threshold")
target = grouped[cam][section]
for part in parts[3:-1]:
target = target.setdefault(part, {})
if len(parts) > 3:
target[parts[-1]] = value
elif isinstance(value, dict):
grouped[cam][section] = deep_merge(
grouped[cam][section], value, override=True
)
else:
grouped[cam][section] = value
# Apply each section update
for cam_name, sections in grouped.items():
camera_config = config.cameras.get(cam_name)
if not camera_config:
return JSONResponse(
content={
"success": False,
"message": f"Camera '{cam_name}' not found",
},
status_code=400,
)
for section_name, update in sections.items():
err = apply_section_update(camera_config, section_name, update)
if err is not None:
return JSONResponse(
content={"success": False, "message": err},
status_code=400,
)
# Publish ZMQ updates so processing threads pick up changes
if body.update_topic and body.update_topic.startswith("config/cameras/"):
_, _, camera, field = body.update_topic.split("/")
settings = getattr(config.cameras.get(camera, None), field, None)
if settings is not None:
request.app.config_publisher.publish_update(
CameraConfigUpdateTopic(CameraConfigUpdateEnum[field], camera),
settings,
)
return JSONResponse(
content={"success": True, "message": "Config applied in-memory"},
status_code=200,
)
except Exception as e:
logger.error(f"Error applying config in-memory: {e}")
return JSONResponse(
content={"success": False, "message": "Error applying config"},
status_code=500,
)
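# Worked example of the key-path grouping performed above; the camera name and
# values are illustrative, and the dict-valued deep_merge branch is omitted.
updates = {
    "cameras.front.motion.threshold": 30,
    "cameras.front.detect.enabled": True,
}
grouped = {}
for key_path, value in updates.items():
    parts = key_path.split(".")
    if len(parts) < 3 or parts[0] != "cameras":
        continue
    cam, section = parts[1], parts[2]
    target = grouped.setdefault(cam, {}).setdefault(section, {})
    for part in parts[3:-1]:
        target = target.setdefault(part, {})
    if len(parts) > 3:
        target[parts[-1]] = value
# grouped == {"front": {"motion": {"threshold": 30}, "detect": {"enabled": True}}}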
@router.put("/config/set", dependencies=[Depends(require_role(["admin"]))])
def config_set(request: Request, body: AppConfigSetBody):
config_file = find_config_file()
if body.skip_save:
return _config_set_in_memory(request, body)
lock = FileLock(f"{config_file}.lock", timeout=5)
with open(config_file, "r") as f:
old_raw_config = f.read()
try:
with lock:
with open(config_file, "r") as f:
old_raw_config = f.read()
updates = {}
try:
updates = {}
# process query string parameters (takes precedence over body.config_data)
parsed_url = urllib.parse.urlparse(str(request.url))
query_string = urllib.parse.parse_qs(parsed_url.query, keep_blank_values=True)
# process query string parameters (takes precedence over body.config_data)
parsed_url = urllib.parse.urlparse(str(request.url))
query_string = urllib.parse.parse_qs(
parsed_url.query, keep_blank_values=True
)
# Filter out empty keys but keep blank values for non-empty keys
query_string = {k: v for k, v in query_string.items() if k}
# Filter out empty keys but keep blank values for non-empty keys
query_string = {k: v for k, v in query_string.items() if k}
if query_string:
updates = process_config_query_string(query_string)
elif body.config_data:
updates = flatten_config_data(body.config_data)
if query_string:
updates = process_config_query_string(query_string)
elif body.config_data:
updates = flatten_config_data(body.config_data)
# Convert None values to empty strings for deletion (e.g., when deleting masks)
updates = {k: ("" if v is None else v) for k, v in updates.items()}
if not updates:
return JSONResponse(
content=(
{"success": False, "message": "No configuration data provided"}
),
status_code=400,
)
if not updates:
return JSONResponse(
content=(
{
"success": False,
"message": "No configuration data provided",
}
),
status_code=400,
)
# apply all updates in a single operation
update_yaml_file_bulk(config_file, updates)
# apply all updates in a single operation
update_yaml_file_bulk(config_file, updates)
# validate the updated config
with open(config_file, "r") as f:
new_raw_config = f.read()
try:
config = FrigateConfig.parse(new_raw_config)
except Exception:
with open(config_file, "w") as f:
f.write(old_raw_config)
f.close()
logger.error(f"\nConfig Error:\n\n{str(traceback.format_exc())}")
return JSONResponse(
content=(
{
"success": False,
"message": "Error parsing config. Check logs for error message.",
}
),
status_code=400,
)
except Exception as e:
logging.error(f"Error updating config: {e}")
return JSONResponse(
content=({"success": False, "message": "Error updating config"}),
status_code=500,
)
if body.requires_restart == 0 or body.update_topic:
old_config: FrigateConfig = request.app.frigate_config
request.app.frigate_config = config
request.app.genai_manager.update_config(config)
if request.app.stats_emitter is not None:
request.app.stats_emitter.config = config
if body.update_topic:
if body.update_topic.startswith("config/cameras/"):
_, _, camera, field = body.update_topic.split("/")
if camera == "*":
# Wildcard: fan out update to all cameras
enum_value = CameraConfigUpdateEnum[field]
for camera_name in config.cameras:
settings = config.get_nested_object(
f"config/cameras/{camera_name}/{field}"
)
request.app.config_publisher.publish_update(
CameraConfigUpdateTopic(enum_value, camera_name),
settings,
)
else:
if field == "add":
settings = config.cameras[camera]
elif field == "remove":
settings = old_config.cameras[camera]
else:
settings = config.get_nested_object(body.update_topic)
request.app.config_publisher.publish_update(
CameraConfigUpdateTopic(
CameraConfigUpdateEnum[field], camera
),
settings,
)
else:
# Generic handling for global config updates
settings = config.get_nested_object(body.update_topic)
# Publish None for removal, actual config for add/update
request.app.config_publisher.publisher.publish(
body.update_topic, settings
)
# validate the updated config
with open(config_file, "r") as f:
new_raw_config = f.read()
try:
config = FrigateConfig.parse(new_raw_config)
except Exception:
with open(config_file, "w") as f:
f.write(old_raw_config)
f.close()
logger.error(f"\nConfig Error:\n\n{str(traceback.format_exc())}")
return JSONResponse(
content=(
{
"success": True,
"message": "Config successfully updated, restart to apply",
"success": False,
"message": "Error parsing config. Check logs for error message.",
}
),
status_code=200,
status_code=400,
)
except Timeout:
except Exception as e:
logging.error(f"Error updating config: {e}")
return JSONResponse(
content=(
{
"success": False,
"message": "Another process is currently updating the config. Please try again in a few seconds.",
}
),
status_code=503,
content=({"success": False, "message": "Error updating config"}),
status_code=500,
)
if body.requires_restart == 0 or body.update_topic:
old_config: FrigateConfig = request.app.frigate_config
request.app.frigate_config = config
if body.update_topic:
if body.update_topic.startswith("config/cameras/"):
_, _, camera, field = body.update_topic.split("/")
if field == "add":
settings = config.cameras[camera]
elif field == "remove":
settings = old_config.cameras[camera]
else:
settings = config.get_nested_object(body.update_topic)
request.app.config_publisher.publish_update(
CameraConfigUpdateTopic(CameraConfigUpdateEnum[field], camera),
settings,
)
else:
# Generic handling for global config updates
settings = config.get_nested_object(body.update_topic)
# Publish None for removal, actual config for add/update
request.app.config_publisher.publisher.publish(
body.update_topic, settings
)
return JSONResponse(
content=(
{
"success": True,
"message": "Config successfully updated, restart to apply",
}
),
status_code=200,
)
@router.get("/vainfo", dependencies=[Depends(allow_any_authenticated())])
def vainfo():
# Use LibvaGpuSelector to pick an appropriate libva device (if available)
selected_gpu = ""
try:
selected_gpu = _gpu_selector.get_gpu_arg(FFMPEG_HWACCEL_VAAPI, 0) or ""
except Exception:
selected_gpu = ""
# If selected_gpu is empty, pass None to vainfo_hwaccel to run plain `vainfo`.
vainfo = vainfo_hwaccel(device_name=selected_gpu or None)
vainfo = vainfo_hwaccel()
return JSONResponse(
content={
"return_code": vainfo.returncode,
@ -796,98 +598,6 @@ def restart():
)
@router.post(
"/media/sync",
dependencies=[Depends(require_role(["admin"]))],
summary="Start media sync job",
description="""Start an asynchronous media sync job to find and (optionally) remove orphaned media files.
Returns 202 with job details when queued, or 409 if a job is already running.""",
)
def sync_media(body: MediaSyncBody = Body(...)):
"""Start async media sync job - remove orphaned files.
Syncs specified media types: event snapshots, event thumbnails, review thumbnails,
previews, exports, and/or recordings. The job runs in the background; use
/media/sync/current or /media/sync/status/{job_id} to check status.
Args:
body: MediaSyncBody with dry_run flag and media_types list.
media_types can include: 'all', 'event_snapshots', 'event_thumbnails',
'review_thumbnails', 'previews', 'exports', 'recordings'
Returns:
202 Accepted with job_id, or 409 Conflict if job already running.
"""
job_id = start_media_sync_job(
dry_run=body.dry_run, media_types=body.media_types, force=body.force
)
if job_id is None:
# A job is already running
current = get_current_media_sync_job()
return JSONResponse(
content={
"error": "A media sync job is already running",
"current_job_id": current.id if current else None,
},
status_code=409,
)
return JSONResponse(
content={
"job": {
"job_type": "media_sync",
"status": JobStatusTypesEnum.queued,
"id": job_id,
}
},
status_code=202,
)
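# Hypothetical dry-run call to the endpoint above; host, port, and /api prefix
# are deployment assumptions, and an authenticated admin session is assumed.
import requests

r = requests.post(
    "http://frigate.local:5000/api/media/sync",
    json={"dry_run": True, "media_types": ["event_snapshots"], "force": False},
    timeout=5,
)
if r.status_code == 202:
    job_id = r.json()["job"]["id"]  # then poll /media/sync/status/{job_id}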
@router.get(
"/media/sync/current",
dependencies=[Depends(require_role(["admin"]))],
summary="Get current media sync job",
description="""Retrieve the current running media sync job, if any. Returns the job details
or null when no job is active.""",
)
def get_media_sync_current():
"""Get the current running media sync job, if any."""
job = get_current_media_sync_job()
if job is None:
return JSONResponse(content={"job": None}, status_code=200)
return JSONResponse(
content={"job": job.to_dict()},
status_code=200,
)
@router.get(
"/media/sync/status/{job_id}",
dependencies=[Depends(require_role(["admin"]))],
summary="Get media sync job status",
description="""Get status and results for the specified media sync job id. Returns 200 with
job details including results, or 404 if the job is not found.""",
)
def get_media_sync_status(job_id: str):
"""Get the status of a specific media sync job."""
job = get_media_sync_job_by_id(job_id)
if job is None:
return JSONResponse(
content={"error": "Job not found"},
status_code=404,
)
return JSONResponse(
content={"job": job.to_dict()},
status_code=200,
)
@router.get("/labels", dependencies=[Depends(allow_any_authenticated())])
def get_labels(camera: str = ""):
try:
@ -937,12 +647,6 @@ def get_sub_labels(split_joined: Optional[int] = None):
return JSONResponse(content=sub_labels)
@router.get("/audio_labels", dependencies=[Depends(allow_any_authenticated())])
def get_audio_labels():
labels = load_labels("/audio-labelmap.txt", prefill=521)
return JSONResponse(content=labels)
@router.get("/plus/models", dependencies=[Depends(allow_any_authenticated())])
def plusModels(request: Request, filterByCurrentModelDetector: bool = False):
if not request.app.frigate_config.plus_api.is_active():

View File

@ -26,18 +26,12 @@ from frigate.api.defs.request.app_body import (
AppPutRoleBody,
)
from frigate.api.defs.tags import Tags
from frigate.config import AuthConfig, NetworkingConfig, ProxyConfig
from frigate.config import AuthConfig, ProxyConfig
from frigate.const import CONFIG_DIR, JWT_SECRET_ENV_VAR, PASSWORD_HASH_ALGORITHM
from frigate.models import User
logger = logging.getLogger(__name__)
# In-memory cache to track which clients we've logged for an anonymous access event.
# Keyed by a hashed value combining remote address + user-agent. The value is
# an expiration timestamp (float).
FIRST_LOAD_TTL_SECONDS = 60 * 60 * 24 * 7 # 7 days
_first_load_seen: dict[str, float] = {}
def require_admin_by_default():
"""
@ -47,7 +41,7 @@ def require_admin_by_default():
endpoints require admin access unless explicitly overridden with
allow_public(), allow_any_authenticated(), or require_role().
Internal port always has admin role set by the /auth endpoint,
Port 5000 (internal) always has admin role set by the /auth endpoint,
so this check passes automatically for internal requests.
Certain paths are exempted from the global admin check because they must
@ -136,7 +130,7 @@ def require_admin_by_default():
pass
# For all other paths, require admin role
# Internal port requests have admin role set automatically
# Port 5000 (internal) requests have admin role set automatically
role = request.headers.get("remote-role")
if role == "admin":
return
@ -149,17 +143,6 @@ def require_admin_by_default():
return admin_checker
def _is_authenticated(request: Request) -> bool:
"""
Helper to determine if a request is from an authenticated user.
Returns True if the request has a valid authenticated user (not anonymous).
Internal port requests are considered anonymous despite having admin role.
"""
username = request.headers.get("remote-user")
return username is not None and username != "anonymous"
def allow_public():
"""
Override dependency to allow unauthenticated access to an endpoint.
@ -188,7 +171,6 @@ def allow_any_authenticated():
Rejects:
- Requests with no remote-user header (did not pass through /auth endpoint)
- External port requests with anonymous user (auth disabled, no proxy auth)
Example:
@router.get("/authenticated-endpoint", dependencies=[Depends(allow_any_authenticated())])
@ -197,14 +179,8 @@ def allow_any_authenticated():
async def auth_checker(request: Request):
# Ensure a remote-user has been set by the /auth endpoint
username = request.headers.get("remote-user")
# Internal port requests have admin role and should be allowed
role = request.headers.get("remote-role")
if role != "admin":
if username is None or not _is_authenticated(request):
raise HTTPException(status_code=401, detail="Authentication required")
if username is None:
raise HTTPException(status_code=401, detail="Authentication required")
return
return auth_checker
@ -290,15 +266,6 @@ def get_remote_addr(request: Request):
return remote_addr or "127.0.0.1"
def _cleanup_first_load_seen() -> None:
"""Cleanup expired entries in the in-memory first-load cache."""
now = time.time()
# Build list for removal to avoid mutating dict during iteration
expired = [k for k, exp in _first_load_seen.items() if exp <= now]
for k in expired:
del _first_load_seen[k]
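# Minimal sketch of the cache lifecycle (address and user-agent illustrative):
# key = sha256("<remote_addr>|<user_agent>"), value = expiry timestamp.
import hashlib
import time

key = hashlib.sha256(b"203.0.113.7|Mozilla/5.0").hexdigest()
_first_load_seen[key] = time.time() + FIRST_LOAD_TTL_SECONDS
_cleanup_first_load_seen()  # removes the entry once its expiration passes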
def get_jwt_secret() -> str:
jwt_secret = None
# check env var
@ -603,18 +570,12 @@ def resolve_role(
def auth(request: Request):
auth_config: AuthConfig = request.app.frigate_config.auth
proxy_config: ProxyConfig = request.app.frigate_config.proxy
networking_config: NetworkingConfig = request.app.frigate_config.networking
success_response = Response("", status_code=202)
# handle case where internal port is a string with ip:port
internal_port = networking_config.listen.internal
if type(internal_port) is str:
internal_port = int(internal_port.split(":")[-1])
# don't require auth if the request is on the internal port
# this header is set by Frigate's nginx proxy, so it can't be spoofed
if int(request.headers.get("x-server-port", default=0)) == internal_port:
if int(request.headers.get("x-server-port", default=0)) == 5000:
success_response.headers["remote-user"] = "anonymous"
success_response.headers["remote-role"] = "admin"
return success_response
@ -759,30 +720,10 @@ def profile(request: Request):
roles_dict = request.app.frigate_config.auth.roles
allowed_cameras = User.get_allowed_cameras(role, roles_dict, all_camera_names)
response = JSONResponse(
return JSONResponse(
content={"username": username, "role": role, "allowed_cameras": allowed_cameras}
)
if username == "anonymous":
try:
remote_addr = get_remote_addr(request)
except Exception:
remote_addr = (
request.client.host if hasattr(request, "client") else "unknown"
)
ua = request.headers.get("user-agent", "")
key_material = f"{remote_addr}|{ua}"
cache_key = hashlib.sha256(key_material.encode()).hexdigest()
_cleanup_first_load_seen()
now = time.time()
if cache_key not in _first_load_seen:
_first_load_seen[cache_key] = now + FIRST_LOAD_TTL_SECONDS
logger.info(f"Anonymous user access from {remote_addr} ua={ua[:200]}")
return response
@router.get(
"/logout",

View File

@ -1,6 +1,5 @@
"""Camera apis."""
import asyncio
import json
import logging
import re
@ -12,9 +11,7 @@ import httpx
import requests
from fastapi import APIRouter, Depends, Query, Request, Response
from fastapi.responses import JSONResponse
from filelock import FileLock, Timeout
from onvif import ONVIFCamera, ONVIFError
from ruamel.yaml import YAML
from zeep.exceptions import Fault, TransportError
from zeep.transports import AsyncTransport
@ -24,14 +21,8 @@ from frigate.api.auth import (
require_role,
)
from frigate.api.defs.tags import Tags
from frigate.config import FrigateConfig
from frigate.config.camera.updater import (
CameraConfigUpdateEnum,
CameraConfigUpdateTopic,
)
from frigate.config.config import FrigateConfig
from frigate.util.builtin import clean_camera_user_pass
from frigate.util.camera_cleanup import cleanup_camera_db, cleanup_camera_files
from frigate.util.config import find_config_file
from frigate.util.image import run_ffmpeg_snapshot
from frigate.util.services import ffprobe_stream
@ -1004,154 +995,3 @@ async def onvif_probe(
await onvif_camera.close()
except Exception as e:
logger.debug(f"Error closing ONVIF camera session: {e}")
@router.delete(
"/cameras/{camera_name}",
dependencies=[Depends(require_role(["admin"]))],
)
async def delete_camera(
request: Request,
camera_name: str,
delete_exports: bool = Query(default=False),
):
"""Delete a camera and all its associated data.
Removes the camera from config, stops processes, and cleans up
all database entries and media files.
Args:
camera_name: Name of the camera to delete
delete_exports: Whether to also delete exports for this camera
"""
frigate_config: FrigateConfig = request.app.frigate_config
if camera_name not in frigate_config.cameras:
return JSONResponse(
content={
"success": False,
"message": f"Camera {camera_name} not found",
},
status_code=404,
)
old_camera_config = frigate_config.cameras[camera_name]
config_file = find_config_file()
lock = FileLock(f"{config_file}.lock", timeout=5)
try:
with lock:
with open(config_file, "r") as f:
old_raw_config = f.read()
try:
yaml = YAML()
yaml.indent(mapping=2, sequence=4, offset=2)
with open(config_file, "r") as f:
data = yaml.load(f)
# Remove camera from config
if "cameras" in data and camera_name in data["cameras"]:
del data["cameras"][camera_name]
# Remove camera from auth roles
auth = data.get("auth", {})
if auth and "roles" in auth:
empty_roles = []
for role_name, cameras_list in auth["roles"].items():
if (
isinstance(cameras_list, list)
and camera_name in cameras_list
):
cameras_list.remove(camera_name)
# Custom roles can't be empty; mark for removal
if not cameras_list and role_name not in (
"admin",
"viewer",
):
empty_roles.append(role_name)
for role_name in empty_roles:
del auth["roles"][role_name]
with open(config_file, "w") as f:
yaml.dump(data, f)
with open(config_file, "r") as f:
new_raw_config = f.read()
try:
config = FrigateConfig.parse(new_raw_config)
except Exception:
with open(config_file, "w") as f:
f.write(old_raw_config)
logger.exception(
"Config error after removing camera %s",
camera_name,
)
return JSONResponse(
content={
"success": False,
"message": "Error parsing config after camera removal",
},
status_code=400,
)
except Exception as e:
logger.error(
"Error updating config to remove camera %s: %s", camera_name, e
)
return JSONResponse(
content={
"success": False,
"message": "Error updating config",
},
status_code=500,
)
# Update runtime config
request.app.frigate_config = config
request.app.genai_manager.update_config(config)
# Publish removal to stop ffmpeg processes and clean up runtime state
request.app.config_publisher.publish_update(
CameraConfigUpdateTopic(CameraConfigUpdateEnum.remove, camera_name),
old_camera_config,
)
except Timeout:
return JSONResponse(
content={
"success": False,
"message": "Another process is currently updating the config",
},
status_code=409,
)
# Clean up database entries
counts, export_paths = await asyncio.to_thread(
cleanup_camera_db, camera_name, delete_exports
)
# Clean up media files in background thread
await asyncio.to_thread(
cleanup_camera_files, camera_name, export_paths if delete_exports else None
)
# Best-effort go2rtc stream removal
try:
requests.delete(
"http://127.0.0.1:1984/api/streams",
params={"src": camera_name},
timeout=5,
)
except Exception:
logger.debug("Failed to remove go2rtc stream for %s", camera_name)
return JSONResponse(
content={
"success": True,
"message": f"Camera {camera_name} has been deleted",
"cleanup": counts,
},
status_code=200,
)
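# Hypothetical deletion request for this endpoint; the camera name, host, port,
# and /api prefix are assumptions, and an admin session is assumed.
import requests

requests.delete(
    "http://frigate.local:5000/api/cameras/front",
    params={"delete_exports": "true"},
    timeout=30,
)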

View File

@ -1,821 +0,0 @@
"""Chat and LLM tool calling APIs."""
import base64
import json
import logging
import time
from datetime import datetime
from typing import Any, Dict, Generator, List, Optional
import cv2
from fastapi import APIRouter, Body, Depends, Request
from fastapi.responses import JSONResponse, StreamingResponse
from pydantic import BaseModel
from frigate.api.auth import (
allow_any_authenticated,
get_allowed_cameras_for_filter,
)
from frigate.api.defs.query.events_query_parameters import EventsQueryParams
from frigate.api.defs.request.chat_body import ChatCompletionRequest
from frigate.api.defs.response.chat_response import (
ChatCompletionResponse,
ChatMessageResponse,
ToolCall,
)
from frigate.api.defs.tags import Tags
from frigate.api.event import events
from frigate.genai.utils import build_assistant_message_for_conversation
logger = logging.getLogger(__name__)
router = APIRouter(tags=[Tags.chat])
def _chunk_content(content: str, chunk_size: int = 80) -> Generator[str, None, None]:
"""Yield content in word-aware chunks for streaming."""
if not content:
return
words = content.split(" ")
current: List[str] = []
current_len = 0
for w in words:
current.append(w)
current_len += len(w) + 1
if current_len >= chunk_size:
yield " ".join(current) + " "
current = []
current_len = 0
if current:
yield " ".join(current)
def _format_events_with_local_time(
events_list: List[Dict[str, Any]],
) -> List[Dict[str, Any]]:
"""Add human-readable local start/end times to each event for the LLM."""
result = []
for evt in events_list:
if not isinstance(evt, dict):
result.append(evt)
continue
copy_evt = dict(evt)
try:
start_ts = evt.get("start_time")
end_ts = evt.get("end_time")
if start_ts is not None:
dt_start = datetime.fromtimestamp(start_ts)
copy_evt["start_time_local"] = dt_start.strftime("%Y-%m-%d %I:%M:%S %p")
if end_ts is not None:
dt_end = datetime.fromtimestamp(end_ts)
copy_evt["end_time_local"] = dt_end.strftime("%Y-%m-%d %I:%M:%S %p")
except (TypeError, ValueError, OSError):
pass
result.append(copy_evt)
return result
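# Worked example (timestamp illustrative; the local string depends on server TZ):
evts = _format_events_with_local_time([{"id": "1", "start_time": 1704103200.0}])
# On a UTC server: evts[0]["start_time_local"] == "2024-01-01 10:00:00 AM"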
class ToolExecuteRequest(BaseModel):
"""Request model for tool execution."""
tool_name: str
arguments: Dict[str, Any]
def get_tool_definitions() -> List[Dict[str, Any]]:
"""
Get OpenAI-compatible tool definitions for Frigate.
Returns a list of tool definitions that can be used with OpenAI-compatible
function calling APIs.
"""
return [
{
"type": "function",
"function": {
"name": "search_objects",
"description": (
"Search for detected objects in Frigate by camera, object label, time range, "
"zones, and other filters. Use this to answer questions about when "
"objects were detected, what objects appeared, or to find specific object detections. "
"An 'object' in Frigate represents a tracked detection (e.g., a person, package, car). "
"When the user asks about a specific name (person, delivery company, animal, etc.), "
"filter by sub_label only and do not set label."
),
"parameters": {
"type": "object",
"properties": {
"camera": {
"type": "string",
"description": "Camera name to filter by (optional).",
},
"label": {
"type": "string",
"description": "Object label to filter by (e.g., 'person', 'package', 'car').",
},
"sub_label": {
"type": "string",
"description": "Name of a person, delivery company, animal, etc. When filtering by a specific name, use only sub_label; do not set label.",
},
"after": {
"type": "string",
"description": "Start time in ISO 8601 format (e.g., '2024-01-01T00:00:00Z').",
},
"before": {
"type": "string",
"description": "End time in ISO 8601 format (e.g., '2024-01-01T23:59:59Z').",
},
"zones": {
"type": "array",
"items": {"type": "string"},
"description": "List of zone names to filter by.",
},
"limit": {
"type": "integer",
"description": "Maximum number of objects to return (default: 25).",
"default": 25,
},
},
},
"required": [],
},
},
{
"type": "function",
"function": {
"name": "get_live_context",
"description": (
"Get the current detection information for a camera: objects being tracked, "
"zones, timestamps. Use this to understand what is visible in the live view. "
"Call this when the user has included a live image (via include_live_image) or "
"when answering questions about what is happening right now on a specific camera."
),
"parameters": {
"type": "object",
"properties": {
"camera": {
"type": "string",
"description": "Camera name to get live context for.",
},
},
"required": ["camera"],
},
},
},
]
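# Sketch of feeding these definitions to an OpenAI-compatible client; the
# base_url and model name are assumptions, not something Frigate provides.
from openai import OpenAI

client = OpenAI(base_url="http://localhost:11434/v1", api_key="unused")
response = client.chat.completions.create(
    model="a-tool-capable-model",
    messages=[{"role": "user", "content": "Any packages at the door today?"}],
    tools=get_tool_definitions(),
    tool_choice="auto",
)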
@router.get(
"/chat/tools",
dependencies=[Depends(allow_any_authenticated())],
summary="Get available tools",
description="Returns OpenAI-compatible tool definitions for function calling.",
)
def get_tools() -> JSONResponse:
"""Get list of available tools for LLM function calling."""
tools = get_tool_definitions()
return JSONResponse(content={"tools": tools})
async def _execute_search_objects(
arguments: Dict[str, Any],
allowed_cameras: List[str],
) -> JSONResponse:
"""
Execute the search_objects tool.
This searches for detected objects (events) in Frigate using the same
logic as the events API endpoint.
"""
# Parse after/before as server local time; convert to Unix timestamp
after = arguments.get("after")
before = arguments.get("before")
def _parse_as_local_timestamp(s: str):
s = s.replace("Z", "").strip()[:19]
dt = datetime.strptime(s, "%Y-%m-%dT%H:%M:%S")
return time.mktime(dt.timetuple())
if after:
try:
after = _parse_as_local_timestamp(after)
except (ValueError, AttributeError, TypeError):
logger.warning(f"Invalid 'after' timestamp format: {after}")
after = None
if before:
try:
before = _parse_as_local_timestamp(before)
except (ValueError, AttributeError, TypeError):
logger.warning(f"Invalid 'before' timestamp format: {before}")
before = None
# Convert zones array to comma-separated string if provided
zones = arguments.get("zones")
if isinstance(zones, list):
zones = ",".join(zones)
elif zones is None:
zones = "all"
# Build query parameters compatible with EventsQueryParams
query_params = EventsQueryParams(
cameras=arguments.get("camera", "all"),
labels=arguments.get("label", "all"),
sub_labels=arguments.get("sub_label", "all").lower(),
zones=zones,
zone=zones,
after=after,
before=before,
limit=arguments.get("limit", 25),
)
try:
# Call the events endpoint function directly
# The events function is synchronous and takes params and allowed_cameras
response = events(query_params, allowed_cameras)
# The response is already a JSONResponse with event data
# Return it as-is for the LLM
return response
except Exception as e:
logger.error(f"Error executing search_objects: {e}", exc_info=True)
return JSONResponse(
content={
"success": False,
"message": "Error searching objects",
},
status_code=500,
)
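# Worked example of the local-time parsing above (input illustrative):
import time
from datetime import datetime

s = "2024-06-01T08:30:00Z".replace("Z", "").strip()[:19]
ts = time.mktime(datetime.strptime(s, "%Y-%m-%dT%H:%M:%S").timetuple())
# ts is the Unix timestamp for 08:30:00 server local time on 2024-06-01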
@router.post(
"/chat/execute",
dependencies=[Depends(allow_any_authenticated())],
summary="Execute a tool",
description="Execute a tool function call from an LLM.",
)
async def execute_tool(
body: ToolExecuteRequest = Body(...),
allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter),
) -> JSONResponse:
"""
Execute a tool function call.
This endpoint receives tool calls from LLMs and executes the corresponding
Frigate operations, returning results in a format the LLM can understand.
"""
tool_name = body.tool_name
arguments = body.arguments
logger.debug(f"Executing tool: {tool_name} with arguments: {arguments}")
if tool_name == "search_objects":
return await _execute_search_objects(arguments, allowed_cameras)
return JSONResponse(
content={
"success": False,
"message": f"Unknown tool: {tool_name}",
"tool": tool_name,
},
status_code=400,
)
async def _execute_get_live_context(
request: Request,
camera: str,
allowed_cameras: List[str],
) -> Dict[str, Any]:
if camera not in allowed_cameras:
return {
"error": f"Camera '{camera}' not found or access denied",
}
if camera not in request.app.frigate_config.cameras:
return {
"error": f"Camera '{camera}' not found",
}
try:
frame_processor = request.app.detected_frames_processor
camera_state = frame_processor.camera_states.get(camera)
if camera_state is None:
return {
"error": f"Camera '{camera}' state not available",
}
tracked_objects_dict = {}
with camera_state.current_frame_lock:
tracked_objects = camera_state.tracked_objects.copy()
frame_time = camera_state.current_frame_time
for obj_id, tracked_obj in tracked_objects.items():
obj_dict = tracked_obj.to_dict()
if obj_dict.get("frame_time") == frame_time:
tracked_objects_dict[obj_id] = {
"label": obj_dict.get("label"),
"zones": obj_dict.get("current_zones", []),
"sub_label": obj_dict.get("sub_label"),
"stationary": obj_dict.get("stationary", False),
}
return {
"camera": camera,
"timestamp": frame_time,
"detections": list(tracked_objects_dict.values()),
}
except Exception as e:
logger.error(f"Error executing get_live_context: {e}", exc_info=True)
return {
"error": "Error getting live context",
}
async def _get_live_frame_image_url(
request: Request,
camera: str,
allowed_cameras: List[str],
) -> Optional[str]:
"""
Fetch the current live frame for a camera as a base64 data URL.
Returns None if the frame cannot be retrieved. Used when include_live_image
is set to attach the image to the first user message.
"""
if (
camera not in allowed_cameras
or camera not in request.app.frigate_config.cameras
):
return None
try:
frame_processor = request.app.detected_frames_processor
if camera not in frame_processor.camera_states:
return None
frame = frame_processor.get_current_frame(camera, {})
if frame is None:
return None
height, width = frame.shape[:2]
max_dimension = 1024
if height > max_dimension or width > max_dimension:
scale = max_dimension / max(height, width)
frame = cv2.resize(
frame,
(int(width * scale), int(height * scale)),
interpolation=cv2.INTER_AREA,
)
_, img_encoded = cv2.imencode(".jpg", frame, [cv2.IMWRITE_JPEG_QUALITY, 85])
b64 = base64.b64encode(img_encoded.tobytes()).decode("utf-8")
return f"data:image/jpeg;base64,{b64}"
except Exception as e:
logger.debug("Failed to get live frame for %s: %s", camera, e)
return None
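# Worked example of the downscale above (source dimensions illustrative):
height, width = 1080, 1920
scale = 1024 / max(height, width)  # 8/15, about 0.533
# the frame is resized to roughly 1024x576 before JPEG encoding at quality 85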
async def _execute_tool_internal(
tool_name: str,
arguments: Dict[str, Any],
request: Request,
allowed_cameras: List[str],
) -> Dict[str, Any]:
"""
Internal helper to execute a tool and return the result as a dict.
This is used by the chat completion endpoint to execute tools.
"""
if tool_name == "search_objects":
response = await _execute_search_objects(arguments, allowed_cameras)
try:
if hasattr(response, "body"):
body_str = response.body.decode("utf-8")
return json.loads(body_str)
elif hasattr(response, "content"):
return response.content
else:
return {}
except (json.JSONDecodeError, AttributeError) as e:
logger.warning(f"Failed to extract tool result: {e}")
return {"error": "Failed to parse tool result"}
elif tool_name == "get_live_context":
camera = arguments.get("camera")
if not camera:
logger.error(
"Tool get_live_context failed: camera parameter is required. "
"Arguments: %s",
json.dumps(arguments),
)
return {"error": "Camera parameter is required"}
return await _execute_get_live_context(request, camera, allowed_cameras)
else:
logger.error(
"Tool call failed: unknown tool %r. Expected one of: search_objects, get_live_context. "
"Arguments received: %s",
tool_name,
json.dumps(arguments),
)
return {"error": f"Unknown tool: {tool_name}"}
async def _execute_pending_tools(
pending_tool_calls: List[Dict[str, Any]],
request: Request,
allowed_cameras: List[str],
) -> tuple[List[ToolCall], List[Dict[str, Any]]]:
"""
Execute a list of tool calls; return (ToolCall list for API response, tool result dicts for conversation).
"""
tool_calls_out: List[ToolCall] = []
tool_results: List[Dict[str, Any]] = []
for tool_call in pending_tool_calls:
tool_name = tool_call["name"]
tool_args = tool_call.get("arguments") or {}
tool_call_id = tool_call["id"]
logger.debug(
f"Executing tool: {tool_name} (id: {tool_call_id}) with arguments: {json.dumps(tool_args, indent=2)}"
)
try:
tool_result = await _execute_tool_internal(
tool_name, tool_args, request, allowed_cameras
)
if isinstance(tool_result, dict) and tool_result.get("error"):
logger.error(
"Tool call %s (id: %s) returned error: %s. Arguments: %s",
tool_name,
tool_call_id,
tool_result.get("error"),
json.dumps(tool_args),
)
if tool_name == "search_objects" and isinstance(tool_result, list):
tool_result = _format_events_with_local_time(tool_result)
_keys = {
"id",
"camera",
"label",
"zones",
"start_time_local",
"end_time_local",
"sub_label",
"event_count",
}
tool_result = [
{k: evt[k] for k in _keys if k in evt}
for evt in tool_result
if isinstance(evt, dict)
]
result_content = (
json.dumps(tool_result)
if isinstance(tool_result, (dict, list))
else (tool_result if isinstance(tool_result, str) else str(tool_result))
)
tool_calls_out.append(
ToolCall(name=tool_name, arguments=tool_args, response=result_content)
)
tool_results.append(
{
"role": "tool",
"tool_call_id": tool_call_id,
"content": result_content,
}
)
except Exception as e:
logger.error(
"Error executing tool %s (id: %s): %s. Arguments: %s",
tool_name,
tool_call_id,
e,
json.dumps(tool_args),
exc_info=True,
)
error_content = json.dumps({"error": f"Tool execution failed: {str(e)}"})
tool_calls_out.append(
ToolCall(name=tool_name, arguments=tool_args, response=error_content)
)
tool_results.append(
{
"role": "tool",
"tool_call_id": tool_call_id,
"content": error_content,
}
)
return (tool_calls_out, tool_results)
@router.post(
"/chat/completion",
dependencies=[Depends(allow_any_authenticated())],
summary="Chat completion with tool calling",
description=(
"Send a chat message to the configured GenAI provider with tool calling support. "
"The LLM can call Frigate tools to answer questions about your cameras and events."
),
)
async def chat_completion(
request: Request,
body: ChatCompletionRequest = Body(...),
allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter),
):
"""
Chat completion endpoint with tool calling support.
This endpoint:
1. Gets the configured GenAI client
2. Gets tool definitions
3. Sends messages + tools to LLM
4. Handles tool_calls if present
5. Executes tools and sends results back to LLM
6. Repeats until final answer
7. Returns response to user
"""
genai_client = request.app.genai_manager.tool_client
if not genai_client:
return JSONResponse(
content={
"error": "GenAI is not configured. Please configure a GenAI provider in your Frigate config.",
},
status_code=400,
)
tools = get_tool_definitions()
conversation = []
current_datetime = datetime.now()
current_date_str = current_datetime.strftime("%Y-%m-%d")
current_time_str = current_datetime.strftime("%I:%M:%S %p")
cameras_info = []
config = request.app.frigate_config
for camera_id in allowed_cameras:
if camera_id not in config.cameras:
continue
camera_config = config.cameras[camera_id]
friendly_name = (
camera_config.friendly_name
if camera_config.friendly_name
else camera_id.replace("_", " ").title()
)
cameras_info.append(f" - {friendly_name} (ID: {camera_id})")
cameras_section = ""
if cameras_info:
cameras_section = (
"\n\nAvailable cameras:\n"
+ "\n".join(cameras_info)
+ "\n\nWhen users refer to cameras by their friendly name (e.g., 'Back Deck Camera'), use the corresponding camera ID (e.g., 'back_deck_cam') in tool calls."
)
live_image_note = ""
if body.include_live_image:
live_image_note = (
f"\n\nThe first user message includes a live image from camera "
f"'{body.include_live_image}'. Use get_live_context for that camera to get "
"current detection details (objects, zones) to aid in understanding the image."
)
system_prompt = f"""You are a helpful assistant for Frigate, a security camera NVR system. You help users answer questions about their cameras, detected objects, and events.
Current server local date and time: {current_date_str} at {current_time_str}
Do not start your response with phrases like "I will check...", "Let me see...", or "Let me look...". Answer directly.
Always present times to the user in the server's local timezone. When tool results include start_time_local and end_time_local, use those exact strings when listing or describing detection times—do not convert or invent timestamps. Do not use UTC or ISO format with Z for the user-facing answer unless the tool result only provides Unix timestamps without local time fields.
When users ask about "today", "yesterday", "this week", etc., use the current date above as reference.
When searching for objects or events, use ISO 8601 format for dates (e.g., {current_date_str}T00:00:00Z for the start of today).
Always be accurate with time calculations based on the current date provided.{cameras_section}{live_image_note}"""
conversation.append(
{
"role": "system",
"content": system_prompt,
}
)
first_user_message_seen = False
for msg in body.messages:
msg_dict = {
"role": msg.role,
"content": msg.content,
}
if msg.tool_call_id:
msg_dict["tool_call_id"] = msg.tool_call_id
if msg.name:
msg_dict["name"] = msg.name
if (
msg.role == "user"
and not first_user_message_seen
and body.include_live_image
):
first_user_message_seen = True
image_url = await _get_live_frame_image_url(
request, body.include_live_image, allowed_cameras
)
if image_url:
msg_dict["content"] = [
{"type": "text", "text": msg.content},
{"type": "image_url", "image_url": {"url": image_url}},
]
conversation.append(msg_dict)
tool_iterations = 0
tool_calls: List[ToolCall] = []
max_iterations = body.max_tool_iterations
logger.debug(
f"Starting chat completion with {len(conversation)} message(s), "
f"{len(tools)} tool(s) available, max_iterations={max_iterations}"
)
# True LLM streaming when client supports it and stream requested
if body.stream and hasattr(genai_client, "chat_with_tools_stream"):
stream_tool_calls: List[ToolCall] = []
stream_iterations = 0
async def stream_body_llm():
nonlocal conversation, stream_tool_calls, stream_iterations
while stream_iterations < max_iterations:
logger.debug(
f"Streaming LLM (iteration {stream_iterations + 1}/{max_iterations}) "
f"with {len(conversation)} message(s)"
)
async for event in genai_client.chat_with_tools_stream(
messages=conversation,
tools=tools if tools else None,
tool_choice="auto",
):
kind, value = event
if kind == "content_delta":
yield (
json.dumps({"type": "content", "delta": value}).encode(
"utf-8"
)
+ b"\n"
)
elif kind == "message":
msg = value
if msg.get("finish_reason") == "error":
yield (
json.dumps(
{
"type": "error",
"error": "An error occurred while processing your request.",
}
).encode("utf-8")
+ b"\n"
)
return
pending = msg.get("tool_calls")
if pending:
stream_iterations += 1
conversation.append(
build_assistant_message_for_conversation(
msg.get("content"), pending
)
)
executed_calls, tool_results = await _execute_pending_tools(
pending, request, allowed_cameras
)
stream_tool_calls.extend(executed_calls)
conversation.extend(tool_results)
yield (
json.dumps(
{
"type": "tool_calls",
"tool_calls": [
tc.model_dump() for tc in stream_tool_calls
],
}
).encode("utf-8")
+ b"\n"
)
break
else:
yield (json.dumps({"type": "done"}).encode("utf-8") + b"\n")
return
else:
yield json.dumps({"type": "done"}).encode("utf-8") + b"\n"
return StreamingResponse(
stream_body_llm(),
media_type="application/x-ndjson",
headers={"X-Accel-Buffering": "no"},
)
try:
while tool_iterations < max_iterations:
logger.debug(
f"Calling LLM (iteration {tool_iterations + 1}/{max_iterations}) "
f"with {len(conversation)} message(s) in conversation"
)
response = genai_client.chat_with_tools(
messages=conversation,
tools=tools if tools else None,
tool_choice="auto",
)
if response.get("finish_reason") == "error":
logger.error("GenAI client returned an error")
return JSONResponse(
content={
"error": "An error occurred while processing your request.",
},
status_code=500,
)
conversation.append(
build_assistant_message_for_conversation(
response.get("content"), response.get("tool_calls")
)
)
pending_tool_calls = response.get("tool_calls")
if not pending_tool_calls:
logger.debug(
f"Chat completion finished with final answer (iterations: {tool_iterations})"
)
final_content = response.get("content") or ""
if body.stream:
async def stream_body() -> Any:
if tool_calls:
yield (
json.dumps(
{
"type": "tool_calls",
"tool_calls": [
tc.model_dump() for tc in tool_calls
],
}
).encode("utf-8")
+ b"\n"
)
# Stream content in word-sized chunks for smooth UX
for part in _chunk_content(final_content):
yield (
json.dumps({"type": "content", "delta": part}).encode(
"utf-8"
)
+ b"\n"
)
yield json.dumps({"type": "done"}).encode("utf-8") + b"\n"
return StreamingResponse(
stream_body(),
media_type="application/x-ndjson",
)
return JSONResponse(
content=ChatCompletionResponse(
message=ChatMessageResponse(
role="assistant",
content=final_content,
tool_calls=None,
),
finish_reason=response.get("finish_reason", "stop"),
tool_iterations=tool_iterations,
tool_calls=tool_calls,
).model_dump(),
)
tool_iterations += 1
logger.debug(
f"Tool calls detected (iteration {tool_iterations}/{max_iterations}): "
f"{len(pending_tool_calls)} tool(s) to execute"
)
executed_calls, tool_results = await _execute_pending_tools(
pending_tool_calls, request, allowed_cameras
)
tool_calls.extend(executed_calls)
conversation.extend(tool_results)
logger.debug(
f"Added {len(tool_results)} tool result(s) to conversation. "
f"Continuing with next LLM call..."
)
logger.warning(
f"Max tool iterations ({max_iterations}) reached. Returning partial response."
)
return JSONResponse(
content=ChatCompletionResponse(
message=ChatMessageResponse(
role="assistant",
content="I reached the maximum number of tool call iterations. Please try rephrasing your question.",
tool_calls=None,
),
finish_reason="length",
tool_iterations=tool_iterations,
tool_calls=tool_calls,
).model_dump(),
)
except Exception as e:
logger.error(f"Error in chat completion: {e}", exc_info=True)
return JSONResponse(
content={
"error": "An error occurred while processing your request.",
},
status_code=500,
)
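# Hypothetical non-streaming client call for this endpoint; host, port, and
# /api prefix are deployment assumptions.
import requests

r = requests.post(
    "http://frigate.local:5000/api/chat/completion",
    json={
        "messages": [{"role": "user", "content": "Who was at the front door today?"}],
        "max_tool_iterations": 5,
        "stream": False,
    },
    timeout=60,
)
print(r.json()["message"]["content"])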

View File

@ -1,176 +0,0 @@
"""Debug replay API endpoints."""
import asyncio
import logging
from datetime import datetime
from fastapi import APIRouter, Depends, Request
from fastapi.responses import JSONResponse
from pydantic import BaseModel, Field
from frigate.api.auth import require_role
from frigate.api.defs.tags import Tags
logger = logging.getLogger(__name__)
router = APIRouter(tags=[Tags.app])
class DebugReplayStartBody(BaseModel):
"""Request body for starting a debug replay session."""
camera: str = Field(title="Source camera name")
start_time: float = Field(title="Start timestamp")
end_time: float = Field(title="End timestamp")
class DebugReplayStartResponse(BaseModel):
"""Response for starting a debug replay session."""
success: bool
replay_camera: str
class DebugReplayStatusResponse(BaseModel):
"""Response for debug replay status."""
active: bool
replay_camera: str | None = None
source_camera: str | None = None
start_time: float | None = None
end_time: float | None = None
live_ready: bool = False
class DebugReplayStopResponse(BaseModel):
"""Response for stopping a debug replay session."""
success: bool
@router.post(
"/debug_replay/start",
response_model=DebugReplayStartResponse,
dependencies=[Depends(require_role(["admin"]))],
summary="Start debug replay",
description="Start a debug replay session from camera recordings.",
)
async def start_debug_replay(request: Request, body: DebugReplayStartBody):
"""Start a debug replay session."""
replay_manager = request.app.replay_manager
if replay_manager.active:
return JSONResponse(
content={
"success": False,
"message": "A replay session is already active",
},
status_code=409,
)
try:
replay_camera = await asyncio.to_thread(
replay_manager.start,
source_camera=body.camera,
start_ts=body.start_time,
end_ts=body.end_time,
frigate_config=request.app.frigate_config,
config_publisher=request.app.config_publisher,
)
except ValueError:
logger.exception("Invalid parameters for debug replay start request")
return JSONResponse(
content={
"success": False,
"message": "Invalid debug replay request parameters",
},
status_code=400,
)
except RuntimeError:
logger.exception("Error while starting debug replay session")
return JSONResponse(
content={
"success": False,
"message": "An internal error occurred while starting debug replay",
},
status_code=500,
)
return DebugReplayStartResponse(
success=True,
replay_camera=replay_camera,
)
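# Hypothetical request to start a replay session; camera name and timestamps
# are illustrative, and an admin session plus the /api prefix are assumed.
import requests

requests.post(
    "http://frigate.local:5000/api/debug_replay/start",
    json={"camera": "front", "start_time": 1704103200.0, "end_time": 1704103260.0},
    timeout=10,
)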
@router.get(
"/debug_replay/status",
response_model=DebugReplayStatusResponse,
dependencies=[Depends(require_role(["admin"]))],
summary="Get debug replay status",
description="Get the status of the current debug replay session.",
)
def get_debug_replay_status(request: Request):
"""Get the current replay session status."""
replay_manager = request.app.replay_manager
live_ready = False
replay_camera = replay_manager.replay_camera_name
if replay_manager.active and replay_camera:
frame_processor = request.app.detected_frames_processor
frame = frame_processor.get_current_frame(replay_camera)
if frame is not None:
frame_time = frame_processor.get_current_frame_time(replay_camera)
camera_config = request.app.frigate_config.cameras.get(replay_camera)
retry_interval = 10
if camera_config is not None:
retry_interval = float(camera_config.ffmpeg.retry_interval or 10)
live_ready = datetime.now().timestamp() <= frame_time + retry_interval
return DebugReplayStatusResponse(
active=replay_manager.active,
replay_camera=replay_camera,
source_camera=replay_manager.source_camera,
start_time=replay_manager.start_ts,
end_time=replay_manager.end_ts,
live_ready=live_ready,
)
@router.post(
"/debug_replay/stop",
response_model=DebugReplayStopResponse,
dependencies=[Depends(require_role(["admin"]))],
summary="Stop debug replay",
description="Stop the active debug replay session and clean up all artifacts.",
)
async def stop_debug_replay(request: Request):
"""Stop the active replay session."""
replay_manager = request.app.replay_manager
if not replay_manager.active:
return JSONResponse(
content={"success": False, "message": "No active replay session"},
status_code=400,
)
try:
await asyncio.to_thread(
replay_manager.stop,
frigate_config=request.app.frigate_config,
config_publisher=request.app.config_publisher,
)
except (ValueError, RuntimeError, OSError) as e:
logger.error("Error stopping replay: %s", e)
return JSONResponse(
content={
"success": False,
"message": "Failed to stop replay session due to an internal error.",
},
status_code=500,
)
return DebugReplayStopResponse(success=True)

View File

@ -1,7 +1,8 @@
from enum import Enum
from typing import Optional
from typing import Optional, Union
from pydantic import BaseModel
from pydantic.json_schema import SkipJsonSchema
class Extension(str, Enum):
@ -47,3 +48,15 @@ class MediaMjpegFeedQueryParams(BaseModel):
mask: Optional[int] = None
motion: Optional[int] = None
regions: Optional[int] = None
class MediaRecordingsSummaryQueryParams(BaseModel):
timezone: str = "utc"
cameras: Optional[str] = "all"
class MediaRecordingsAvailabilityQueryParams(BaseModel):
cameras: str = "all"
before: Union[float, SkipJsonSchema[None]] = None
after: Union[float, SkipJsonSchema[None]] = None
scale: int = 30

View File

@ -1,21 +0,0 @@
from typing import Optional, Union
from pydantic import BaseModel
from pydantic.json_schema import SkipJsonSchema
class MediaRecordingsSummaryQueryParams(BaseModel):
timezone: str = "utc"
cameras: Optional[str] = "all"
class MediaRecordingsAvailabilityQueryParams(BaseModel):
cameras: str = "all"
before: Union[float, SkipJsonSchema[None]] = None
after: Union[float, SkipJsonSchema[None]] = None
scale: int = 30
class RecordingsDeleteQueryParams(BaseModel):
keep: Optional[str] = None
cameras: Optional[str] = "all"

View File

@ -1,13 +1,12 @@
from typing import Any, Dict, List, Optional
from typing import Any, Dict, Optional
from pydantic import BaseModel, Field
from pydantic import BaseModel
class AppConfigSetBody(BaseModel):
requires_restart: int = 1
update_topic: str | None = None
config_data: Optional[Dict[str, Any]] = None
skip_save: bool = False
class AppPutPasswordBody(BaseModel):
@ -28,16 +27,3 @@ class AppPostLoginBody(BaseModel):
class AppPutRoleBody(BaseModel):
role: str
class MediaSyncBody(BaseModel):
dry_run: bool = Field(
default=True, description="If True, only report orphans without deleting them"
)
media_types: List[str] = Field(
default=["all"],
description="Types of media to sync: 'all', 'event_snapshots', 'event_thumbnails', 'review_thumbnails', 'previews', 'exports', 'recordings'",
)
force: bool = Field(
default=False, description="If True, bypass safety threshold checks"
)

View File

@ -1,45 +0,0 @@
"""Chat API request models."""
from typing import Optional
from pydantic import BaseModel, Field
class ChatMessage(BaseModel):
"""A single message in a chat conversation."""
role: str = Field(
description="Message role: 'user', 'assistant', 'system', or 'tool'"
)
content: str = Field(description="Message content")
tool_call_id: Optional[str] = Field(
default=None, description="For tool messages, the ID of the tool call"
)
name: Optional[str] = Field(
default=None, description="For tool messages, the tool name"
)
class ChatCompletionRequest(BaseModel):
"""Request for chat completion with tool calling."""
messages: list[ChatMessage] = Field(
description="List of messages in the conversation"
)
max_tool_iterations: int = Field(
default=5,
ge=1,
le=10,
description="Maximum number of tool call iterations (default: 5)",
)
include_live_image: Optional[str] = Field(
default=None,
description=(
"If set, the current live frame from this camera is attached to the first "
"user message as multimodal content. Use with get_live_context for detection info."
),
)
stream: bool = Field(
default=False,
description="If true, stream the final assistant response in the body as newline-delimited JSON.",
)
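# Example instantiation of the request model above (values illustrative):
ChatCompletionRequest(
    messages=[ChatMessage(role="user", content="What happened today?")],
    max_tool_iterations=3,
    include_live_image="front",
    stream=True,
)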

View File

@ -41,7 +41,6 @@ class EventsCreateBody(BaseModel):
duration: Optional[int] = 30
include_recording: Optional[bool] = True
draw: Optional[dict] = {}
pre_capture: Optional[int] = None
class EventsEndBody(BaseModel):

View File

@ -1,35 +0,0 @@
from typing import Optional
from pydantic import BaseModel, Field
class ExportCaseCreateBody(BaseModel):
"""Request body for creating a new export case."""
name: str = Field(max_length=100, description="Friendly name of the export case")
description: Optional[str] = Field(
default=None, description="Optional description of the export case"
)
class ExportCaseUpdateBody(BaseModel):
"""Request body for updating an existing export case."""
name: Optional[str] = Field(
default=None,
max_length=100,
description="Updated friendly name of the export case",
)
description: Optional[str] = Field(
default=None, description="Updated description of the export case"
)
class ExportCaseAssignBody(BaseModel):
"""Request body for assigning or unassigning an export to a case."""
export_case_id: Optional[str] = Field(
default=None,
max_length=30,
description="Case ID to assign to the export, or null to unassign",
)

View File

@ -3,47 +3,18 @@ from typing import Optional, Union
from pydantic import BaseModel, Field
from pydantic.json_schema import SkipJsonSchema
from frigate.record.export import PlaybackSourceEnum
from frigate.record.export import (
PlaybackFactorEnum,
PlaybackSourceEnum,
)
class ExportRecordingsBody(BaseModel):
playback: PlaybackFactorEnum = Field(
default=PlaybackFactorEnum.realtime, title="Playback factor"
)
source: PlaybackSourceEnum = Field(
default=PlaybackSourceEnum.recordings, title="Playback source"
)
name: Optional[str] = Field(title="Friendly name", default=None, max_length=256)
image_path: Union[str, SkipJsonSchema[None]] = None
export_case_id: Optional[str] = Field(
default=None,
title="Export case ID",
max_length=30,
description="ID of the export case to assign this export to",
)
class ExportRecordingsCustomBody(BaseModel):
source: PlaybackSourceEnum = Field(
default=PlaybackSourceEnum.recordings, title="Playback source"
)
name: str = Field(title="Friendly name", default=None, max_length=256)
image_path: Union[str, SkipJsonSchema[None]] = None
export_case_id: Optional[str] = Field(
default=None,
title="Export case ID",
max_length=30,
description="ID of the export case to assign this export to",
)
ffmpeg_input_args: Optional[str] = Field(
default=None,
title="FFmpeg input arguments",
description="Custom FFmpeg input arguments. If not provided, defaults to timelapse input args.",
)
ffmpeg_output_args: Optional[str] = Field(
default=None,
title="FFmpeg output arguments",
description="Custom FFmpeg output arguments. If not provided, defaults to timelapse output args.",
)
cpu_fallback: bool = Field(
default=False,
title="CPU Fallback",
description="If true, retry export without hardware acceleration if the initial export fails.",
)

View File

@ -1,54 +0,0 @@
"""Chat API response models."""
from typing import Any, Optional
from pydantic import BaseModel, Field
class ToolCallInvocation(BaseModel):
"""A tool call requested by the LLM (before execution)."""
id: str = Field(description="Unique identifier for this tool call")
name: str = Field(description="Tool name to call")
arguments: dict[str, Any] = Field(description="Arguments for the tool call")
class ChatMessageResponse(BaseModel):
"""A message in the chat response."""
role: str = Field(description="Message role")
content: Optional[str] = Field(
default=None, description="Message content (None if tool calls present)"
)
tool_calls: Optional[list[ToolCallInvocation]] = Field(
default=None, description="Tool calls if LLM wants to call tools"
)
class ToolCall(BaseModel):
"""A tool that was executed during the completion, with its response."""
name: str = Field(description="Tool name that was called")
arguments: dict[str, Any] = Field(
default_factory=dict, description="Arguments passed to the tool"
)
response: str = Field(
default="",
description="The response or result returned from the tool execution",
)
class ChatCompletionResponse(BaseModel):
"""Response from chat completion."""
message: ChatMessageResponse = Field(description="The assistant's message")
finish_reason: str = Field(
description="Reason generation stopped: 'stop', 'tool_calls', 'length', 'error'"
)
tool_iterations: int = Field(
default=0, description="Number of tool call iterations performed"
)
tool_calls: list[ToolCall] = Field(
default_factory=list,
description="List of tool calls that were executed during this completion",
)

View File

@ -1,22 +0,0 @@
from typing import List, Optional
from pydantic import BaseModel, Field
class ExportCaseModel(BaseModel):
"""Model representing a single export case."""
id: str = Field(description="Unique identifier for the export case")
name: str = Field(description="Friendly name of the export case")
description: Optional[str] = Field(
default=None, description="Optional description of the export case"
)
created_at: float = Field(
description="Unix timestamp when the export case was created"
)
updated_at: float = Field(
description="Unix timestamp when the export case was last updated"
)
ExportCasesResponse = List[ExportCaseModel]

View File

@ -15,9 +15,6 @@ class ExportModel(BaseModel):
in_progress: bool = Field(
description="Whether the export is currently being processed"
)
export_case_id: Optional[str] = Field(
default=None, description="ID of the export case this export belongs to"
)
class StartExportResponse(BaseModel):

View File

@ -3,16 +3,13 @@ from enum import Enum
class Tags(Enum):
app = "App"
auth = "Auth"
camera = "Camera"
chat = "Chat"
events = "Events"
export = "Export"
classification = "Classification"
preview = "Preview"
logs = "Logs"
media = "Media"
motion_search = "Motion Search"
notifications = "Notifications"
preview = "Preview"
recordings = "Recordings"
review = "Review"
export = "Export"
events = "Events"
classification = "Classification"
auth = "Auth"

View File

@ -1782,7 +1782,6 @@ def create_event(
body.duration,
"api",
body.draw,
body.pre_capture,
),
EventMetadataTypeEnum.manual_event_create.value,
)

View File

@ -4,10 +4,10 @@ import logging
import random
import string
from pathlib import Path
from typing import List, Optional
from typing import List
import psutil
from fastapi import APIRouter, Depends, Query, Request
from fastapi import APIRouter, Depends, Request
from fastapi.responses import JSONResponse
from pathvalidate import sanitize_filepath
from peewee import DoesNotExist
@ -19,20 +19,8 @@ from frigate.api.auth import (
require_camera_access,
require_role,
)
from frigate.api.defs.request.export_case_body import (
ExportCaseAssignBody,
ExportCaseCreateBody,
ExportCaseUpdateBody,
)
from frigate.api.defs.request.export_recordings_body import (
ExportRecordingsBody,
ExportRecordingsCustomBody,
)
from frigate.api.defs.request.export_recordings_body import ExportRecordingsBody
from frigate.api.defs.request.export_rename_body import ExportRenameBody
from frigate.api.defs.response.export_case_response import (
ExportCaseModel,
ExportCasesResponse,
)
from frigate.api.defs.response.export_response import (
ExportModel,
ExportsResponse,
@ -41,9 +29,9 @@ from frigate.api.defs.response.export_response import (
from frigate.api.defs.response.generic_response import GenericResponse
from frigate.api.defs.tags import Tags
from frigate.const import CLIPS_DIR, EXPORT_DIR
from frigate.models import Export, ExportCase, Previews, Recordings
from frigate.models import Export, Previews, Recordings
from frigate.record.export import (
DEFAULT_TIME_LAPSE_FFMPEG_ARGS,
PlaybackFactorEnum,
PlaybackSourceEnum,
RecordingExporter,
)
@ -64,182 +52,17 @@ router = APIRouter(tags=[Tags.export])
)
def get_exports(
allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter),
export_case_id: Optional[str] = None,
cameras: Optional[str] = Query(default="all"),
start_date: Optional[float] = None,
end_date: Optional[float] = None,
):
query = Export.select().where(Export.camera << allowed_cameras)
if export_case_id is not None:
if export_case_id == "unassigned":
query = query.where(Export.export_case.is_null(True))
else:
query = query.where(Export.export_case == export_case_id)
if cameras and cameras != "all":
requested = set(cameras.split(","))
filtered_cameras = list(requested.intersection(allowed_cameras))
if not filtered_cameras:
return JSONResponse(content=[])
query = query.where(Export.camera << filtered_cameras)
if start_date is not None:
query = query.where(Export.date >= start_date)
if end_date is not None:
query = query.where(Export.date <= end_date)
exports = query.order_by(Export.date.desc()).dicts().iterator()
exports = (
Export.select()
.where(Export.camera << allowed_cameras)
.order_by(Export.date.desc())
.dicts()
.iterator()
)
return JSONResponse(content=[e for e in exports])
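
For illustration, a client query exercising the dev-branch filters above; the /exports path is assumed from earlier releases, and the host and timestamps are placeholders:

```python
import requests

resp = requests.get(
    "http://frigate.local:5000/api/exports",
    params={
        "cameras": "front_door,driveway",  # defaults to "all"
        "export_case_id": "unassigned",    # special value: exports with no case
        "start_date": 1717200000,
        "end_date": 1717286400,
    },
)
print(resp.json())
```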
@router.get(
"/cases",
response_model=ExportCasesResponse,
dependencies=[Depends(allow_any_authenticated())],
summary="Get export cases",
description="Gets all export cases from the database.",
)
def get_export_cases():
cases = (
ExportCase.select().order_by(ExportCase.created_at.desc()).dicts().iterator()
)
return JSONResponse(content=[c for c in cases])
@router.post(
"/cases",
response_model=ExportCaseModel,
dependencies=[Depends(require_role(["admin"]))],
summary="Create export case",
description="Creates a new export case.",
)
def create_export_case(body: ExportCaseCreateBody):
case = ExportCase.create(
id="".join(random.choices(string.ascii_lowercase + string.digits, k=12)),
name=body.name,
description=body.description,
created_at=Path().stat().st_mtime,
updated_at=Path().stat().st_mtime,
)
return JSONResponse(content=model_to_dict(case))
@router.get(
"/cases/{case_id}",
response_model=ExportCaseModel,
dependencies=[Depends(allow_any_authenticated())],
summary="Get a single export case",
description="Gets a specific export case by ID.",
)
def get_export_case(case_id: str):
try:
case = ExportCase.get(ExportCase.id == case_id)
return JSONResponse(content=model_to_dict(case))
except DoesNotExist:
return JSONResponse(
content={"success": False, "message": "Export case not found"},
status_code=404,
)
@router.patch(
"/cases/{case_id}",
response_model=GenericResponse,
dependencies=[Depends(require_role(["admin"]))],
summary="Update export case",
description="Updates an existing export case.",
)
def update_export_case(case_id: str, body: ExportCaseUpdateBody):
try:
case = ExportCase.get(ExportCase.id == case_id)
except DoesNotExist:
return JSONResponse(
content={"success": False, "message": "Export case not found"},
status_code=404,
)
if body.name is not None:
case.name = body.name
if body.description is not None:
case.description = body.description
case.save()
return JSONResponse(
content={"success": True, "message": "Successfully updated export case."}
)
@router.delete(
"/cases/{case_id}",
response_model=GenericResponse,
dependencies=[Depends(require_role(["admin"]))],
summary="Delete export case",
description="""Deletes an export case.\n Exports that reference this case will have their export_case set to null.\n """,
)
def delete_export_case(case_id: str):
try:
case = ExportCase.get(ExportCase.id == case_id)
except DoesNotExist:
return JSONResponse(
content={"success": False, "message": "Export case not found"},
status_code=404,
)
# Unassign exports from this case but keep the exports themselves
Export.update(export_case=None).where(Export.export_case == case).execute()
case.delete_instance()
return JSONResponse(
content={"success": True, "message": "Successfully deleted export case."}
)
@router.patch(
"/export/{export_id}/case",
response_model=GenericResponse,
dependencies=[Depends(require_role(["admin"]))],
summary="Assign export to case",
description=(
"Assigns an export to a case, or unassigns it if export_case_id is null."
),
)
async def assign_export_case(
export_id: str,
body: ExportCaseAssignBody,
request: Request,
):
try:
export: Export = Export.get(Export.id == export_id)
await require_camera_access(export.camera, request=request)
except DoesNotExist:
return JSONResponse(
content={"success": False, "message": "Export not found."},
status_code=404,
)
if body.export_case_id is not None:
try:
ExportCase.get(ExportCase.id == body.export_case_id)
except DoesNotExist:
return JSONResponse(
content={"success": False, "message": "Export case not found."},
status_code=404,
)
export.export_case = body.export_case_id
else:
export.export_case = None
export.save()
return JSONResponse(
content={"success": True, "message": "Successfully updated export case."}
)
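
A usage sketch for the endpoint above; host and ids are placeholders. Sending a null export_case_id unassigns the export:

```python
import requests

base = "http://frigate.local:5000/api/export/front_door_ab12cd/case"
requests.patch(base, json={"export_case_id": "abc123def456"})  # assign to a case
requests.patch(base, json={"export_case_id": None})            # unassign
```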
@router.post(
"/export/{camera_name}/start/{start_time}/end/{end_time}",
response_model=StartExportResponse,
@ -265,20 +88,11 @@ def export_recording(
status_code=404,
)
playback_factor = body.playback
playback_source = body.source
friendly_name = body.name
existing_image = sanitize_filepath(body.image_path) if body.image_path else None
export_case_id = body.export_case_id
if export_case_id is not None:
try:
ExportCase.get(ExportCase.id == export_case_id)
except DoesNotExist:
return JSONResponse(
content={"success": False, "message": "Export case not found"},
status_code=404,
)
# Ensure that existing_image is a valid path
if existing_image and not existing_image.startswith(CLIPS_DIR):
return JSONResponse(
@ -337,12 +151,16 @@ def export_recording(
existing_image,
int(start_time),
int(end_time),
(
PlaybackFactorEnum[playback_factor]
if playback_factor in PlaybackFactorEnum.__members__.values()
else PlaybackFactorEnum.realtime
),
(
PlaybackSourceEnum[playback_source]
if playback_source in PlaybackSourceEnum.__members__.values()
else PlaybackSourceEnum.recordings
),
export_case_id,
)
exporter.start()
return JSONResponse(
@ -453,138 +271,6 @@ async def export_delete(event_id: str, request: Request):
)
@router.post(
"/export/custom/{camera_name}/start/{start_time}/end/{end_time}",
response_model=StartExportResponse,
dependencies=[Depends(require_camera_access)],
summary="Start custom recording export",
description="""Starts an export of a recording for the specified time range using custom FFmpeg arguments.
The export can be from recordings or preview footage. Returns the export ID if
successful, or an error message if the camera is invalid or no recordings/previews
are found for the time range. If ffmpeg_input_args and ffmpeg_output_args are not provided,
defaults to timelapse export settings.""",
)
def export_recording_custom(
request: Request,
camera_name: str,
start_time: float,
end_time: float,
body: ExportRecordingsCustomBody,
):
if not camera_name or not request.app.frigate_config.cameras.get(camera_name):
return JSONResponse(
content=(
{"success": False, "message": f"{camera_name} is not a valid camera."}
),
status_code=404,
)
playback_source = body.source
friendly_name = body.name
existing_image = sanitize_filepath(body.image_path) if body.image_path else None
ffmpeg_input_args = body.ffmpeg_input_args
ffmpeg_output_args = body.ffmpeg_output_args
cpu_fallback = body.cpu_fallback
export_case_id = body.export_case_id
if export_case_id is not None:
try:
ExportCase.get(ExportCase.id == export_case_id)
except DoesNotExist:
return JSONResponse(
content={"success": False, "message": "Export case not found"},
status_code=404,
)
# Ensure that existing_image is a valid path
if existing_image and not existing_image.startswith(CLIPS_DIR):
return JSONResponse(
content=({"success": False, "message": "Invalid image path"}),
status_code=400,
)
if playback_source == "recordings":
recordings_count = (
Recordings.select()
.where(
Recordings.start_time.between(start_time, end_time)
| Recordings.end_time.between(start_time, end_time)
| (
(start_time > Recordings.start_time)
& (end_time < Recordings.end_time)
)
)
.where(Recordings.camera == camera_name)
.count()
)
if recordings_count <= 0:
return JSONResponse(
content=(
{"success": False, "message": "No recordings found for time range"}
),
status_code=400,
)
else:
previews_count = (
Previews.select()
.where(
Previews.start_time.between(start_time, end_time)
| Previews.end_time.between(start_time, end_time)
| ((start_time > Previews.start_time) & (end_time < Previews.end_time))
)
.where(Previews.camera == camera_name)
.count()
)
if not is_current_hour(start_time) and previews_count <= 0:
return JSONResponse(
content=(
{"success": False, "message": "No previews found for time range"}
),
status_code=400,
)
export_id = f"{camera_name}_{''.join(random.choices(string.ascii_lowercase + string.digits, k=6))}"
# Set default values if not provided (timelapse defaults)
if ffmpeg_input_args is None:
ffmpeg_input_args = ""
if ffmpeg_output_args is None:
ffmpeg_output_args = DEFAULT_TIME_LAPSE_FFMPEG_ARGS
exporter = RecordingExporter(
request.app.frigate_config,
export_id,
camera_name,
friendly_name,
existing_image,
int(start_time),
int(end_time),
(
PlaybackSourceEnum[playback_source]
if playback_source in PlaybackSourceEnum.__members__.values()
else PlaybackSourceEnum.recordings
),
export_case_id,
ffmpeg_input_args,
ffmpeg_output_args,
cpu_fallback,
)
exporter.start()
return JSONResponse(
content=(
{
"success": True,
"message": "Starting export of recording.",
"export_id": export_id,
}
),
status_code=200,
)
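
A sketch of a request to the custom export endpoint above; host, camera, and timestamps are placeholders, and the omitted body fields are assumed to have defaults. Leaving out the ffmpeg arguments falls back to the timelapse defaults, as the handler shows:

```python
import requests

requests.post(
    "http://frigate.local:5000/api/export/custom/front_door"
    "/start/1717200000/end/1717203600",
    json={
        "source": "recordings",
        "name": "Front door timelapse",
        "cpu_fallback": True,
    },
)
```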
@router.get(
"/exports/{export_id}",
response_model=ExportModel,

View File

@ -16,16 +16,12 @@ from frigate.api import app as main_app
from frigate.api import (
auth,
camera,
chat,
classification,
debug_replay,
event,
export,
media,
motion_search,
notification,
preview,
record,
review,
)
from frigate.api.auth import get_jwt_secret, limiter, require_admin_by_default
@ -34,9 +30,7 @@ from frigate.comms.event_metadata_updater import (
)
from frigate.config import FrigateConfig
from frigate.config.camera.updater import CameraConfigUpdatePublisher
from frigate.debug_replay import DebugReplayManager
from frigate.embeddings import EmbeddingsContext
from frigate.genai import GenAIClientManager
from frigate.ptz.onvif import OnvifController
from frigate.stats.emitter import StatsEmitter
from frigate.storage import StorageMaintainer
@ -68,7 +62,6 @@ def create_fastapi_app(
stats_emitter: StatsEmitter,
event_metadata_updater: EventMetadataPublisher,
config_publisher: CameraConfigUpdatePublisher,
replay_manager: DebugReplayManager,
enforce_default_admin: bool = True,
):
logger.info("Starting FastAPI app")
@ -127,7 +120,6 @@ def create_fastapi_app(
# Order of include_router matters: https://fastapi.tiangolo.com/tutorial/path-params/#order-matters
app.include_router(auth.router)
app.include_router(camera.router)
app.include_router(chat.router)
app.include_router(classification.router)
app.include_router(review.router)
app.include_router(main_app.router)
@ -136,12 +128,8 @@ def create_fastapi_app(
app.include_router(export.router)
app.include_router(event.router)
app.include_router(media.router)
app.include_router(motion_search.router)
app.include_router(record.router)
app.include_router(debug_replay.router)
# App Properties
app.frigate_config = frigate_config
app.genai_manager = GenAIClientManager(frigate_config)
app.embeddings = embeddings
app.detected_frames_processor = detected_frames_processor
app.storage_maintainer = storage_maintainer
@ -150,7 +138,6 @@ def create_fastapi_app(
app.stats_emitter = stats_emitter
app.event_metadata_updater = event_metadata_updater
app.config_publisher = config_publisher
app.replay_manager = replay_manager
if frigate_config.auth.enabled:
secret = get_jwt_secret()

View File

@ -8,8 +8,9 @@ import os
import subprocess as sp
import time
from datetime import datetime, timedelta, timezone
from functools import reduce
from pathlib import Path as FilePath
from typing import Any
from typing import Any, List
from urllib.parse import unquote
import cv2
@ -18,19 +19,21 @@ import pytz
from fastapi import APIRouter, Depends, Path, Query, Request, Response
from fastapi.responses import FileResponse, JSONResponse, StreamingResponse
from pathvalidate import sanitize_filename
from peewee import DoesNotExist, fn
from peewee import DoesNotExist, fn, operator
from tzlocal import get_localzone_name
from frigate.api.auth import (
allow_any_authenticated,
get_allowed_cameras_for_filter,
require_camera_access,
require_role,
)
from frigate.api.defs.query.media_query_parameters import (
Extension,
MediaEventsSnapshotQueryParams,
MediaLatestFrameQueryParams,
MediaMjpegFeedQueryParams,
MediaRecordingsAvailabilityQueryParams,
MediaRecordingsSummaryQueryParams,
)
from frigate.api.defs.tags import Tags
from frigate.camera.state import CameraState
@ -41,12 +44,13 @@ from frigate.const import (
INSTALL_DIR,
MAX_SEGMENT_DURATION,
PREVIEW_FRAME_TYPE,
RECORD_DIR,
)
from frigate.models import Event, Previews, Recordings, Regions, ReviewSegment
from frigate.output.preview import get_most_recent_preview_frame
from frigate.track.object_processing import TrackedObjectProcessor
from frigate.util.file import get_event_thumbnail_bytes
from frigate.util.image import get_image_from_recording
from frigate.util.time import get_dst_transitions
logger = logging.getLogger(__name__)
@ -127,9 +131,7 @@ async def camera_ptz_info(request: Request, camera_name: str):
@router.get(
"/{camera_name}/latest.{extension}",
dependencies=[Depends(require_camera_access)],
description="Returns the latest frame from the specified camera in the requested format (jpg, png, webp). Falls back to preview frames if the camera is offline.",
"/{camera_name}/latest.{extension}", dependencies=[Depends(require_camera_access)]
)
async def latest_frame(
request: Request,
@ -163,37 +165,20 @@ async def latest_frame(
or 10
)
is_offline = False
if frame is None or datetime.now().timestamp() > (
frame_processor.get_current_frame_time(camera_name) + retry_interval
):
last_frame_time = frame_processor.get_current_frame_time(camera_name)
preview_path = get_most_recent_preview_frame(
camera_name, before=last_frame_time
)
if preview_path:
logger.debug(f"Using most recent preview frame for {camera_name}")
frame = cv2.imread(preview_path, cv2.IMREAD_UNCHANGED)
if frame is not None:
is_offline = True
if frame is None or not is_offline:
logger.debug(
f"No live or preview frame available for {camera_name}. Using error image."
)
if request.app.camera_error_image is None:
error_image = glob.glob(
os.path.join(INSTALL_DIR, "frigate/images/camera-error.jpg")
)
if len(error_image) > 0:
request.app.camera_error_image = cv2.imread(
error_image[0], cv2.IMREAD_UNCHANGED
)
frame = request.app.camera_error_image
if request.app.camera_error_image is None:
error_image = glob.glob(
os.path.join(INSTALL_DIR, "frigate/images/camera-error.jpg")
)
if len(error_image) > 0:
request.app.camera_error_image = cv2.imread(
error_image[0], cv2.IMREAD_UNCHANGED
)
frame = request.app.camera_error_image
height = int(params.height or str(frame.shape[0]))
width = int(height * frame.shape[1] / frame.shape[0])
@ -215,18 +200,14 @@ async def latest_frame(
frame = cv2.resize(frame, dsize=(width, height), interpolation=cv2.INTER_AREA)
_, img = cv2.imencode(f".{extension.value}", frame, quality_params)
headers = {
"Cache-Control": "no-store" if not params.store else "private, max-age=60",
}
if is_offline:
headers["X-Frigate-Offline"] = "true"
return Response(
content=img.tobytes(),
media_type=extension.get_mime_type(),
headers=headers,
headers={
"Cache-Control": "no-store"
if not params.store
else "private, max-age=60",
},
)
elif (
camera_name == "birdseye"
@ -416,6 +397,333 @@ async def submit_recording_snapshot_to_plus(
)
@router.get("/recordings/storage", dependencies=[Depends(allow_any_authenticated())])
def get_recordings_storage_usage(request: Request):
recording_stats = request.app.stats_emitter.get_latest_stats()["service"][
"storage"
][RECORD_DIR]
if not recording_stats:
return JSONResponse({})
total_mb = recording_stats["total"]
camera_usages: dict[str, dict] = (
request.app.storage_maintainer.calculate_camera_usages()
)
for camera_name in camera_usages.keys():
if camera_usages.get(camera_name, {}).get("usage"):
camera_usages[camera_name]["usage_percent"] = (
camera_usages.get(camera_name, {}).get("usage", 0) / total_mb
) * 100
return JSONResponse(content=camera_usages)
@router.get("/recordings/summary", dependencies=[Depends(allow_any_authenticated())])
def all_recordings_summary(
request: Request,
params: MediaRecordingsSummaryQueryParams = Depends(),
allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter),
):
"""Returns true/false by day indicating if recordings exist"""
cameras = params.cameras
if cameras != "all":
requested = set(unquote(cameras).split(","))
filtered = requested.intersection(allowed_cameras)
if not filtered:
return JSONResponse(content={})
camera_list = list(filtered)
else:
camera_list = allowed_cameras
time_range_query = (
Recordings.select(
fn.MIN(Recordings.start_time).alias("min_time"),
fn.MAX(Recordings.start_time).alias("max_time"),
)
.where(Recordings.camera << camera_list)
.dicts()
.get()
)
min_time = time_range_query.get("min_time")
max_time = time_range_query.get("max_time")
if min_time is None or max_time is None:
return JSONResponse(content={})
dst_periods = get_dst_transitions(params.timezone, min_time, max_time)
days: dict[str, bool] = {}
for period_start, period_end, period_offset in dst_periods:
hours_offset = int(period_offset / 60 / 60)
minutes_offset = int(period_offset / 60 - hours_offset * 60)
period_hour_modifier = f"{hours_offset} hour"
period_minute_modifier = f"{minutes_offset} minute"
period_query = (
Recordings.select(
fn.strftime(
"%Y-%m-%d",
fn.datetime(
Recordings.start_time,
"unixepoch",
period_hour_modifier,
period_minute_modifier,
),
).alias("day")
)
.where(
(Recordings.camera << camera_list)
& (Recordings.end_time >= period_start)
& (Recordings.start_time <= period_end)
)
.group_by(
fn.strftime(
"%Y-%m-%d",
fn.datetime(
Recordings.start_time,
"unixepoch",
period_hour_modifier,
period_minute_modifier,
),
)
)
.order_by(Recordings.start_time.desc())
.namedtuples()
)
for g in period_query:
days[g.day] = True
return JSONResponse(content=dict(sorted(days.items())))
@router.get(
"/{camera_name}/recordings/summary", dependencies=[Depends(require_camera_access)]
)
async def recordings_summary(camera_name: str, timezone: str = "utc"):
"""Returns hourly summary for recordings of given camera"""
time_range_query = (
Recordings.select(
fn.MIN(Recordings.start_time).alias("min_time"),
fn.MAX(Recordings.start_time).alias("max_time"),
)
.where(Recordings.camera == camera_name)
.dicts()
.get()
)
min_time = time_range_query.get("min_time")
max_time = time_range_query.get("max_time")
days: dict[str, dict] = {}
if min_time is None or max_time is None:
return JSONResponse(content=list(days.values()))
dst_periods = get_dst_transitions(timezone, min_time, max_time)
for period_start, period_end, period_offset in dst_periods:
hours_offset = int(period_offset / 60 / 60)
minutes_offset = int(period_offset / 60 - hours_offset * 60)
period_hour_modifier = f"{hours_offset} hour"
period_minute_modifier = f"{minutes_offset} minute"
recording_groups = (
Recordings.select(
fn.strftime(
"%Y-%m-%d %H",
fn.datetime(
Recordings.start_time,
"unixepoch",
period_hour_modifier,
period_minute_modifier,
),
).alias("hour"),
fn.SUM(Recordings.duration).alias("duration"),
fn.SUM(Recordings.motion).alias("motion"),
fn.SUM(Recordings.objects).alias("objects"),
)
.where(
(Recordings.camera == camera_name)
& (Recordings.end_time >= period_start)
& (Recordings.start_time <= period_end)
)
.group_by((Recordings.start_time + period_offset).cast("int") / 3600)
.order_by(Recordings.start_time.desc())
.namedtuples()
)
event_groups = (
Event.select(
fn.strftime(
"%Y-%m-%d %H",
fn.datetime(
Event.start_time,
"unixepoch",
period_hour_modifier,
period_minute_modifier,
),
).alias("hour"),
fn.COUNT(Event.id).alias("count"),
)
.where(Event.camera == camera_name, Event.has_clip)
.where(
(Event.start_time >= period_start) & (Event.start_time <= period_end)
)
.group_by((Event.start_time + period_offset).cast("int") / 3600)
.namedtuples()
)
event_map = {g.hour: g.count for g in event_groups}
for recording_group in recording_groups:
parts = recording_group.hour.split()
hour = parts[1]
day = parts[0]
events_count = event_map.get(recording_group.hour, 0)
hour_data = {
"hour": hour,
"events": events_count,
"motion": recording_group.motion,
"objects": recording_group.objects,
"duration": round(recording_group.duration),
}
if day in days:
# merge counts if already present (edge-case at DST boundary)
days[day]["events"] += events_count or 0
days[day]["hours"].append(hour_data)
else:
days[day] = {
"events": events_count or 0,
"hours": [hour_data],
"day": day,
}
return JSONResponse(content=list(days.values()))
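
The hour/minute modifier arithmetic above can be sanity-checked in isolation; a standalone sketch:

```python
def modifiers(period_offset: int) -> tuple[str, str]:
    # Mirrors the conversion of a UTC offset (seconds) into SQLite
    # datetime() modifiers used in the summary queries above.
    hours_offset = int(period_offset / 60 / 60)
    minutes_offset = int(period_offset / 60 - hours_offset * 60)
    return f"{hours_offset} hour", f"{minutes_offset} minute"

print(modifiers(-18000))  # ('-5 hour', '0 minute')  e.g. UTC-5
print(modifiers(20700))   # ('5 hour', '45 minute')  e.g. UTC+5:45
```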
@router.get("/{camera_name}/recordings", dependencies=[Depends(require_camera_access)])
async def recordings(
camera_name: str,
after: float = (datetime.now() - timedelta(hours=1)).timestamp(),
before: float = datetime.now().timestamp(),
):
"""Return specific camera recordings between the given 'after'/'end' times. If not provided the last hour will be used"""
recordings = (
Recordings.select(
Recordings.id,
Recordings.start_time,
Recordings.end_time,
Recordings.segment_size,
Recordings.motion,
Recordings.objects,
Recordings.duration,
)
.where(
Recordings.camera == camera_name,
Recordings.end_time >= after,
Recordings.start_time <= before,
)
.order_by(Recordings.start_time)
.dicts()
.iterator()
)
return JSONResponse(content=list(recordings))
@router.get(
"/recordings/unavailable",
response_model=list[dict],
dependencies=[Depends(allow_any_authenticated())],
)
async def no_recordings(
request: Request,
params: MediaRecordingsAvailabilityQueryParams = Depends(),
allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter),
):
"""Get time ranges with no recordings."""
cameras = params.cameras
if cameras != "all":
requested = set(unquote(cameras).split(","))
filtered = requested.intersection(allowed_cameras)
if not filtered:
return JSONResponse(content=[])
cameras = ",".join(filtered)
else:
cameras = ",".join(allowed_cameras)
before = params.before or datetime.now().timestamp()
after = (
params.after
or (datetime.now() - timedelta(hours=1)).timestamp()
)
scale = params.scale
clauses = [(Recordings.end_time >= after) & (Recordings.start_time <= before)]
if cameras != "all":
camera_list = cameras.split(",")
clauses.append((Recordings.camera << camera_list))
else:
camera_list = allowed_cameras
# Get recording start and end times
data: list[Recordings] = (
Recordings.select(Recordings.start_time, Recordings.end_time)
.where(reduce(operator.and_, clauses))
.order_by(Recordings.start_time.asc())
.dicts()
.iterator()
)
# Convert recordings to list of (start, end) tuples
recordings = [(r["start_time"], r["end_time"]) for r in data]
# Iterate through time segments and check if each has any recording
no_recording_segments = []
current = after
current_gap_start = None
while current < before:
segment_end = min(current + scale, before)
# Check if this segment overlaps with any recording
has_recording = any(
rec_start < segment_end and rec_end > current
for rec_start, rec_end in recordings
)
if not has_recording:
# This segment has no recordings
if current_gap_start is None:
current_gap_start = current # Start a new gap
else:
# This segment has recordings
if current_gap_start is not None:
# End the current gap and append it
no_recording_segments.append(
{"start_time": int(current_gap_start), "end_time": int(current)}
)
current_gap_start = None
current = segment_end
# Append the last gap if it exists
if current_gap_start is not None:
no_recording_segments.append(
{"start_time": int(current_gap_start), "end_time": int(before)}
)
return JSONResponse(content=no_recording_segments)
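
The gap-scan loop above is self-contained enough to check with synthetic data; a minimal sketch:

```python
# Synthetic check of the gap-scan logic: scan [0, 100) in steps of 10
# against two recordings and collect the uncovered ranges.
after, before, scale = 0, 100, 10
recordings = [(15, 35), (60, 70)]  # (start_time, end_time) tuples

gaps, current, gap_start = [], after, None
while current < before:
    segment_end = min(current + scale, before)
    has_recording = any(s < segment_end and e > current for s, e in recordings)
    if not has_recording:
        if gap_start is None:
            gap_start = current  # open a new gap
    elif gap_start is not None:
        gaps.append({"start_time": int(gap_start), "end_time": int(current)})
        gap_start = None
    current = segment_end
if gap_start is not None:
    gaps.append({"start_time": int(gap_start), "end_time": int(before)})

# [{'start_time': 0, 'end_time': 10}, {'start_time': 40, 'end_time': 60},
#  {'start_time': 70, 'end_time': 100}]
print(gaps)
```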
@router.get(
"/{camera_name}/start/{start_ts}/end/{end_ts}/clip.mp4",
dependencies=[Depends(require_camera_access)],
@ -738,7 +1046,6 @@ async def event_snapshot(
):
event_complete = False
jpg_bytes = None
frame_time = 0
try:
event = Event.get(Event.id == event_id, Event.end_time != None)
event_complete = True
@ -763,7 +1070,7 @@ async def event_snapshot(
if event_id in camera_state.tracked_objects:
tracked_obj = camera_state.tracked_objects.get(event_id)
if tracked_obj is not None:
jpg_bytes, frame_time = tracked_obj.get_img_bytes(
jpg_bytes = tracked_obj.get_img_bytes(
ext="jpg",
timestamp=params.timestamp,
bounding_box=params.bbox,
@ -792,7 +1099,6 @@ async def event_snapshot(
headers = {
"Content-Type": "image/jpeg",
"Cache-Control": "private, max-age=31536000" if event_complete else "no-store",
"X-Frame-Time": str(frame_time),
}
if params.download:
@ -1006,23 +1312,6 @@ def grid_snapshot(
)
@router.delete(
"/{camera_name}/region_grid", dependencies=[Depends(require_role("admin"))]
)
def clear_region_grid(request: Request, camera_name: str):
"""Clear the region grid for a camera."""
if camera_name not in request.app.frigate_config.cameras:
return JSONResponse(
content={"success": False, "message": "Camera not found"},
status_code=404,
)
Regions.delete().where(Regions.camera == camera_name).execute()
return JSONResponse(
content={"success": True, "message": "Region grid cleared"},
)
@router.get(
"/events/{event_id}/snapshot-clean.webp",
dependencies=[Depends(require_camera_access)],
@ -1281,13 +1570,6 @@ def preview_gif(
else:
# need to generate from existing images
preview_dir = os.path.join(CACHE_DIR, "preview_frames")
if not os.path.isdir(preview_dir):
return JSONResponse(
content={"success": False, "message": "Preview not found"},
status_code=404,
)
file_start = f"preview_{camera_name}"
start_file = f"{file_start}-{start_ts}.{PREVIEW_FRAME_TYPE}"
end_file = f"{file_start}-{end_ts}.{PREVIEW_FRAME_TYPE}"
@ -1463,13 +1745,6 @@ def preview_mp4(
else:
# need to generate from existing images
preview_dir = os.path.join(CACHE_DIR, "preview_frames")
if not os.path.isdir(preview_dir):
return JSONResponse(
content={"success": False, "message": "Preview not found"},
status_code=404,
)
file_start = f"preview_{camera_name}"
start_file = f"{file_start}-{start_ts}.{PREVIEW_FRAME_TYPE}"
end_file = f"{file_start}-{end_ts}.{PREVIEW_FRAME_TYPE}"

View File

@ -1,292 +0,0 @@
"""Motion search API for detecting changes within a region of interest."""
import logging
from typing import Any, List, Optional
from fastapi import APIRouter, Depends, Request
from fastapi.responses import JSONResponse
from pydantic import BaseModel, Field
from frigate.api.auth import require_camera_access
from frigate.api.defs.tags import Tags
from frigate.jobs.motion_search import (
cancel_motion_search_job,
get_motion_search_job,
start_motion_search_job,
)
from frigate.types import JobStatusTypesEnum
logger = logging.getLogger(__name__)
router = APIRouter(tags=[Tags.motion_search])
class MotionSearchRequest(BaseModel):
"""Request body for motion search."""
start_time: float = Field(description="Start timestamp for the search range")
end_time: float = Field(description="End timestamp for the search range")
polygon_points: List[List[float]] = Field(
description="List of [x, y] normalized coordinates (0-1) defining the ROI polygon"
)
threshold: int = Field(
default=30,
ge=1,
le=255,
description="Pixel difference threshold (1-255)",
)
min_area: float = Field(
default=5.0,
ge=0.1,
le=100.0,
description="Minimum change area as a percentage of the ROI",
)
frame_skip: int = Field(
default=5,
ge=1,
le=30,
description="Process every Nth frame (1=all frames, 5=every 5th frame)",
)
parallel: bool = Field(
default=False,
description="Enable parallel scanning across segments",
)
max_results: int = Field(
default=25,
ge=1,
le=200,
description="Maximum number of search results to return",
)
class MotionSearchResult(BaseModel):
"""A single search result with timestamp and change info."""
timestamp: float = Field(description="Timestamp where change was detected")
change_percentage: float = Field(description="Percentage of ROI area that changed")
class MotionSearchMetricsResponse(BaseModel):
"""Metrics collected during motion search execution."""
segments_scanned: int = 0
segments_processed: int = 0
metadata_inactive_segments: int = 0
heatmap_roi_skip_segments: int = 0
fallback_full_range_segments: int = 0
frames_decoded: int = 0
wall_time_seconds: float = 0.0
segments_with_errors: int = 0
class MotionSearchStartResponse(BaseModel):
"""Response when motion search job starts."""
success: bool
message: str
job_id: str
class MotionSearchStatusResponse(BaseModel):
"""Response containing job status and results."""
success: bool
message: str
status: str # "queued", "running", "success", "failed", or "cancelled"
results: Optional[List[MotionSearchResult]] = None
total_frames_processed: Optional[int] = None
error_message: Optional[str] = None
metrics: Optional[MotionSearchMetricsResponse] = None
@router.post(
"/{camera_name}/search/motion",
response_model=MotionSearchStartResponse,
dependencies=[Depends(require_camera_access)],
summary="Start motion search job",
description="""Starts an asynchronous search for significant motion changes within
a user-defined Region of Interest (ROI) over a specified time range. Returns a job_id
that can be used to poll for results.""",
)
async def start_motion_search(
request: Request,
camera_name: str,
body: MotionSearchRequest,
):
"""Start an async motion search job."""
config = request.app.frigate_config
if camera_name not in config.cameras:
return JSONResponse(
content={"success": False, "message": f"Camera {camera_name} not found"},
status_code=404,
)
# Validate polygon has at least 3 points
if len(body.polygon_points) < 3:
return JSONResponse(
content={
"success": False,
"message": "Polygon must have at least 3 points",
},
status_code=400,
)
# Validate time range
if body.start_time >= body.end_time:
return JSONResponse(
content={
"success": False,
"message": "Start time must be before end time",
},
status_code=400,
)
# Start the job using the jobs module
job_id = start_motion_search_job(
config=config,
camera_name=camera_name,
start_time=body.start_time,
end_time=body.end_time,
polygon_points=body.polygon_points,
threshold=body.threshold,
min_area=body.min_area,
frame_skip=body.frame_skip,
parallel=body.parallel,
max_results=body.max_results,
)
return JSONResponse(
content={
"success": True,
"message": "Search job started",
"job_id": job_id,
}
)
@router.get(
"/{camera_name}/search/motion/{job_id}",
response_model=MotionSearchStatusResponse,
dependencies=[Depends(require_camera_access)],
summary="Get motion search job status",
description="Returns the status and results (if complete) of a motion search job.",
)
async def get_motion_search_status_endpoint(
request: Request,
camera_name: str,
job_id: str,
):
"""Get the status of a motion search job."""
config = request.app.frigate_config
if camera_name not in config.cameras:
return JSONResponse(
content={"success": False, "message": f"Camera {camera_name} not found"},
status_code=404,
)
job = get_motion_search_job(job_id)
if not job:
return JSONResponse(
content={"success": False, "message": "Job not found"},
status_code=404,
)
api_status = job.status
# Build response content
response_content: dict[str, Any] = {
"success": api_status != JobStatusTypesEnum.failed,
"status": api_status,
}
if api_status == JobStatusTypesEnum.failed:
response_content["message"] = job.error_message or "Search failed"
response_content["error_message"] = job.error_message
elif api_status == JobStatusTypesEnum.cancelled:
response_content["message"] = "Search cancelled"
response_content["total_frames_processed"] = job.total_frames_processed
elif api_status == JobStatusTypesEnum.success:
response_content["message"] = "Search complete"
if job.results:
response_content["results"] = job.results.get("results", [])
response_content["total_frames_processed"] = job.results.get(
"total_frames_processed", job.total_frames_processed
)
else:
response_content["results"] = []
response_content["total_frames_processed"] = job.total_frames_processed
else:
response_content["message"] = "Job processing"
response_content["total_frames_processed"] = job.total_frames_processed
# Include partial results if available (streaming)
if job.results:
response_content["results"] = job.results.get("results", [])
response_content["total_frames_processed"] = job.results.get(
"total_frames_processed", job.total_frames_processed
)
# Include metrics if available
if job.metrics:
response_content["metrics"] = job.metrics.to_dict()
return JSONResponse(content=response_content)
@router.post(
"/{camera_name}/search/motion/{job_id}/cancel",
dependencies=[Depends(require_camera_access)],
summary="Cancel motion search job",
description="Cancels an active motion search job if it is still processing.",
)
async def cancel_motion_search_endpoint(
request: Request,
camera_name: str,
job_id: str,
):
"""Cancel an active motion search job."""
config = request.app.frigate_config
if camera_name not in config.cameras:
return JSONResponse(
content={"success": False, "message": f"Camera {camera_name} not found"},
status_code=404,
)
job = get_motion_search_job(job_id)
if not job:
return JSONResponse(
content={"success": False, "message": "Job not found"},
status_code=404,
)
# Check if already finished
api_status = job.status
if api_status not in (JobStatusTypesEnum.queued, JobStatusTypesEnum.running):
return JSONResponse(
content={
"success": True,
"message": "Job already finished",
"status": api_status,
}
)
# Request cancellation
cancelled = cancel_motion_search_job(job_id)
if cancelled:
return JSONResponse(
content={
"success": True,
"message": "Search cancelled",
"status": "cancelled",
}
)
return JSONResponse(
content={
"success": False,
"message": "Failed to cancel job",
},
status_code=500,
)
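
For illustration, the full job lifecycle against the endpoints above; host, camera, and timestamps are placeholders:

```python
import time
import requests

base = "http://frigate.local:5000/api/front_door/search/motion"
job = requests.post(base, json={
    "start_time": 1717200000,
    "end_time": 1717203600,
    "polygon_points": [[0.1, 0.1], [0.9, 0.1], [0.5, 0.9]],  # normalized ROI
}).json()

# Poll until the job reaches a terminal state; partial results may stream in.
while True:
    status = requests.get(f"{base}/{job['job_id']}").json()
    if status["status"] in ("success", "failed", "cancelled"):
        break
    time.sleep(2)
print(status.get("results"))
```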

View File

@ -1,458 +0,0 @@
"""Recording APIs."""
import datetime as dt
import logging
from datetime import datetime, timedelta
from functools import reduce
from pathlib import Path
from typing import List
from urllib.parse import unquote
from fastapi import APIRouter, Depends, Request
from fastapi import Path as PathParam
from fastapi.responses import JSONResponse
from peewee import fn, operator
from frigate.api.auth import (
allow_any_authenticated,
get_allowed_cameras_for_filter,
require_camera_access,
require_role,
)
from frigate.api.defs.query.recordings_query_parameters import (
MediaRecordingsAvailabilityQueryParams,
MediaRecordingsSummaryQueryParams,
RecordingsDeleteQueryParams,
)
from frigate.api.defs.response.generic_response import GenericResponse
from frigate.api.defs.tags import Tags
from frigate.const import RECORD_DIR
from frigate.models import Event, Recordings
from frigate.util.time import get_dst_transitions
logger = logging.getLogger(__name__)
router = APIRouter(tags=[Tags.recordings])
@router.get("/recordings/storage", dependencies=[Depends(allow_any_authenticated())])
def get_recordings_storage_usage(request: Request):
recording_stats = request.app.stats_emitter.get_latest_stats()["service"][
"storage"
][RECORD_DIR]
if not recording_stats:
return JSONResponse({})
total_mb = recording_stats["total"]
camera_usages: dict[str, dict] = (
request.app.storage_maintainer.calculate_camera_usages()
)
for camera_name in camera_usages.keys():
if camera_usages.get(camera_name, {}).get("usage"):
camera_usages[camera_name]["usage_percent"] = (
camera_usages.get(camera_name, {}).get("usage", 0) / total_mb
) * 100
return JSONResponse(content=camera_usages)
@router.get("/recordings/summary", dependencies=[Depends(allow_any_authenticated())])
def all_recordings_summary(
request: Request,
params: MediaRecordingsSummaryQueryParams = Depends(),
allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter),
):
"""Returns true/false by day indicating if recordings exist"""
cameras = params.cameras
if cameras != "all":
requested = set(unquote(cameras).split(","))
filtered = requested.intersection(allowed_cameras)
if not filtered:
return JSONResponse(content={})
camera_list = list(filtered)
else:
camera_list = allowed_cameras
time_range_query = (
Recordings.select(
fn.MIN(Recordings.start_time).alias("min_time"),
fn.MAX(Recordings.start_time).alias("max_time"),
)
.where(Recordings.camera << camera_list)
.dicts()
.get()
)
min_time = time_range_query.get("min_time")
max_time = time_range_query.get("max_time")
if min_time is None or max_time is None:
return JSONResponse(content={})
dst_periods = get_dst_transitions(params.timezone, min_time, max_time)
days: dict[str, bool] = {}
for period_start, period_end, period_offset in dst_periods:
day_expr = ((Recordings.start_time + period_offset) / 86400).cast("int")
period_query = (
Recordings.select(day_expr.alias("day_idx"))
.where(
(Recordings.camera << camera_list)
& (Recordings.end_time >= period_start)
& (Recordings.start_time <= period_end)
)
.distinct()
.namedtuples()
)
for g in period_query:
day_str = (dt.date(1970, 1, 1) + dt.timedelta(days=g.day_idx)).isoformat()
days[day_str] = True
return JSONResponse(content=dict(sorted(days.items())))
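
The epoch-day bucketing above avoids per-row strftime calls; a worked example of the arithmetic:

```python
import datetime as dt

start_time = 1717243200   # 2024-06-01 12:00:00 UTC
period_offset = -14400    # UTC-4
day_idx = int((start_time + period_offset) / 86400)
day_str = (dt.date(1970, 1, 1) + dt.timedelta(days=day_idx)).isoformat()
print(day_idx, day_str)   # 19875 2024-06-01
```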
@router.get(
"/{camera_name}/recordings/summary", dependencies=[Depends(require_camera_access)]
)
async def recordings_summary(camera_name: str, timezone: str = "utc"):
"""Returns hourly summary for recordings of given camera"""
time_range_query = (
Recordings.select(
fn.MIN(Recordings.start_time).alias("min_time"),
fn.MAX(Recordings.start_time).alias("max_time"),
)
.where(Recordings.camera == camera_name)
.dicts()
.get()
)
min_time = time_range_query.get("min_time")
max_time = time_range_query.get("max_time")
days: dict[str, dict] = {}
if min_time is None or max_time is None:
return JSONResponse(content=list(days.values()))
dst_periods = get_dst_transitions(timezone, min_time, max_time)
for period_start, period_end, period_offset in dst_periods:
hours_offset = int(period_offset / 60 / 60)
minutes_offset = int(period_offset / 60 - hours_offset * 60)
period_hour_modifier = f"{hours_offset} hour"
period_minute_modifier = f"{minutes_offset} minute"
recording_groups = (
Recordings.select(
fn.strftime(
"%Y-%m-%d %H",
fn.datetime(
Recordings.start_time,
"unixepoch",
period_hour_modifier,
period_minute_modifier,
),
).alias("hour"),
fn.SUM(Recordings.duration).alias("duration"),
fn.SUM(Recordings.motion).alias("motion"),
fn.SUM(Recordings.objects).alias("objects"),
)
.where(
(Recordings.camera == camera_name)
& (Recordings.end_time >= period_start)
& (Recordings.start_time <= period_end)
)
.group_by((Recordings.start_time + period_offset).cast("int") / 3600)
.order_by(Recordings.start_time.desc())
.namedtuples()
)
event_groups = (
Event.select(
fn.strftime(
"%Y-%m-%d %H",
fn.datetime(
Event.start_time,
"unixepoch",
period_hour_modifier,
period_minute_modifier,
),
).alias("hour"),
fn.COUNT(Event.id).alias("count"),
)
.where(Event.camera == camera_name, Event.has_clip)
.where(
(Event.start_time >= period_start) & (Event.start_time <= period_end)
)
.group_by((Event.start_time + period_offset).cast("int") / 3600)
.namedtuples()
)
event_map = {g.hour: g.count for g in event_groups}
for recording_group in recording_groups:
parts = recording_group.hour.split()
hour = parts[1]
day = parts[0]
events_count = event_map.get(recording_group.hour, 0)
hour_data = {
"hour": hour,
"events": events_count,
"motion": recording_group.motion,
"objects": recording_group.objects,
"duration": round(recording_group.duration),
}
if day in days:
# merge counts if already present (edge-case at DST boundary)
days[day]["events"] += events_count or 0
days[day]["hours"].append(hour_data)
else:
days[day] = {
"events": events_count or 0,
"hours": [hour_data],
"day": day,
}
return JSONResponse(content=list(days.values()))
@router.get("/{camera_name}/recordings", dependencies=[Depends(require_camera_access)])
async def recordings(
camera_name: str,
after: float = (datetime.now() - timedelta(hours=1)).timestamp(),
before: float = datetime.now().timestamp(),
):
"""Return specific camera recordings between the given 'after'/'end' times. If not provided the last hour will be used"""
recordings = (
Recordings.select(
Recordings.id,
Recordings.start_time,
Recordings.end_time,
Recordings.segment_size,
Recordings.motion,
Recordings.objects,
Recordings.motion_heatmap,
Recordings.duration,
)
.where(
Recordings.camera == camera_name,
Recordings.end_time >= after,
Recordings.start_time <= before,
)
.order_by(Recordings.start_time)
.dicts()
.iterator()
)
return JSONResponse(content=list(recordings))
@router.get(
"/recordings/unavailable",
response_model=list[dict],
dependencies=[Depends(allow_any_authenticated())],
)
async def no_recordings(
request: Request,
params: MediaRecordingsAvailabilityQueryParams = Depends(),
allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter),
):
"""Get time ranges with no recordings."""
cameras = params.cameras
if cameras != "all":
requested = set(unquote(cameras).split(","))
filtered = requested.intersection(allowed_cameras)
if not filtered:
return JSONResponse(content=[])
cameras = ",".join(filtered)
else:
cameras = ",".join(allowed_cameras)
before = params.before or datetime.now().timestamp()
after = (
params.after
or (datetime.now() - timedelta(hours=1)).timestamp()
)
scale = params.scale
clauses = [(Recordings.end_time >= after) & (Recordings.start_time <= before)]
if cameras != "all":
camera_list = cameras.split(",")
clauses.append((Recordings.camera << camera_list))
else:
camera_list = allowed_cameras
# Get recording start and end times
data: list[Recordings] = (
Recordings.select(Recordings.start_time, Recordings.end_time)
.where(reduce(operator.and_, clauses))
.order_by(Recordings.start_time.asc())
.dicts()
.iterator()
)
# Convert recordings to list of (start, end) tuples
recordings = [(r["start_time"], r["end_time"]) for r in data]
# Iterate through time segments and check if each has any recording
no_recording_segments = []
current = after
current_gap_start = None
while current < before:
segment_end = min(current + scale, before)
# Check if this segment overlaps with any recording
has_recording = any(
rec_start < segment_end and rec_end > current
for rec_start, rec_end in recordings
)
if not has_recording:
# This segment has no recordings
if current_gap_start is None:
current_gap_start = current # Start a new gap
else:
# This segment has recordings
if current_gap_start is not None:
# End the current gap and append it
no_recording_segments.append(
{"start_time": int(current_gap_start), "end_time": int(current)}
)
current_gap_start = None
current = segment_end
# Append the last gap if it exists
if current_gap_start is not None:
no_recording_segments.append(
{"start_time": int(current_gap_start), "end_time": int(before)}
)
return JSONResponse(content=no_recording_segments)
@router.delete(
"/recordings/start/{start}/end/{end}",
response_model=GenericResponse,
dependencies=[Depends(require_role(["admin"]))],
summary="Delete recordings",
description="""Deletes recordings within the specified time range.
Recordings can be filtered by cameras and kept based on motion, objects, or audio attributes.
""",
)
async def delete_recordings(
start: float = PathParam(..., description="Start timestamp (unix)"),
end: float = PathParam(..., description="End timestamp (unix)"),
params: RecordingsDeleteQueryParams = Depends(),
allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter),
):
"""Delete recordings in the specified time range."""
if start >= end:
return JSONResponse(
content={
"success": False,
"message": "Start time must be less than end time.",
},
status_code=400,
)
cameras = params.cameras
if cameras != "all":
requested = set(cameras.split(","))
filtered = requested.intersection(allowed_cameras)
if not filtered:
return JSONResponse(
content={
"success": False,
"message": "No valid cameras found in the request.",
},
status_code=400,
)
camera_list = list(filtered)
else:
camera_list = allowed_cameras
# Parse keep parameter
keep_set = set()
if params.keep:
keep_set = set(params.keep.split(","))
# Build query to find overlapping recordings
clauses = [
(
Recordings.start_time.between(start, end)
| Recordings.end_time.between(start, end)
| ((start > Recordings.start_time) & (end < Recordings.end_time))
),
(Recordings.camera << camera_list),
]
keep_clauses = []
if "motion" in keep_set:
keep_clauses.append(Recordings.motion.is_null(False) & (Recordings.motion > 0))
if "object" in keep_set:
keep_clauses.append(
Recordings.objects.is_null(False) & (Recordings.objects > 0)
)
if "audio" in keep_set:
keep_clauses.append(Recordings.dBFS.is_null(False))
if keep_clauses:
keep_condition = reduce(operator.or_, keep_clauses)
clauses.append(~keep_condition)
recordings_to_delete = (
Recordings.select(Recordings.id, Recordings.path)
.where(reduce(operator.and_, clauses))
.dicts()
.iterator()
)
recording_ids = []
deleted_count = 0
error_count = 0
for recording in recordings_to_delete:
recording_ids.append(recording["id"])
try:
Path(recording["path"]).unlink(missing_ok=True)
deleted_count += 1
except Exception as e:
logger.error(f"Failed to delete recording file {recording['path']}: {e}")
error_count += 1
if recording_ids:
max_deletes = 100000
recording_ids_list = list(recording_ids)
for i in range(0, len(recording_ids_list), max_deletes):
Recordings.delete().where(
Recordings.id << recording_ids_list[i : i + max_deletes]
).execute()
message = f"Successfully deleted {deleted_count} recording(s)."
if error_count > 0:
message += f" {error_count} file deletion error(s) occurred."
return JSONResponse(
content={"success": True, "message": message},
status_code=200,
)
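
A usage sketch for the deletion endpoint above; host and timestamps are placeholders, and an authenticated admin session is assumed since the route requires the admin role:

```python
import requests

requests.delete(
    "http://frigate.local:5000/api/recordings/start/1717200000/end/1717286400",
    params={
        "cameras": "front_door,driveway",
        "keep": "motion,object",  # spare segments with motion or detected objects
    },
)
```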

View File

@ -33,6 +33,7 @@ from frigate.api.defs.response.review_response import (
ReviewSummaryResponse,
)
from frigate.api.defs.tags import Tags
from frigate.config import FrigateConfig
from frigate.embeddings import EmbeddingsContext
from frigate.models import Recordings, ReviewSegment, UserReviewStatus
from frigate.review.types import SeverityEnum
@ -746,7 +747,9 @@ async def set_not_reviewed(
description="Use GenAI to summarize review items over a period of time.",
)
def generate_review_summary(request: Request, start_ts: float, end_ts: float):
if not request.app.genai_manager.vision_client:
config: FrigateConfig = request.app.frigate_config
if not config.genai.provider:
return JSONResponse(
content=(
{

View File

@ -43,15 +43,10 @@ from frigate.const import (
)
from frigate.data_processing.types import DataProcessorMetrics
from frigate.db.sqlitevecq import SqliteVecQueueDatabase
from frigate.debug_replay import (
DebugReplayManager,
cleanup_replay_cameras,
)
from frigate.embeddings import EmbeddingProcess, EmbeddingsContext
from frigate.events.audio import AudioProcessor
from frigate.events.cleanup import EventCleanup
from frigate.events.maintainer import EventProcessor
from frigate.jobs.motion_search import stop_all_motion_search_jobs
from frigate.log import _stop_logging
from frigate.models import (
Event,
@ -144,9 +139,6 @@ class FrigateApp:
else:
logger.debug(f"Skipping directory: {d}")
def init_debug_replay_manager(self) -> None:
self.replay_manager = DebugReplayManager()
def init_camera_metrics(self) -> None:
# create camera_metrics
for camera_name in self.config.cameras.keys():
@ -539,7 +531,6 @@ class FrigateApp:
set_file_limit()
# Start frigate services.
self.init_debug_replay_manager()
self.init_camera_metrics()
self.init_queues()
self.init_database()
@ -550,10 +541,6 @@ class FrigateApp:
self.init_embeddings_manager()
self.bind_database()
self.check_db_data_migrations()
# Clean up any stale replay camera artifacts (filesystem + DB)
cleanup_replay_cameras()
self.init_inter_process_communicator()
self.start_detectors()
self.init_dispatcher()
@ -585,7 +572,6 @@ class FrigateApp:
self.stats_emitter,
self.event_metadata_updater,
self.inter_config_updater,
self.replay_manager,
),
host="127.0.0.1",
port=5001,
@ -600,9 +586,6 @@ class FrigateApp:
# used by the docker healthcheck
Path("/dev/shm/.frigate-is-stopping").touch()
# Cancel any running motion search jobs before setting stop_event
stop_all_motion_search_jobs()
self.stop_event.set()
# set an end_time on entries without an end_time before exiting
@ -654,7 +637,6 @@ class FrigateApp:
self.record_cleanup.join()
self.stats_emitter.join()
self.frigate_watchdog.join()
self.camera_maintainer.join()
self.db.stop()
# Save embeddings stats to disk

View File

@ -19,8 +19,6 @@ class CameraMetrics:
process_pid: Synchronized
capture_process_pid: Synchronized
ffmpeg_pid: Synchronized
reconnects_last_hour: Synchronized
stalls_last_hour: Synchronized
def __init__(self, manager: SyncManager):
self.camera_fps = manager.Value("d", 0)
@ -37,8 +35,6 @@ class CameraMetrics:
self.process_pid = manager.Value("i", 0)
self.capture_process_pid = manager.Value("i", 0)
self.ffmpeg_pid = manager.Value("i", 0)
self.reconnects_last_hour = manager.Value("i", 0)
self.stalls_last_hour = manager.Value("i", 0)
class PTZMetrics:

View File

@ -57,9 +57,6 @@ class CameraActivityManager:
all_objects: list[dict[str, Any]] = []
for camera in new_activity.keys():
if camera not in self.config.cameras:
continue
# handle cameras that were added dynamically
if camera not in self.camera_all_object_counts:
self.__init_camera(self.config.cameras[camera])
@ -127,11 +124,7 @@ class CameraActivityManager:
any_changed = False
# run through each object and check what topics need to be updated
camera_config = self.config.cameras.get(camera)
if camera_config is None:
return
for label in camera_config.objects.track:
for label in self.config.cameras[camera].objects.track:
if label in self.config.model.non_logo_attributes:
continue
@ -181,9 +174,6 @@ class AudioActivityManager:
now = datetime.datetime.now().timestamp()
for camera in new_activity.keys():
if camera not in self.config.cameras:
continue
# handle cameras that were added dynamically
if camera not in self.current_audio_detections:
self.__init_camera(self.config.cameras[camera])
@ -203,11 +193,7 @@ class AudioActivityManager:
def compare_audio_activity(
self, camera: str, new_detections: list[tuple[str, float]], now: float
) -> None:
camera_config = self.config.cameras.get(camera)
if camera_config is None:
return False
max_not_heard = camera_config.audio.max_not_heard
max_not_heard = self.config.cameras[camera].audio.max_not_heard
current = self.current_audio_detections[camera]
any_changed = False
@ -236,7 +222,6 @@ class AudioActivityManager:
None,
"audio",
{},
None,
),
EventMetadataTypeEnum.manual_event_create.value,
)

View File

@ -55,20 +55,8 @@ class CameraMaintainer(threading.Thread):
self.shm_count = self.__calculate_shm_frame_count()
self.camera_processes: dict[str, mp.Process] = {}
self.capture_processes: dict[str, mp.Process] = {}
self.camera_stop_events: dict[str, MpEvent] = {}
self.metrics_manager = metrics_manager
def __ensure_camera_stop_event(self, camera: str) -> MpEvent:
camera_stop_event = self.camera_stop_events.get(camera)
if camera_stop_event is None:
camera_stop_event = mp.Event()
self.camera_stop_events[camera] = camera_stop_event
else:
camera_stop_event.clear()
return camera_stop_event
def __init_historical_regions(self) -> None:
# delete region grids for removed or renamed cameras
cameras = list(self.config.cameras.keys())
@ -111,8 +99,6 @@ class CameraMaintainer(threading.Thread):
logger.info(f"Camera processor not started for disabled camera {name}")
return
camera_stop_event = self.__ensure_camera_stop_event(name)
if runtime:
self.camera_metrics[name] = CameraMetrics(self.metrics_manager)
self.ptz_metrics[name] = PTZMetrics(autotracker_enabled=False)
@ -149,7 +135,7 @@ class CameraMaintainer(threading.Thread):
self.camera_metrics[name],
self.ptz_metrics[name],
self.region_grids[name],
camera_stop_event,
self.stop_event,
self.config.logger,
)
self.camera_processes[config.name] = camera_process
@ -164,8 +150,6 @@ class CameraMaintainer(threading.Thread):
logger.info(f"Capture process not started for disabled camera {name}")
return
camera_stop_event = self.__ensure_camera_stop_event(name)
# pre-create shms
count = 10 if runtime else self.shm_count
for i in range(count):
@ -176,7 +160,7 @@ class CameraMaintainer(threading.Thread):
config,
count,
self.camera_metrics[name],
camera_stop_event,
self.stop_event,
self.config.logger,
)
capture_process.daemon = True
@ -186,36 +170,18 @@ class CameraMaintainer(threading.Thread):
logger.info(f"Capture process started for {name}: {capture_process.pid}")
def __stop_camera_capture_process(self, camera: str) -> None:
capture_process = self.capture_processes.get(camera)
capture_process = self.capture_processes[camera]
if capture_process is not None:
logger.info(f"Waiting for capture process for {camera} to stop")
camera_stop_event = self.camera_stop_events.get(camera)
if camera_stop_event is not None:
camera_stop_event.set()
capture_process.join(timeout=10)
if capture_process.is_alive():
logger.warning(
f"Capture process for {camera} didn't exit, forcing termination"
)
capture_process.terminate()
capture_process.join()
capture_process.terminate()
capture_process.join()
def __stop_camera_process(self, camera: str) -> None:
camera_process = self.camera_processes.get(camera)
camera_process = self.camera_processes[camera]
if camera_process is not None:
logger.info(f"Waiting for process for {camera} to stop")
camera_stop_event = self.camera_stop_events.get(camera)
if camera_stop_event is not None:
camera_stop_event.set()
camera_process.join(timeout=10)
if camera_process.is_alive():
logger.warning(f"Process for {camera} didn't exit, forcing termination")
camera_process.terminate()
camera_process.join()
camera_process.terminate()
camera_process.join()
logger.info(f"Closing frame queue for {camera}")
empty_and_close_queue(self.camera_metrics[camera].frame_queue)
@ -233,12 +199,6 @@ class CameraMaintainer(threading.Thread):
for update_type, updated_cameras in updates.items():
if update_type == CameraConfigUpdateEnum.add.name:
for camera in updated_cameras:
if (
camera in self.camera_processes
or camera in self.capture_processes
):
continue
self.__start_camera_processor(
camera,
self.update_subscriber.camera_configs[camera],
@ -250,22 +210,15 @@ class CameraMaintainer(threading.Thread):
runtime=True,
)
elif update_type == CameraConfigUpdateEnum.remove.name:
for camera in updated_cameras:
self.__stop_camera_capture_process(camera)
self.__stop_camera_process(camera)
self.capture_processes.pop(camera, None)
self.camera_processes.pop(camera, None)
self.camera_stop_events.pop(camera, None)
self.region_grids.pop(camera, None)
self.camera_metrics.pop(camera, None)
self.ptz_metrics.pop(camera, None)
self.__stop_camera_capture_process(camera)
self.__stop_camera_process(camera)
# ensure the capture processes are done
for camera in self.capture_processes.keys():
for camera in self.camera_processes.keys():
self.__stop_camera_capture_process(camera)
# ensure the camera processors are done
for camera in self.camera_processes.keys():
for camera in self.capture_processes.keys():
self.__stop_camera_process(camera)
self.update_subscriber.stop()

View File

@ -65,7 +65,7 @@ class CameraState:
frame_copy = cv2.cvtColor(frame_copy, cv2.COLOR_YUV2BGR_I420)
# draw on the frame
if draw_options.get("mask"):
mask_overlay = np.where(self.camera_config.motion.rasterized_mask == [0])
mask_overlay = np.where(self.camera_config.motion.mask == [0])
frame_copy[mask_overlay] = [0, 0, 0]
if draw_options.get("bounding_boxes"):
@ -197,10 +197,6 @@ class CameraState:
if draw_options.get("zones"):
for name, zone in self.camera_config.zones.items():
# skip disabled zones
if not zone.enabled:
continue
thickness = (
8
if any(

View File

@ -26,8 +26,8 @@ class ConfigPublisher:
def stop(self) -> None:
self.stop_event.set()
self.socket.close(linger=0)
self.context.destroy(linger=0)
self.socket.close()
self.context.destroy()
class ConfigSubscriber:
@ -55,5 +55,5 @@ class ConfigSubscriber:
return (None, None)
def stop(self) -> None:
self.socket.close(linger=0)
self.context.destroy(linger=0)
self.socket.close()
self.context.destroy()
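
The dev branch passes linger=0 so that close() and destroy() discard queued messages instead of blocking at shutdown. A minimal standalone sketch of the behavior being opted into (socket type and address are illustrative):

```python
import zmq

context = zmq.Context()
socket = context.socket(zmq.PUB)
socket.bind("tcp://127.0.0.1:5556")
socket.send_string("config/update")  # queued toward any connected subscribers
socket.close(linger=0)      # discard undelivered messages immediately
context.destroy(linger=0)   # terminate without waiting on pending sends
```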

View File

@ -15,7 +15,6 @@ from frigate.config.camera.updater import (
CameraConfigUpdatePublisher,
CameraConfigUpdateTopic,
)
from frigate.config.config import RuntimeFilterConfig, RuntimeMotionConfig
from frigate.const import (
CLEAR_ONGOING_REVIEW_SEGMENTS,
EXPIRE_AUDIO_ACTIVITY,
@ -29,7 +28,6 @@ from frigate.const import (
UPDATE_CAMERA_ACTIVITY,
UPDATE_EMBEDDINGS_REINDEX_PROGRESS,
UPDATE_EVENT_DESCRIPTION,
UPDATE_JOB_STATE,
UPDATE_MODEL_STATE,
UPDATE_REVIEW_DESCRIPTION,
UPSERT_REVIEW_SEGMENT,
@ -62,7 +60,6 @@ class Dispatcher:
self.camera_activity = CameraActivityManager(config, self.publish)
self.audio_activity = AudioActivityManager(config, self.publish)
self.model_state: dict[str, ModelStatusTypesEnum] = {}
self.job_state: dict[str, dict[str, Any]] = {} # {job_type: job_data}
self.embeddings_reindex: dict[str, Any] = {}
self.birdseye_layout: dict[str, Any] = {}
self.audio_transcription_state: str = "idle"
@ -85,9 +82,6 @@ class Dispatcher:
"review_detections": self._on_detections_command,
"object_descriptions": self._on_object_description_command,
"review_descriptions": self._on_review_description_command,
"motion_mask": self._on_motion_mask_command,
"object_mask": self._on_object_mask_command,
"zone": self._on_zone_command,
}
self._global_settings_handlers: dict[str, Callable] = {
"notifications": self._on_global_notification_command,
@ -104,23 +98,11 @@ class Dispatcher:
"""Handle receiving of payload from communicators."""
def handle_camera_command(
command_type: str,
camera_name: str,
command: str,
payload: str,
sub_command: str | None = None,
command_type: str, camera_name: str, command: str, payload: str
) -> None:
if camera_name not in self.config.cameras:
return
try:
if command_type == "set":
if sub_command:
self._camera_settings_handlers[command](
camera_name, sub_command, payload
)
else:
self._camera_settings_handlers[command](camera_name, payload)
self._camera_settings_handlers[command](camera_name, payload)
elif command_type == "ptz":
self._on_ptz_command(camera_name, payload)
except KeyError:
@ -134,9 +116,6 @@ class Dispatcher:
def handle_request_region_grid() -> Any:
camera = payload
if camera not in self.config.cameras:
return None
grid = get_camera_regions_grid(
camera,
self.config.cameras[camera].detect,
@ -201,19 +180,6 @@ class Dispatcher:
def handle_model_state() -> None:
self.publish("model_state", json.dumps(self.model_state.copy()))
def handle_update_job_state() -> None:
if payload and isinstance(payload, dict):
job_type = payload.get("job_type")
if job_type:
self.job_state[job_type] = payload
self.publish(
"job_state",
json.dumps(self.job_state),
)
def handle_job_state() -> None:
self.publish("job_state", json.dumps(self.job_state.copy()))
def handle_update_audio_transcription_state() -> None:
if payload:
self.audio_transcription_state = payload
@ -249,11 +215,7 @@ class Dispatcher:
self.publish("birdseye_layout", json.dumps(self.birdseye_layout.copy()))
def handle_on_connect() -> None:
camera_status = {
camera: status
for camera, status in self.camera_activity.last_camera_activity.copy().items()
if camera in self.config.cameras
}
camera_status = self.camera_activity.last_camera_activity.copy()
audio_detections = self.audio_activity.current_audio_detections.copy()
cameras_with_status = camera_status.keys()
@ -315,7 +277,6 @@ class Dispatcher:
UPDATE_EVENT_DESCRIPTION: handle_update_event_description,
UPDATE_REVIEW_DESCRIPTION: handle_update_review_description,
UPDATE_MODEL_STATE: handle_update_model_state,
UPDATE_JOB_STATE: handle_update_job_state,
UPDATE_EMBEDDINGS_REINDEX_PROGRESS: handle_update_embeddings_reindex_progress,
UPDATE_BIRDSEYE_LAYOUT: handle_update_birdseye_layout,
UPDATE_AUDIO_TRANSCRIPTION_STATE: handle_update_audio_transcription_state,
@ -323,7 +284,6 @@ class Dispatcher:
"restart": handle_restart,
"embeddingsReindexProgress": handle_embeddings_reindex_progress,
"modelState": handle_model_state,
"jobState": handle_job_state,
"audioTranscriptionState": handle_audio_transcription_state,
"birdseyeLayout": handle_birdseye_layout,
"onConnect": handle_on_connect,
@ -337,14 +297,6 @@ class Dispatcher:
camera_name = parts[-3]
command = parts[-2]
handle_camera_command("set", camera_name, command, payload)
elif len(parts) == 4 and topic.endswith("set"):
# example /cam_name/motion_mask/mask_name/set payload=ON|OFF
camera_name = parts[-4]
command = parts[-3]
sub_command = parts[-2]
handle_camera_command(
"set", camera_name, command, payload, sub_command
)
elif len(parts) == 2 and topic.endswith("set"):
command = parts[-2]
self._global_settings_handlers[command](payload)
@ -356,8 +308,7 @@ class Dispatcher:
# example /cam_name/notifications/suspend payload=duration
camera_name = parts[-3]
command = parts[-2]
if camera_name in self.config.cameras:
self._on_camera_notification_suspend(camera_name, payload)
self._on_camera_notification_suspend(camera_name, payload)
except IndexError:
logger.error(
f"Received invalid {topic.split('/')[-1]} command: {topic}"
@ -890,149 +841,3 @@ class Dispatcher:
genai_settings,
)
self.publish(f"{camera_name}/review_descriptions/state", payload, retain=True)
def _on_motion_mask_command(
self, camera_name: str, mask_name: str, payload: str
) -> None:
"""Callback for motion mask topic."""
if payload not in ["ON", "OFF"]:
logger.error(f"Invalid payload for motion mask {mask_name}: {payload}")
return
motion_settings = self.config.cameras[camera_name].motion
if mask_name not in motion_settings.mask:
logger.error(f"Unknown motion mask: {mask_name}")
return
mask = motion_settings.mask[mask_name]
if not mask:
logger.error(f"Motion mask {mask_name} is None")
return
if payload == "ON":
if not mask.enabled_in_config:
logger.error(
f"Motion mask {mask_name} must be enabled in the config to be turned on via MQTT."
)
return
mask.enabled = payload == "ON"
# Recreate RuntimeMotionConfig to update rasterized_mask
motion_settings = RuntimeMotionConfig(
frame_shape=self.config.cameras[camera_name].frame_shape,
**motion_settings.model_dump(exclude_unset=True),
)
# Update the dispatcher's own config
self.config.cameras[camera_name].motion = motion_settings
self.config_updater.publish_update(
CameraConfigUpdateTopic(CameraConfigUpdateEnum.motion, camera_name),
motion_settings,
)
self.publish(
f"{camera_name}/motion_mask/{mask_name}/state", payload, retain=True
)
def _on_object_mask_command(
self, camera_name: str, mask_name: str, payload: str
) -> None:
"""Callback for object mask topic."""
if payload not in ["ON", "OFF"]:
logger.error(f"Invalid payload for object mask {mask_name}: {payload}")
return
object_settings = self.config.cameras[camera_name].objects
# Check if this is a global mask
mask_found = False
if mask_name in object_settings.mask:
mask = object_settings.mask[mask_name]
if mask:
if payload == "ON":
if not mask.enabled_in_config:
logger.error(
f"Object mask {mask_name} must be enabled in the config to be turned on via MQTT."
)
return
mask.enabled = payload == "ON"
mask_found = True
# Check if this is a per-object filter mask
for object_name, filter_config in object_settings.filters.items():
if mask_name in filter_config.mask:
mask = filter_config.mask[mask_name]
if mask:
if payload == "ON":
if not mask.enabled_in_config:
logger.error(
f"Object mask {mask_name} must be enabled in the config to be turned on via MQTT."
)
return
mask.enabled = payload == "ON"
mask_found = True
if not mask_found:
logger.error(f"Unknown object mask: {mask_name}")
return
# Recreate RuntimeFilterConfig for each object filter to update rasterized_mask
for object_name, filter_config in object_settings.filters.items():
# Merge global object masks with per-object filter masks
merged_mask = dict(filter_config.mask) # Copy filter-specific masks
# Add global object masks if they exist
if object_settings.mask:
for global_mask_id, global_mask_config in object_settings.mask.items():
# Use a global prefix to avoid key collisions
global_mask_id_prefixed = f"global_{global_mask_id}"
merged_mask[global_mask_id_prefixed] = global_mask_config
object_settings.filters[object_name] = RuntimeFilterConfig(
frame_shape=self.config.cameras[camera_name].frame_shape,
mask=merged_mask,
**filter_config.model_dump(
exclude_unset=True, exclude={"mask", "raw_mask"}
),
)
# Update the dispatcher's own config
self.config.cameras[camera_name].objects = object_settings
self.config_updater.publish_update(
CameraConfigUpdateTopic(CameraConfigUpdateEnum.objects, camera_name),
object_settings,
)
self.publish(
f"{camera_name}/object_mask/{mask_name}/state", payload, retain=True
)
def _on_zone_command(self, camera_name: str, zone_name: str, payload: str) -> None:
"""Callback for zone topic."""
if payload not in ["ON", "OFF"]:
logger.error(f"Invalid payload for zone {zone_name}: {payload}")
return
camera_config = self.config.cameras[camera_name]
if zone_name not in camera_config.zones:
logger.error(f"Unknown zone: {zone_name}")
return
if payload == "ON":
if not camera_config.zones[zone_name].enabled_in_config:
logger.error(
f"Zone {zone_name} must be enabled in the config to be turned on via MQTT."
)
return
camera_config.zones[zone_name].enabled = payload == "ON"
self.config_updater.publish_update(
CameraConfigUpdateTopic(CameraConfigUpdateEnum.zones, camera_name),
camera_config.zones,
)
self.publish(f"{camera_name}/zone/{zone_name}/state", payload, retain=True)

View File

@ -61,8 +61,8 @@ class InterProcessCommunicator(Communicator):
def stop(self) -> None:
self.stop_event.set()
self.reader_thread.join()
self.socket.close(linger=0)
self.context.destroy(linger=0)
self.socket.close()
self.context.destroy()
class InterProcessRequestor:
@ -82,5 +82,5 @@ class InterProcessRequestor:
return ""
def stop(self) -> None:
self.socket.close(linger=0)
self.context.destroy(linger=0)
self.socket.close()
self.context.destroy()

View File

@ -133,29 +133,6 @@ class MqttClient(Communicator):
retain=True,
)
for mask_name, motion_mask in camera.motion.mask.items():
if motion_mask:
self.publish(
f"{camera_name}/motion_mask/{mask_name}/state",
"ON" if motion_mask.enabled else "OFF",
retain=True,
)
for mask_name, object_mask in camera.objects.mask.items():
if object_mask:
self.publish(
f"{camera_name}/object_mask/{mask_name}/state",
"ON" if object_mask.enabled else "OFF",
retain=True,
)
for zone_name, zone in camera.zones.items():
self.publish(
f"{camera_name}/zone/{zone_name}/state",
"ON" if zone.enabled else "OFF",
retain=True,
)
if self.config.notifications.enabled_in_config:
self.publish(
"notifications/state",
@ -265,24 +242,6 @@ class MqttClient(Communicator):
self.on_mqtt_command,
)
for mask_name in self.config.cameras[name].motion.mask.keys():
self.client.message_callback_add(
f"{self.mqtt_config.topic_prefix}/{name}/motion_mask/{mask_name}/set",
self.on_mqtt_command,
)
for mask_name in self.config.cameras[name].objects.mask.keys():
self.client.message_callback_add(
f"{self.mqtt_config.topic_prefix}/{name}/object_mask/{mask_name}/set",
self.on_mqtt_command,
)
for zone_name in self.config.cameras[name].zones.keys():
self.client.message_callback_add(
f"{self.mqtt_config.topic_prefix}/{name}/zone/{zone_name}/set",
self.on_mqtt_command,
)
if self.config.notifications.enabled_in_config:
self.client.message_callback_add(
f"{self.mqtt_config.topic_prefix}/notifications/set",

View File

@ -43,7 +43,7 @@ class ZmqProxy:
def stop(self) -> None:
# destroying the context will tell the proxy to stop
self.context.destroy(linger=0)
self.context.destroy()
self.runner.join()
@ -66,8 +66,8 @@ class Publisher(Generic[T]):
self.socket.send_string(f"{self.topic}{sub_topic} {json.dumps(payload)}")
def stop(self) -> None:
self.socket.close(linger=0)
self.context.destroy(linger=0)
self.socket.close()
self.context.destroy()
class Subscriber(Generic[T]):
@ -96,8 +96,8 @@ class Subscriber(Generic[T]):
return self._return_object("", None)
def stop(self) -> None:
self.socket.close(linger=0)
self.context.destroy(linger=0)
self.socket.close()
self.context.destroy()
def _return_object(self, topic: str, payload: T | None) -> T | None:
return payload
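
Publisher frames every message as a topic prefix plus a JSON payload in one string, and Subscriber filters on the prefix. A minimal pub/sub round trip under that framing (the topic name is made up):

import json
import time
import zmq

context = zmq.Context()

pub = context.socket(zmq.PUB)
pub.bind("inproc://events")

sub = context.socket(zmq.SUB)
sub.connect("inproc://events")
sub.setsockopt_string(zmq.SUBSCRIBE, "detect/")  # prefix match

time.sleep(0.1)  # let the subscription propagate (slow-joiner)
pub.send_string(f"detect/front {json.dumps({'motion': True})}")

topic, _, raw = sub.recv_string().partition(" ")
print(topic, json.loads(raw))  # detect/front {'motion': True}

for s in (pub, sub):
    s.close(linger=0)
context.destroy(linger=0)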

View File

@ -8,7 +8,6 @@ from .config import * # noqa: F403
from .database import * # noqa: F403
from .logger import * # noqa: F403
from .mqtt import * # noqa: F403
from .network import * # noqa: F403
from .proxy import * # noqa: F403
from .telemetry import * # noqa: F403
from .tls import * # noqa: F403

View File

@ -8,63 +8,39 @@ __all__ = ["AuthConfig"]
class AuthConfig(FrigateBaseModel):
enabled: bool = Field(
default=True,
title="Enable authentication",
description="Enable native authentication for the Frigate UI.",
)
enabled: bool = Field(default=True, title="Enable authentication")
reset_admin_password: bool = Field(
default=False,
title="Reset admin password",
description="If true, reset the admin user's password on startup and print the new password in logs.",
default=False, title="Reset the admin password on startup"
)
cookie_name: str = Field(
default="frigate_token",
title="JWT cookie name",
description="Name of the cookie used to store the JWT token for native authentication.",
pattern=r"^[a-z_]+$",
)
cookie_secure: bool = Field(
default=False,
title="Secure cookie flag",
description="Set the secure flag on the auth cookie; should be true when using TLS.",
default="frigate_token", title="Name for jwt token cookie", pattern=r"^[a-z_]+$"
)
cookie_secure: bool = Field(default=False, title="Set secure flag on cookie")
session_length: int = Field(
default=86400,
title="Session length",
description="Session duration in seconds for JWT-based sessions.",
ge=60,
default=86400, title="Session length for jwt session tokens", ge=60
)
refresh_time: int = Field(
default=1800,
title="Session refresh window",
description="When a session is within this many seconds of expiring, refresh it back to full length.",
title="Refresh the session if it is going to expire in this many seconds",
ge=30,
)
failed_login_rate_limit: Optional[str] = Field(
default=None,
title="Failed login limits",
description="Rate limiting rules for failed login attempts to reduce brute-force attacks.",
title="Rate limits for failed login attempts.",
)
trusted_proxies: list[str] = Field(
default=[],
title="Trusted proxies",
description="List of trusted proxy IPs used when determining client IP for rate limiting.",
title="Trusted proxies for determining IP address to rate limit",
)
# As of Feb 2023, OWASP recommends 600000 iterations for PBKDF2-SHA256
hash_iterations: int = Field(
default=600000,
title="Hash iterations",
description="Number of PBKDF2-SHA256 iterations to use when hashing user passwords.",
)
hash_iterations: int = Field(default=600000, title="Password hash iterations")
roles: Dict[str, List[str]] = Field(
default_factory=dict,
title="Role mappings",
description="Map roles to camera lists. An empty list grants access to all cameras for the role.",
title="Role to camera mappings. Empty list grants access to all cameras.",
)
admin_first_time_login: Optional[bool] = Field(
default=False,
title="First-time admin flag",
title="Internal field to expose first-time admin login flag to the UI",
description=(
"When true the UI may show a help link on the login page informing users how to sign in after an admin password reset. "
),
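
The dev side splits each field's metadata into a short title plus a longer description; both flow into the JSON schema that documents the config. A compact sketch of the pattern with a stand-in model:

from pydantic import BaseModel, Field

class ExampleAuthConfig(BaseModel):
    enabled: bool = Field(
        default=True,
        title="Enable authentication",
        description="Enable native authentication for the UI.",
    )
    cookie_name: str = Field(
        default="frigate_token",
        title="JWT cookie name",
        pattern=r"^[a-z_]+$",  # invalid names fail validation
    )

schema = ExampleAuthConfig.model_json_schema()
print(schema["properties"]["enabled"]["description"])
print(schema["properties"]["cookie_name"]["pattern"])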

View File

@ -17,45 +17,25 @@ class AudioFilterConfig(FrigateBaseModel):
default=0.8,
ge=AUDIO_MIN_CONFIDENCE,
lt=1.0,
title="Minimum audio confidence",
description="Minimum confidence threshold for the audio event to be counted.",
title="Minimum detection confidence threshold for audio to be counted.",
)
class AudioConfig(FrigateBaseModel):
enabled: bool = Field(
default=False,
title="Enable audio detection",
description="Enable or disable audio event detection for all cameras; can be overridden per-camera.",
)
enabled: bool = Field(default=False, title="Enable audio events.")
max_not_heard: int = Field(
default=30,
title="End timeout",
description="Amount of seconds without the configured audio type before the audio event is ended.",
default=30, title="Seconds of not hearing the type of audio to end the event."
)
min_volume: int = Field(
default=500,
title="Minimum volume",
description="Minimum RMS volume threshold required to run audio detection; lower values increase sensitivity (e.g., 200 high, 500 medium, 1000 low).",
default=500, title="Min volume required to run audio detection."
)
listen: list[str] = Field(
default=DEFAULT_LISTEN_AUDIO,
title="Listen types",
description="List of audio event types to detect (for example: bark, fire_alarm, scream, speech, yell).",
default=DEFAULT_LISTEN_AUDIO, title="Audio to listen for."
)
filters: Optional[dict[str, AudioFilterConfig]] = Field(
None,
title="Audio filters",
description="Per-audio-type filter settings such as confidence thresholds used to reduce false positives.",
None, title="Audio filters."
)
enabled_in_config: Optional[bool] = Field(
None,
title="Original audio state",
description="Indicates whether audio detection was originally enabled in the static config file.",
)
num_threads: int = Field(
default=2,
title="Detection threads",
description="Number of threads to use for audio detection processing.",
ge=1,
None, title="Keep track of original state of audio detection."
)
num_threads: int = Field(default=2, title="Number of detection threads", ge=1)
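
min_volume gates audio detection on loudness; the dev-side description suggests roughly 200 for high sensitivity, 500 for medium, and 1000 for low. A sketch of how such an RMS gate might look, assuming 16-bit PCM chunks (illustrative only, not Frigate's implementation):

import numpy as np

def should_run_detection(samples, min_volume=500):
    # Root-mean-square volume of a chunk of int16 PCM audio.
    rms = float(np.sqrt(np.mean(np.square(samples.astype(np.float64)))))
    return rms >= min_volume

quiet = np.random.randint(-300, 300, 16000, dtype=np.int16)
loud = np.random.randint(-5000, 5000, 16000, dtype=np.int16)
print(should_run_detection(quiet), should_run_detection(loud))  # False True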

View File

@ -29,88 +29,45 @@ class BirdseyeModeEnum(str, Enum):
class BirdseyeLayoutConfig(FrigateBaseModel):
scaling_factor: float = Field(
default=2.0,
title="Scaling factor",
description="Scaling factor used by the layout calculator (range 1.0 to 5.0).",
ge=1.0,
le=5.0,
)
max_cameras: Optional[int] = Field(
default=None,
title="Max cameras",
description="Maximum number of cameras to display at once in Birdseye; shows the most recent cameras.",
default=2.0, title="Birdseye Scaling Factor", ge=1.0, le=5.0
)
max_cameras: Optional[int] = Field(default=None, title="Max cameras")
class BirdseyeConfig(FrigateBaseModel):
enabled: bool = Field(
default=True,
title="Enable Birdseye",
description="Enable or disable the Birdseye view feature.",
)
enabled: bool = Field(default=True, title="Enable birdseye view.")
mode: BirdseyeModeEnum = Field(
default=BirdseyeModeEnum.objects,
title="Tracking mode",
description="Mode for including cameras in Birdseye: 'objects', 'motion', or 'continuous'.",
default=BirdseyeModeEnum.objects, title="Tracking mode."
)
restream: bool = Field(
default=False,
title="Restream RTSP",
description="Re-stream the Birdseye output as an RTSP feed; enabling this will keep Birdseye running continuously.",
)
width: int = Field(
default=1280,
title="Width",
description="Output width (pixels) of the composed Birdseye frame.",
)
height: int = Field(
default=720,
title="Height",
description="Output height (pixels) of the composed Birdseye frame.",
)
restream: bool = Field(default=False, title="Restream birdseye via RTSP.")
width: int = Field(default=1280, title="Birdseye width.")
height: int = Field(default=720, title="Birdseye height.")
quality: int = Field(
default=8,
title="Encoding quality",
description="Encoding quality for the Birdseye mpeg1 feed (1 highest quality, 31 lowest).",
title="Encoding quality.",
ge=1,
le=31,
)
inactivity_threshold: int = Field(
default=30,
title="Inactivity threshold",
description="Seconds of inactivity after which a camera will stop being shown in Birdseye.",
gt=0,
default=30, title="Birdseye Inactivity Threshold", gt=0
)
layout: BirdseyeLayoutConfig = Field(
default_factory=BirdseyeLayoutConfig,
title="Layout",
description="Layout options for the Birdseye composition.",
default_factory=BirdseyeLayoutConfig, title="Birdseye Layout Config"
)
idle_heartbeat_fps: float = Field(
default=0.0,
ge=0.0,
le=10.0,
title="Idle heartbeat FPS",
description="Frames-per-second to resend the last composed Birdseye frame when idle; set to 0 to disable.",
title="Idle heartbeat FPS (0 disables, max 10)",
)
# uses BaseModel because some global attributes are not available at the camera level
class BirdseyeCameraConfig(BaseModel):
enabled: bool = Field(
default=True,
title="Enable Birdseye",
description="Enable or disable the Birdseye view feature.",
)
enabled: bool = Field(default=True, title="Enable birdseye view for camera.")
mode: BirdseyeModeEnum = Field(
default=BirdseyeModeEnum.objects,
title="Tracking mode",
description="Mode for including cameras in Birdseye: 'objects', 'motion', or 'continuous'.",
default=BirdseyeModeEnum.objects, title="Tracking mode for camera."
)
order: int = Field(
default=0,
title="Position",
description="Numeric position controlling the camera's ordering in the Birdseye layout.",
)
order: int = Field(default=0, title="Position of the camera in the birdseye view.")

View File

@ -50,17 +50,10 @@ class CameraTypeEnum(str, Enum):
class CameraConfig(FrigateBaseModel):
name: Optional[str] = Field(
None,
title="Camera name",
description="Camera name is required",
pattern=REGEX_CAMERA_NAME,
)
name: Optional[str] = Field(None, title="Camera name.", pattern=REGEX_CAMERA_NAME)
friendly_name: Optional[str] = Field(
None,
title="Friendly name",
description="Camera friendly name used in the Frigate UI",
None, title="Camera friendly name used in the Frigate UI."
)
@model_validator(mode="before")
@ -70,129 +63,80 @@ class CameraConfig(FrigateBaseModel):
pass
return values
enabled: bool = Field(default=True, title="Enabled", description="Enabled")
enabled: bool = Field(default=True, title="Enable camera.")
# Options with global fallback
audio: AudioConfig = Field(
default_factory=AudioConfig,
title="Audio events",
description="Settings for audio-based event detection for this camera.",
default_factory=AudioConfig, title="Audio events configuration."
)
audio_transcription: CameraAudioTranscriptionConfig = Field(
default_factory=CameraAudioTranscriptionConfig,
title="Audio transcription",
description="Settings for live and speech audio transcription used for events and live captions.",
title="Audio transcription config.",
)
birdseye: BirdseyeCameraConfig = Field(
default_factory=BirdseyeCameraConfig,
title="Birdseye",
description="Settings for the Birdseye composite view that composes multiple camera feeds into a single layout.",
default_factory=BirdseyeCameraConfig, title="Birdseye camera configuration."
)
detect: DetectConfig = Field(
default_factory=DetectConfig,
title="Object Detection",
description="Settings for the detection/detect role used to run object detection and initialize trackers.",
default_factory=DetectConfig, title="Object detection configuration."
)
face_recognition: CameraFaceRecognitionConfig = Field(
default_factory=CameraFaceRecognitionConfig,
title="Face recognition",
description="Settings for face detection and recognition for this camera.",
)
ffmpeg: CameraFfmpegConfig = Field(
title="FFmpeg",
description="FFmpeg settings including binary path, args, hwaccel options, and per-role output args.",
default_factory=CameraFaceRecognitionConfig, title="Face recognition config."
)
ffmpeg: CameraFfmpegConfig = Field(title="FFmpeg configuration for the camera.")
live: CameraLiveConfig = Field(
default_factory=CameraLiveConfig,
title="Live playback",
description="Settings used by the Web UI to control live stream selection, resolution and quality.",
default_factory=CameraLiveConfig, title="Live playback settings."
)
lpr: CameraLicensePlateRecognitionConfig = Field(
default_factory=CameraLicensePlateRecognitionConfig,
title="License Plate Recognition",
description="License plate recognition settings including detection thresholds, formatting, and known plates.",
)
motion: MotionConfig = Field(
None,
title="Motion detection",
description="Default motion detection settings for this camera.",
default_factory=CameraLicensePlateRecognitionConfig, title="LPR config."
)
motion: MotionConfig = Field(None, title="Motion detection configuration.")
objects: ObjectConfig = Field(
default_factory=ObjectConfig,
title="Objects",
description="Object tracking defaults including which labels to track and per-object filters.",
default_factory=ObjectConfig, title="Object configuration."
)
record: RecordConfig = Field(
default_factory=RecordConfig,
title="Recording",
description="Recording and retention settings for this camera.",
default_factory=RecordConfig, title="Record configuration."
)
review: ReviewConfig = Field(
default_factory=ReviewConfig,
title="Review",
description="Settings that control alerts, detections, and GenAI review summaries used by the UI and storage for this camera.",
default_factory=ReviewConfig, title="Review configuration."
)
semantic_search: CameraSemanticSearchConfig = Field(
default_factory=CameraSemanticSearchConfig,
title="Semantic Search",
description="Settings for semantic search which builds and queries object embeddings to find similar items.",
title="Semantic search configuration.",
)
snapshots: SnapshotsConfig = Field(
default_factory=SnapshotsConfig,
title="Snapshots",
description="Settings for saved JPEG snapshots of tracked objects for this camera.",
default_factory=SnapshotsConfig, title="Snapshot configuration."
)
timestamp_style: TimestampStyleConfig = Field(
default_factory=TimestampStyleConfig,
title="Timestamp style",
description="Styling options for in-feed timestamps applied to recordings and snapshots.",
default_factory=TimestampStyleConfig, title="Timestamp style configuration."
)
# Options without global fallback
best_image_timeout: int = Field(
default=60,
title="Best image timeout",
description="How long to wait for the image with the highest confidence score.",
title="How long to wait for the image with the highest confidence score.",
)
mqtt: CameraMqttConfig = Field(
default_factory=CameraMqttConfig,
title="MQTT",
description="MQTT image publishing settings.",
default_factory=CameraMqttConfig, title="MQTT configuration."
)
notifications: NotificationConfig = Field(
default_factory=NotificationConfig,
title="Notifications",
description="Settings to enable and control notifications for this camera.",
default_factory=NotificationConfig, title="Notifications configuration."
)
onvif: OnvifConfig = Field(
default_factory=OnvifConfig,
title="ONVIF",
description="ONVIF connection and PTZ autotracking settings for this camera.",
)
type: CameraTypeEnum = Field(
default=CameraTypeEnum.generic,
title="Camera type",
description="Camera Type",
default_factory=OnvifConfig, title="Camera Onvif Configuration."
)
type: CameraTypeEnum = Field(default=CameraTypeEnum.generic, title="Camera Type")
ui: CameraUiConfig = Field(
default_factory=CameraUiConfig,
title="Camera UI",
description="Display ordering and visibility for this camera in the UI. Ordering affects the default dashboard. For more granular control, use camera groups.",
default_factory=CameraUiConfig, title="Camera UI Modifications."
)
webui_url: Optional[str] = Field(
None,
title="Camera URL",
description="URL to visit the camera directly from system page",
title="URL to visit the camera directly from system page",
)
zones: dict[str, ZoneConfig] = Field(
default_factory=dict,
title="Zones",
description="Zones allow you to define a specific area of the frame so you can determine whether or not an object is within a particular area.",
default_factory=dict, title="Zone configuration."
)
enabled_in_config: Optional[bool] = Field(
default=None,
title="Original camera state",
description="Keep track of original state of camera.",
default=None, title="Keep track of original state of camera."
)
_ffmpeg_cmds: list[dict[str, list[str]]] = PrivateAttr()
@ -242,14 +186,6 @@ class CameraConfig(FrigateBaseModel):
def create_ffmpeg_cmds(self):
if "_ffmpeg_cmds" in self:
return
self._build_ffmpeg_cmds()
def recreate_ffmpeg_cmds(self):
"""Force regeneration of ffmpeg commands from current config."""
self._build_ffmpeg_cmds()
def _build_ffmpeg_cmds(self):
"""Build ffmpeg commands from the current ffmpeg config."""
ffmpeg_cmds = []
for ffmpeg_input in self.ffmpeg.inputs:
ffmpeg_cmd = self._get_ffmpeg_cmd(ffmpeg_input)

View File

@ -8,82 +8,56 @@ __all__ = ["DetectConfig", "StationaryConfig", "StationaryMaxFramesConfig"]
class StationaryMaxFramesConfig(FrigateBaseModel):
default: Optional[int] = Field(
default=None,
title="Default max frames",
description="Default maximum frames to track a stationary object before stopping.",
ge=1,
)
default: Optional[int] = Field(default=None, title="Default max frames.", ge=1)
objects: dict[str, int] = Field(
default_factory=dict,
title="Object max frames",
description="Per-object overrides for maximum frames to track stationary objects.",
default_factory=dict, title="Object specific max frames."
)
class StationaryConfig(FrigateBaseModel):
interval: Optional[int] = Field(
default=None,
title="Stationary interval",
description="How often (in frames) to run a detection check to confirm a stationary object.",
title="Frame interval for checking stationary objects.",
gt=0,
)
threshold: Optional[int] = Field(
default=None,
title="Stationary threshold",
description="Number of frames with no position change required to mark an object as stationary.",
title="Number of frames without a position change for an object to be considered stationary",
ge=1,
)
max_frames: StationaryMaxFramesConfig = Field(
default_factory=StationaryMaxFramesConfig,
title="Max frames",
description="Limits how long stationary objects are tracked before being discarded.",
title="Max frames for stationary objects.",
)
classifier: bool = Field(
default=True,
title="Enable visual classifier",
description="Use a visual classifier to detect truly stationary objects even when bounding boxes jitter.",
title="Enable visual classifier for determing if objects with jittery bounding boxes are stationary.",
)
class DetectConfig(FrigateBaseModel):
enabled: bool = Field(
default=False,
title="Detection enabled",
description="Enable or disable object detection for all cameras; can be overridden per-camera. Detection must be enabled for object tracking to run.",
)
enabled: bool = Field(default=False, title="Detection Enabled.")
height: Optional[int] = Field(
default=None,
title="Detect height",
description="Height (pixels) of frames used for the detect stream; leave empty to use the native stream resolution.",
default=None, title="Height of the stream for the detect role."
)
width: Optional[int] = Field(
default=None,
title="Detect width",
description="Width (pixels) of frames used for the detect stream; leave empty to use the native stream resolution.",
default=None, title="Width of the stream for the detect role."
)
fps: int = Field(
default=5,
title="Detect FPS",
description="Desired frames per second to run detection on; lower values reduce CPU usage (recommended value is 5, only set higher - at most 10 - if tracking extremely fast moving objects).",
default=5, title="Number of frames per second to process through detection."
)
min_initialized: Optional[int] = Field(
default=None,
title="Minimum initialization frames",
description="Number of consecutive detection hits required before creating a tracked object. Increase to reduce false initializations. Default value is fps divided by 2.",
title="Minimum number of consecutive hits for an object to be initialized by the tracker.",
)
max_disappeared: Optional[int] = Field(
default=None,
title="Maximum disappeared frames",
description="Number of frames without a detection before a tracked object is considered gone.",
title="Maximum number of frames the object can disappear before detection ends.",
)
stationary: StationaryConfig = Field(
default_factory=StationaryConfig,
title="Stationary objects config",
description="Settings to detect and manage objects that remain stationary for a period of time.",
title="Stationary objects config.",
)
annotation_offset: int = Field(
default=0,
title="Annotation offset",
description="Milliseconds to shift detect annotations to better align timeline bounding boxes with recordings; can be positive or negative.",
default=0, title="Milliseconds to offset detect annotations by."
)

View File

@ -35,58 +35,39 @@ DETECT_FFMPEG_OUTPUT_ARGS_DEFAULT = [
class FfmpegOutputArgsConfig(FrigateBaseModel):
detect: Union[str, list[str]] = Field(
default=DETECT_FFMPEG_OUTPUT_ARGS_DEFAULT,
title="Detect output arguments",
description="Default output arguments for detect role streams.",
title="Detect role FFmpeg output arguments.",
)
record: Union[str, list[str]] = Field(
default=RECORD_FFMPEG_OUTPUT_ARGS_DEFAULT,
title="Record output arguments",
description="Default output arguments for record role streams.",
title="Record role FFmpeg output arguments.",
)
class FfmpegConfig(FrigateBaseModel):
path: str = Field(
default="default",
title="FFmpeg path",
description='Path to the FFmpeg binary to use or a version alias ("5.0" or "7.0").',
)
path: str = Field(default="default", title="FFmpeg path")
global_args: Union[str, list[str]] = Field(
default=FFMPEG_GLOBAL_ARGS_DEFAULT,
title="FFmpeg global arguments",
description="Global arguments passed to FFmpeg processes.",
default=FFMPEG_GLOBAL_ARGS_DEFAULT, title="Global FFmpeg arguments."
)
hwaccel_args: Union[str, list[str]] = Field(
default="auto",
title="Hardware acceleration arguments",
description="Hardware acceleration arguments for FFmpeg. Provider-specific presets are recommended.",
default="auto", title="FFmpeg hardware acceleration arguments."
)
input_args: Union[str, list[str]] = Field(
default=FFMPEG_INPUT_ARGS_DEFAULT,
title="Input arguments",
description="Input arguments applied to FFmpeg input streams.",
default=FFMPEG_INPUT_ARGS_DEFAULT, title="FFmpeg input arguments."
)
output_args: FfmpegOutputArgsConfig = Field(
default_factory=FfmpegOutputArgsConfig,
title="Output arguments",
description="Default output arguments used for different FFmpeg roles such as detect and record.",
title="FFmpeg output arguments per role.",
)
retry_interval: float = Field(
default=10.0,
title="FFmpeg retry time",
description="Seconds to wait before attempting to reconnect a camera stream after failure. Default is 10.",
title="Time in seconds to wait before FFmpeg retries connecting to the camera.",
gt=0.0,
)
apple_compatibility: bool = Field(
default=False,
title="Apple compatibility",
description="Enable HEVC tagging for better Apple player compatibility when recording H.265.",
)
gpu: int = Field(
default=0,
title="GPU index",
description="Default GPU index used for hardware acceleration if available.",
title="Set tag on HEVC (H.265) recording stream to improve compatibility with Apple players.",
)
gpu: int = Field(default=0, title="GPU index to use for hardware acceleration.")
@property
def ffmpeg_path(self) -> str:
@ -114,36 +95,21 @@ class CameraRoleEnum(str, Enum):
class CameraInput(FrigateBaseModel):
path: EnvString = Field(
title="Input path",
description="Camera input stream URL or path.",
)
roles: list[CameraRoleEnum] = Field(
title="Input roles",
description="Roles for this input stream.",
)
path: EnvString = Field(title="Camera input path.")
roles: list[CameraRoleEnum] = Field(title="Roles assigned to this input.")
global_args: Union[str, list[str]] = Field(
default_factory=list,
title="FFmpeg global arguments",
description="FFmpeg global arguments for this input stream.",
default_factory=list, title="FFmpeg global arguments."
)
hwaccel_args: Union[str, list[str]] = Field(
default_factory=list,
title="Hardware acceleration arguments",
description="Hardware acceleration arguments for this input stream.",
default_factory=list, title="FFmpeg hardware acceleration arguments."
)
input_args: Union[str, list[str]] = Field(
default_factory=list,
title="Input arguments",
description="Input arguments specific to this stream.",
default_factory=list, title="FFmpeg input arguments."
)
class CameraFfmpegConfig(FfmpegConfig):
inputs: list[CameraInput] = Field(
title="Camera inputs",
description="List of input stream definitions (paths and roles) for this camera.",
)
inputs: list[CameraInput] = Field(title="Camera inputs.")
@field_validator("inputs")
@classmethod

View File

@ -6,7 +6,7 @@ from pydantic import Field
from ..base import FrigateBaseModel
from ..env import EnvString
__all__ = ["GenAIConfig", "GenAIProviderEnum", "GenAIRoleEnum"]
__all__ = ["GenAIConfig", "GenAIProviderEnum"]
class GenAIProviderEnum(str, Enum):
@ -14,56 +14,18 @@ class GenAIProviderEnum(str, Enum):
azure_openai = "azure_openai"
gemini = "gemini"
ollama = "ollama"
llamacpp = "llamacpp"
class GenAIRoleEnum(str, Enum):
tools = "tools"
vision = "vision"
embeddings = "embeddings"
class GenAIConfig(FrigateBaseModel):
"""Primary GenAI Config to define GenAI Provider."""
api_key: Optional[EnvString] = Field(
default=None,
title="API key",
description="API key required by some providers (can also be set via environment variables).",
)
base_url: Optional[str] = Field(
default=None,
title="Base URL",
description="Base URL for self-hosted or compatible providers (for example an Ollama instance).",
)
model: str = Field(
default="gpt-4o",
title="Model",
description="The model to use from the provider for generating descriptions or summaries.",
)
provider: GenAIProviderEnum | None = Field(
default=None,
title="Provider",
description="The GenAI provider to use (for example: ollama, gemini, openai).",
)
roles: list[GenAIRoleEnum] = Field(
default_factory=lambda: [
GenAIRoleEnum.embeddings,
GenAIRoleEnum.vision,
GenAIRoleEnum.tools,
],
title="Roles",
description="GenAI roles (tools, vision, embeddings); one provider per role.",
)
api_key: Optional[EnvString] = Field(default=None, title="Provider API key.")
base_url: Optional[str] = Field(default=None, title="Provider base url.")
model: str = Field(default="gpt-4o", title="GenAI model.")
provider: GenAIProviderEnum | None = Field(default=None, title="GenAI provider.")
provider_options: dict[str, Any] = Field(
default={},
title="Provider options",
description="Additional provider-specific options to pass to the GenAI client.",
json_schema_extra={"additionalProperties": {"type": "string"}},
default={}, title="GenAI Provider extra options."
)
runtime_options: dict[str, Any] = Field(
default={},
title="Runtime options",
description="Runtime options passed to the provider for each inference call.",
json_schema_extra={"additionalProperties": {"type": "string"}},
default={}, title="Options to pass during inference calls."
)

View File

@ -10,18 +10,7 @@ __all__ = ["CameraLiveConfig"]
class CameraLiveConfig(FrigateBaseModel):
streams: Dict[str, str] = Field(
default_factory=list,
title="Live stream names",
description="Mapping of configured stream names to restream/go2rtc names used for live playback.",
)
height: int = Field(
default=720,
title="Live height",
description="Height (pixels) to render the jsmpeg live stream in the Web UI; must be <= detect stream height.",
)
quality: int = Field(
default=8,
ge=1,
le=31,
title="Live quality",
description="Encoding quality for the jsmpeg stream (1 highest, 31 lowest).",
title="Friendly names and restream names to use for live view.",
)
height: int = Field(default=720, title="Live camera view height")
quality: int = Field(default=8, ge=1, le=31, title="Live camera view quality")

View File

@ -1,85 +0,0 @@
"""Mask configuration for motion and object masks."""
from typing import Any, Optional, Union
from pydantic import Field, field_serializer
from ..base import FrigateBaseModel
__all__ = ["MotionMaskConfig", "ObjectMaskConfig"]
class MotionMaskConfig(FrigateBaseModel):
"""Configuration for a single motion mask."""
friendly_name: Optional[str] = Field(
default=None,
title="Friendly name",
description="A friendly name for this motion mask used in the Frigate UI",
)
enabled: bool = Field(
default=True,
title="Enabled",
description="Enable or disable this motion mask",
)
coordinates: Union[str, list[str]] = Field(
default="",
title="Coordinates",
description="Ordered x,y coordinates defining the motion mask polygon used to include/exclude areas.",
)
raw_coordinates: Union[str, list[str]] = ""
enabled_in_config: Optional[bool] = Field(
default=None, title="Keep track of original state of motion mask."
)
def get_formatted_name(self, mask_id: str) -> str:
"""Return the friendly name if set, otherwise return a formatted version of the mask ID."""
if self.friendly_name:
return self.friendly_name
return mask_id.replace("_", " ").title()
@field_serializer("coordinates", when_used="json")
def serialize_coordinates(self, value: Any, info):
return self.raw_coordinates if self.raw_coordinates else value
@field_serializer("raw_coordinates", when_used="json")
def serialize_raw_coordinates(self, value: Any, info):
return None
class ObjectMaskConfig(FrigateBaseModel):
"""Configuration for a single object mask."""
friendly_name: Optional[str] = Field(
default=None,
title="Friendly name",
description="A friendly name for this object mask used in the Frigate UI",
)
enabled: bool = Field(
default=True,
title="Enabled",
description="Enable or disable this object mask",
)
coordinates: Union[str, list[str]] = Field(
default="",
title="Coordinates",
description="Ordered x,y coordinates defining the object mask polygon used to include/exclude areas.",
)
raw_coordinates: Union[str, list[str]] = ""
enabled_in_config: Optional[bool] = Field(
default=None, title="Keep track of original state of object mask."
)
@field_serializer("coordinates", when_used="json")
def serialize_coordinates(self, value: Any, info):
return self.raw_coordinates if self.raw_coordinates else value
@field_serializer("raw_coordinates", when_used="json")
def serialize_raw_coordinates(self, value: Any, info):
return None
def get_formatted_name(self, mask_id: str) -> str:
"""Return the friendly name if set, otherwise return a formatted version of the mask ID."""
if self.friendly_name:
return self.friendly_name
return mask_id.replace("_", " ").title()
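
get_formatted_name prefers the configured friendly name and otherwise humanizes the mask ID. A quick usage sketch of that fallback, reduced to a plain function:

def get_formatted_name(friendly_name, mask_id):
    # Prefer the configured friendly name; otherwise humanize the ID.
    if friendly_name:
        return friendly_name
    return mask_id.replace("_", " ").title()

print(get_formatted_name(None, "front_porch_mask"))  # Front Porch Mask
print(get_formatted_name("Driveway", "mask_1"))      # Driveway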

View File

@ -1,89 +1,43 @@
from typing import Any, Optional
from typing import Any, Optional, Union
from pydantic import Field, field_serializer
from ..base import FrigateBaseModel
from .mask import MotionMaskConfig
__all__ = ["MotionConfig"]
class MotionConfig(FrigateBaseModel):
enabled: bool = Field(
default=True,
title="Enable motion detection",
description="Enable or disable motion detection for all cameras; can be overridden per-camera.",
)
enabled: bool = Field(default=True, title="Enable motion on all cameras.")
threshold: int = Field(
default=30,
title="Motion threshold",
description="Pixel difference threshold used by the motion detector; higher values reduce sensitivity (range 1-255).",
title="Motion detection threshold (1-255).",
ge=1,
le=255,
)
lightning_threshold: float = Field(
default=0.8,
title="Lightning threshold",
description="Threshold to detect and ignore brief lighting spikes (lower is more sensitive, values between 0.3 and 1.0). This does not prevent motion detection entirely; it merely causes the detector to stop analyzing additional frames once the threshold is exceeded. Motion-based recordings are still created during these events.",
ge=0.3,
le=1.0,
default=0.8, title="Lightning detection threshold (0.3-1.0).", ge=0.3, le=1.0
)
skip_motion_threshold: Optional[float] = Field(
default=None,
title="Skip motion threshold",
description="If set to a value between 0.0 and 1.0, and more than this fraction of the image changes in a single frame, the detector will return no motion boxes and immediately recalibrate. This can save CPU and reduce false positives during lightning, storms, etc., but may miss real events such as a PTZ camera autotracking an object. The tradeoff is between dropping a few megabytes of recordings versus reviewing a couple short clips. Leave unset (None) to disable this feature.",
ge=0.0,
le=1.0,
)
improve_contrast: bool = Field(
default=True,
title="Improve contrast",
description="Apply contrast improvement to frames before motion analysis to help detection.",
)
contour_area: Optional[int] = Field(
default=10,
title="Contour area",
description="Minimum contour area in pixels required for a motion contour to be counted.",
)
delta_alpha: float = Field(
default=0.2,
title="Delta alpha",
description="Alpha blending factor used in frame differencing for motion calculation.",
)
frame_alpha: float = Field(
default=0.01,
title="Frame alpha",
description="Alpha value used when blending frames for motion preprocessing.",
)
frame_height: Optional[int] = Field(
default=100,
title="Frame height",
description="Height in pixels to scale frames to when computing motion.",
)
mask: dict[str, Optional[MotionMaskConfig]] = Field(
default_factory=dict,
title="Mask coordinates",
description="Ordered x,y coordinates defining the motion mask polygon used to include/exclude areas.",
improve_contrast: bool = Field(default=True, title="Improve Contrast")
contour_area: Optional[int] = Field(default=10, title="Contour Area")
delta_alpha: float = Field(default=0.2, title="Delta Alpha")
frame_alpha: float = Field(default=0.01, title="Frame Alpha")
frame_height: Optional[int] = Field(default=100, title="Frame Height")
mask: Union[str, list[str]] = Field(
default="", title="Coordinates polygon for the motion mask."
)
mqtt_off_delay: int = Field(
default=30,
title="MQTT off delay",
description="Seconds to wait after last motion before publishing an MQTT 'off' state.",
title="Delay for updating MQTT with no motion detected.",
)
enabled_in_config: Optional[bool] = Field(
default=None,
title="Original motion state",
description="Indicates whether motion detection was enabled in the original static configuration.",
)
raw_mask: dict[str, Optional[MotionMaskConfig]] = Field(
default_factory=dict, exclude=True
default=None, title="Keep track of original state of motion detection."
)
raw_mask: Union[str, list[str]] = ""
@field_serializer("mask", when_used="json")
def serialize_mask(self, value: Any, info):
if self.raw_mask:
return self.raw_mask
return value
return self.raw_mask
@field_serializer("raw_mask", when_used="json")
def serialize_raw_mask(self, value: Any, info):
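
The serializer pair above emits raw_mask in place of the parsed mask when dumping to JSON and suppresses raw_mask itself, so configs round-trip with the user's original coordinate strings. A reduced sketch of that trick:

from typing import Any, Union
from pydantic import BaseModel, field_serializer

class MaskedConfig(BaseModel):
    mask: Union[str, list[str]] = ""
    raw_mask: Union[str, list[str]] = ""

    @field_serializer("mask", when_used="json")
    def serialize_mask(self, value: Any, info):
        # Emit the original user-supplied coordinates, not the parsed form.
        return self.raw_mask if self.raw_mask else value

    @field_serializer("raw_mask", when_used="json")
    def serialize_raw_mask(self, value: Any, info):
        return None  # never expose the internal copy

cfg = MaskedConfig(mask="0.1,0.1,0.9,0.9", raw_mask="0.1,0.1,0.9,0.9")
print(cfg.model_dump_json())  # {"mask":"0.1,0.1,0.9,0.9","raw_mask":null}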

View File

@ -6,40 +6,18 @@ __all__ = ["CameraMqttConfig"]
class CameraMqttConfig(FrigateBaseModel):
enabled: bool = Field(
default=True,
title="Send image",
description="Enable publishing image snapshots for objects to MQTT topics for this camera.",
)
timestamp: bool = Field(
default=True,
title="Add timestamp",
description="Overlay a timestamp on images published to MQTT.",
)
bounding_box: bool = Field(
default=True,
title="Add bounding box",
description="Draw bounding boxes on images published over MQTT.",
)
crop: bool = Field(
default=True,
title="Crop image",
description="Crop images published to MQTT to the detected object's bounding box.",
)
height: int = Field(
default=270,
title="Image height",
description="Height (pixels) to resize images published over MQTT.",
)
enabled: bool = Field(default=True, title="Send image over MQTT.")
timestamp: bool = Field(default=True, title="Add timestamp to MQTT image.")
bounding_box: bool = Field(default=True, title="Add bounding box to MQTT image.")
crop: bool = Field(default=True, title="Crop MQTT image to detected object.")
height: int = Field(default=270, title="MQTT image height.")
required_zones: list[str] = Field(
default_factory=list,
title="Required zones",
description="Zones that an object must enter for an MQTT image to be published.",
title="List of required zones to be entered in order to send the image.",
)
quality: int = Field(
default=70,
title="JPEG quality",
description="JPEG quality for images published to MQTT (0-100).",
title="Quality of the encoded jpeg (0-100).",
ge=0,
le=100,
)

View File

@ -8,24 +8,11 @@ __all__ = ["NotificationConfig"]
class NotificationConfig(FrigateBaseModel):
enabled: bool = Field(
default=False,
title="Enable notifications",
description="Enable or disable notifications for all cameras; can be overridden per-camera.",
)
email: Optional[str] = Field(
default=None,
title="Notification email",
description="Email address used for push notifications or required by certain notification providers.",
)
enabled: bool = Field(default=False, title="Enable notifications")
email: Optional[str] = Field(default=None, title="Email required for push.")
cooldown: int = Field(
default=0,
ge=0,
title="Cooldown period",
description="Cooldown (seconds) between notifications to avoid spamming recipients.",
default=0, ge=0, title="Cooldown period for notifications (time in seconds)."
)
enabled_in_config: Optional[bool] = Field(
default=None,
title="Original notifications state",
description="Indicates whether notifications were enabled in the original static configuration.",
default=None, title="Keep track of original state of notifications."
)

View File

@ -3,7 +3,6 @@ from typing import Any, Optional, Union
from pydantic import Field, PrivateAttr, field_serializer, field_validator
from ..base import FrigateBaseModel
from .mask import ObjectMaskConfig
__all__ = ["ObjectConfig", "GenAIObjectConfig", "FilterConfig"]
@ -14,48 +13,36 @@ DEFAULT_TRACKED_OBJECTS = ["person"]
class FilterConfig(FrigateBaseModel):
min_area: Union[int, float] = Field(
default=0,
title="Minimum object area",
description="Minimum bounding box area (pixels or percentage) required for this object type. Can be pixels (int) or percentage (float between 0.000001 and 0.99).",
title="Minimum area of bounding box for object to be counted. Can be pixels (int) or percentage (float between 0.000001 and 0.99).",
)
max_area: Union[int, float] = Field(
default=24000000,
title="Maximum object area",
description="Maximum bounding box area (pixels or percentage) allowed for this object type. Can be pixels (int) or percentage (float between 0.000001 and 0.99).",
title="Maximum area of bounding box for object to be counted. Can be pixels (int) or percentage (float between 0.000001 and 0.99).",
)
min_ratio: float = Field(
default=0,
title="Minimum aspect ratio",
description="Minimum width/height ratio required for the bounding box to qualify.",
title="Minimum ratio of bounding box's width/height for object to be counted.",
)
max_ratio: float = Field(
default=24000000,
title="Maximum aspect ratio",
description="Maximum width/height ratio allowed for the bounding box to qualify.",
title="Maximum ratio of bounding box's width/height for object to be counted.",
)
threshold: float = Field(
default=0.7,
title="Confidence threshold",
description="Average detection confidence threshold required for the object to be considered a true positive.",
title="Average detection confidence threshold for object to be counted.",
)
min_score: float = Field(
default=0.5,
title="Minimum confidence",
description="Minimum single-frame detection confidence required for the object to be counted.",
default=0.5, title="Minimum detection confidence for object to be counted."
)
mask: dict[str, Optional[ObjectMaskConfig]] = Field(
default_factory=dict,
title="Filter mask",
description="Polygon coordinates defining where this filter applies within the frame.",
)
raw_mask: dict[str, Optional[ObjectMaskConfig]] = Field(
default_factory=dict, exclude=True
mask: Optional[Union[str, list[str]]] = Field(
default=None,
title="Detection area polygon mask for this filter configuration.",
)
raw_mask: Union[str, list[str]] = ""
@field_serializer("mask", when_used="json")
def serialize_mask(self, value: Any, info):
if self.raw_mask:
return self.raw_mask
return value
return self.raw_mask
@field_serializer("raw_mask", when_used="json")
def serialize_raw_mask(self, value: Any, info):
@ -64,64 +51,46 @@ class FilterConfig(FrigateBaseModel):
class GenAIObjectTriggerConfig(FrigateBaseModel):
tracked_object_end: bool = Field(
default=True,
title="Send on end",
description="Send a request to GenAI when the tracked object ends.",
default=True, title="Send once the object is no longer tracked."
)
after_significant_updates: Optional[int] = Field(
default=None,
title="Early GenAI trigger",
description="Send a request to GenAI after a specified number of significant updates for the tracked object.",
title="Send an early request to generative AI when X frames accumulated.",
ge=1,
)
class GenAIObjectConfig(FrigateBaseModel):
enabled: bool = Field(
default=False,
title="Enable GenAI",
description="Enable GenAI generation of descriptions for tracked objects by default.",
)
enabled: bool = Field(default=False, title="Enable GenAI for camera.")
use_snapshot: bool = Field(
default=False,
title="Use snapshots",
description="Use object snapshots instead of thumbnails for GenAI description generation.",
default=False, title="Use snapshots for generating descriptions."
)
prompt: str = Field(
default="Analyze the sequence of images containing the {label}. Focus on the likely intent or behavior of the {label} based on its actions and movement, rather than describing its appearance or the surroundings. Consider what the {label} is doing, why, and what it might do next.",
title="Caption prompt",
description="Default prompt template used when generating descriptions with GenAI.",
title="Default caption prompt.",
)
object_prompts: dict[str, str] = Field(
default_factory=dict,
title="Object prompts",
description="Per-object prompts to customize GenAI outputs for specific labels.",
default_factory=dict, title="Object specific prompts."
)
objects: Union[str, list[str]] = Field(
default_factory=list,
title="GenAI objects",
description="List of object labels to send to GenAI by default.",
title="List of objects to run generative AI for.",
)
required_zones: Union[str, list[str]] = Field(
default_factory=list,
title="Required zones",
description="Zones that must be entered for objects to qualify for GenAI description generation.",
title="List of required zones to be entered in order to run generative AI.",
)
debug_save_thumbnails: bool = Field(
default=False,
title="Save thumbnails",
description="Save thumbnails sent to GenAI for debugging and review.",
title="Save thumbnails sent to generative AI for debugging purposes.",
)
send_triggers: GenAIObjectTriggerConfig = Field(
default_factory=GenAIObjectTriggerConfig,
title="GenAI triggers",
description="Defines when frames should be sent to GenAI (on end, after updates, etc.).",
title="What triggers to use to send frames to generative AI for a tracked object.",
)
enabled_in_config: Optional[bool] = Field(
default=None,
title="Original GenAI state",
description="Indicates whether GenAI was enabled in the original static config.",
default=None, title="Keep track of original state of generative AI."
)
@field_validator("required_zones", mode="before")
@ -134,28 +103,14 @@ class GenAIObjectConfig(FrigateBaseModel):
class ObjectConfig(FrigateBaseModel):
track: list[str] = Field(
default=DEFAULT_TRACKED_OBJECTS,
title="Objects to track",
description="List of object labels to track for all cameras; can be overridden per-camera.",
)
track: list[str] = Field(default=DEFAULT_TRACKED_OBJECTS, title="Objects to track.")
filters: dict[str, FilterConfig] = Field(
default_factory=dict,
title="Object filters",
description="Filters applied to detected objects to reduce false positives (area, ratio, confidence).",
)
mask: dict[str, Optional[ObjectMaskConfig]] = Field(
default_factory=dict,
title="Object mask",
description="Mask polygon used to prevent object detection in specified areas.",
)
raw_mask: dict[str, Optional[ObjectMaskConfig]] = Field(
default_factory=dict, exclude=True
default_factory=dict, title="Object filters."
)
mask: Union[str, list[str]] = Field(default="", title="Object mask.")
genai: GenAIObjectConfig = Field(
default_factory=GenAIObjectConfig,
title="GenAI object config",
description="GenAI options for describing tracked objects and sending frames for generation.",
title="Config for using genai to analyze objects.",
)
_all_objects: list[str] = PrivateAttr()
@ -174,13 +129,3 @@ class ObjectConfig(FrigateBaseModel):
enabled_labels.update(camera.objects.track)
self._all_objects = list(enabled_labels)
@field_serializer("mask", when_used="json")
def serialize_mask(self, value: Any, info):
if self.raw_mask:
return self.raw_mask
return value
@field_serializer("raw_mask", when_used="json")
def serialize_raw_mask(self, value: Any, info):
return None

View File

@ -17,57 +17,37 @@ class ZoomingModeEnum(str, Enum):
class PtzAutotrackConfig(FrigateBaseModel):
enabled: bool = Field(
default=False,
title="Enable Autotracking",
description="Enable or disable automatic PTZ camera tracking of detected objects.",
)
enabled: bool = Field(default=False, title="Enable PTZ object autotracking.")
calibrate_on_startup: bool = Field(
default=False,
title="Calibrate on start",
description="Measure PTZ motor speeds on startup to improve tracking accuracy. Frigate will update config with movement_weights after calibration.",
default=False, title="Perform a camera calibration when Frigate starts."
)
zooming: ZoomingModeEnum = Field(
default=ZoomingModeEnum.disabled,
title="Zoom mode",
description="Control zoom behavior: disabled (pan/tilt only), absolute (most compatible), or relative (concurrent pan/tilt/zoom).",
default=ZoomingModeEnum.disabled, title="Autotracker zooming mode."
)
zoom_factor: float = Field(
default=0.3,
title="Zoom factor",
description="Control zoom level on tracked objects. Lower values keep more scene in view; higher values zoom in closer but may lose tracking. Values between 0.1 and 0.75.",
title="Zooming factor (0.1-0.75).",
ge=0.1,
le=0.75,
)
track: list[str] = Field(
default=DEFAULT_TRACKED_OBJECTS,
title="Tracked objects",
description="List of object types that should trigger autotracking.",
)
track: list[str] = Field(default=DEFAULT_TRACKED_OBJECTS, title="Objects to track.")
required_zones: list[str] = Field(
default_factory=list,
title="Required zones",
description="Objects must enter one of these zones before autotracking begins.",
title="List of required zones to be entered in order to begin autotracking.",
)
return_preset: str = Field(
default="home",
title="Return preset",
description="ONVIF preset name configured in camera firmware to return to after tracking ends.",
title="Name of camera preset to return to when object tracking is over.",
)
timeout: int = Field(
default=10,
title="Return timeout",
description="Wait this many seconds after losing tracking before returning camera to preset position.",
default=10, title="Seconds to delay before returning to preset."
)
movement_weights: Optional[Union[str, list[str]]] = Field(
default_factory=list,
title="Movement weights",
description="Calibration values automatically generated by camera calibration. Do not modify manually.",
title="Internal value used for PTZ movements based on the speed of your camera's motor.",
)
enabled_in_config: Optional[bool] = Field(
default=None,
title="Original autotrack state",
description="Internal field to track whether autotracking was enabled in configuration.",
default=None, title="Keep track of original state of autotracking."
)
@field_validator("movement_weights", mode="before")
@ -92,38 +72,16 @@ class PtzAutotrackConfig(FrigateBaseModel):
class OnvifConfig(FrigateBaseModel):
host: str = Field(
default="",
title="ONVIF host",
description="Host (and optional scheme) for the ONVIF service for this camera.",
)
port: int = Field(
default=8000,
title="ONVIF port",
description="Port number for the ONVIF service.",
)
user: Optional[EnvString] = Field(
default=None,
title="ONVIF username",
description="Username for ONVIF authentication; some devices require admin user for ONVIF.",
)
password: Optional[EnvString] = Field(
default=None,
title="ONVIF password",
description="Password for ONVIF authentication.",
)
tls_insecure: bool = Field(
default=False,
title="Disable TLS verify",
description="Skip TLS verification and disable digest auth for ONVIF (unsafe; use in safe networks only).",
)
host: str = Field(default="", title="Onvif Host")
port: int = Field(default=8000, title="Onvif Port")
user: Optional[EnvString] = Field(default=None, title="Onvif Username")
password: Optional[EnvString] = Field(default=None, title="Onvif Password")
tls_insecure: bool = Field(default=False, title="Onvif Disable TLS verification")
autotracking: PtzAutotrackConfig = Field(
default_factory=PtzAutotrackConfig,
title="Autotracking",
description="Automatically track moving objects and keep them centered in the frame using PTZ camera movements.",
title="PTZ auto tracking config.",
)
ignore_time_mismatch: bool = Field(
default=False,
title="Ignore time mismatch",
description="Ignore time synchronization differences between camera and Frigate server for ONVIF communication.",
title="Onvif Ignore Time Synchronization Mismatch Between Camera and Server",
)
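
The @field_validator("movement_weights", mode="before") hook above is the standard pydantic v2 pattern for accepting either a comma-separated string or a list. A hedged sketch of that normalization (class name and exact parsing are illustrative, not Frigate's):

from typing import Optional, Union

from pydantic import BaseModel, Field, field_validator


class AutotrackSketch(BaseModel):
    movement_weights: Optional[Union[str, list[str]]] = Field(default_factory=list)

    @field_validator("movement_weights", mode="before")
    @classmethod
    def parse_weights(cls, v):
        # Normalize "0.1,0.2" into ["0.1", "0.2"]; pass lists through.
        if isinstance(v, str):
            return [part.strip() for part in v.split(",")]
        return v


print(AutotrackSketch(movement_weights="0.1, 0.2, 0.3").movement_weights)
# -> ['0.1', '0.2', '0.3']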

View File

@ -1,5 +1,5 @@
from enum import Enum
from typing import Optional, Union
from typing import Optional
from pydantic import Field
@ -19,14 +19,11 @@ __all__ = [
"RetainModeEnum",
]
DEFAULT_TIME_LAPSE_FFMPEG_ARGS = "-vf setpts=0.04*PTS -r 30"
class RecordRetainConfig(FrigateBaseModel):
days: float = Field(
default=0,
ge=0,
title="Retention days",
description="Days to retain recordings.",
)
days: float = Field(default=0, ge=0, title="Default retention period.")
class RetainModeEnum(str, Enum):
@ -36,37 +33,22 @@ class RetainModeEnum(str, Enum):
class ReviewRetainConfig(FrigateBaseModel):
days: float = Field(
default=10,
ge=0,
title="Retention days",
description="Number of days to retain recordings of detection events.",
)
mode: RetainModeEnum = Field(
default=RetainModeEnum.motion,
title="Retention mode",
description="Mode for retention: all (save all segments), motion (save segments with motion), or active_objects (save segments with active objects).",
)
days: float = Field(default=10, ge=0, title="Default retention period.")
mode: RetainModeEnum = Field(default=RetainModeEnum.motion, title="Retain mode.")
class EventsConfig(FrigateBaseModel):
pre_capture: int = Field(
default=5,
title="Pre-capture seconds",
description="Number of seconds before the detection event to include in the recording.",
title="Seconds to retain before event starts.",
le=MAX_PRE_CAPTURE,
ge=0,
)
post_capture: int = Field(
default=5,
ge=0,
title="Post-capture seconds",
description="Number of seconds after the detection event to include in the recording.",
default=5, ge=0, title="Seconds to retain after event ends."
)
retain: ReviewRetainConfig = Field(
default_factory=ReviewRetainConfig,
title="Event retention",
description="Retention settings for recordings of detection events.",
default_factory=ReviewRetainConfig, title="Event retention settings."
)
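
As a worked example of the pre_capture/post_capture fields above (hypothetical timestamps): the saved clip is the event window padded on both sides.

# Illustrative only: how pre/post capture pad an event's recording window.
pre_capture, post_capture = 5, 5          # seconds, as configured above
event_start, event_end = 100.0, 160.0     # hypothetical event timestamps
clip_start = event_start - pre_capture    # 95.0
clip_end = event_end + post_capture       # 165.0
print(clip_start, clip_end)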
@ -80,65 +62,46 @@ class RecordQualityEnum(str, Enum):
class RecordPreviewConfig(FrigateBaseModel):
quality: RecordQualityEnum = Field(
default=RecordQualityEnum.medium,
title="Preview quality",
description="Preview quality level (very_low, low, medium, high, very_high).",
default=RecordQualityEnum.medium, title="Quality of recording preview."
)
class RecordExportConfig(FrigateBaseModel):
hwaccel_args: Union[str, list[str]] = Field(
default="auto",
title="Export hwaccel args",
description="Hardware acceleration args to use for export/transcode operations.",
timelapse_args: str = Field(
default=DEFAULT_TIME_LAPSE_FFMPEG_ARGS, title="Timelapse Args"
)
class RecordConfig(FrigateBaseModel):
enabled: bool = Field(
default=False,
title="Enable recording",
description="Enable or disable recording for all cameras; can be overridden per-camera.",
enabled: bool = Field(default=False, title="Enable record on all cameras.")
sync_recordings: bool = Field(
default=False, title="Sync recordings with disk on startup and once a day."
)
expire_interval: int = Field(
default=60,
title="Record cleanup interval",
description="Minutes between cleanup passes that remove expired recording segments.",
title="Number of minutes to wait between cleanup runs.",
)
continuous: RecordRetainConfig = Field(
default_factory=RecordRetainConfig,
title="Continuous retention",
description="Number of days to retain recordings regardless of tracked objects or motion. Set to 0 if you only want to retain recordings of alerts and detections.",
title="Continuous recording retention settings.",
)
motion: RecordRetainConfig = Field(
default_factory=RecordRetainConfig,
title="Motion retention",
description="Number of days to retain recordings triggered by motion regardless of tracked objects. Set to 0 if you only want to retain recordings of alerts and detections.",
default_factory=RecordRetainConfig, title="Motion recording retention settings."
)
detections: EventsConfig = Field(
default_factory=EventsConfig,
title="Detection retention",
description="Recording retention settings for detection events including pre/post capture durations.",
default_factory=EventsConfig, title="Detection specific retention settings."
)
alerts: EventsConfig = Field(
default_factory=EventsConfig,
title="Alert retention",
description="Recording retention settings for alert events including pre/post capture durations.",
default_factory=EventsConfig, title="Alert specific retention settings."
)
export: RecordExportConfig = Field(
default_factory=RecordExportConfig,
title="Export config",
description="Settings used when exporting recordings such as timelapse and hardware acceleration.",
default_factory=RecordExportConfig, title="Recording Export Config"
)
preview: RecordPreviewConfig = Field(
default_factory=RecordPreviewConfig,
title="Preview config",
description="Settings controlling the quality of recording previews shown in the UI.",
default_factory=RecordPreviewConfig, title="Recording Preview Config"
)
enabled_in_config: Optional[bool] = Field(
default=None,
title="Original recording state",
description="Indicates whether recording was enabled in the original static configuration.",
default=None, title="Keep track of original state of recording."
)
@property
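
A hedged sketch of how a retain mode like the ones described above could gate whether a recording segment is kept (illustrative logic, not Frigate's actual retention code):

from enum import Enum


class RetainModeEnum(str, Enum):
    all = "all"
    motion = "motion"
    active_objects = "active_objects"


def keep_segment(mode: RetainModeEnum, has_motion: bool, has_active_objects: bool) -> bool:
    # all keeps everything; motion keeps segments with motion;
    # active_objects keeps only segments with actively moving objects.
    if mode == RetainModeEnum.all:
        return True
    if mode == RetainModeEnum.motion:
        return has_motion
    return has_active_objects


assert keep_segment(RetainModeEnum.motion, True, False)
assert not keep_segment(RetainModeEnum.active_objects, True, False)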

View File

@ -21,32 +21,22 @@ DEFAULT_ALERT_OBJECTS = ["person", "car"]
class AlertsConfig(FrigateBaseModel):
"""Configure alerts"""
enabled: bool = Field(
default=True,
title="Enable alerts",
description="Enable or disable alert generation for all cameras; can be overridden per-camera.",
)
enabled: bool = Field(default=True, title="Enable alerts.")
labels: list[str] = Field(
default=DEFAULT_ALERT_OBJECTS,
title="Alert labels",
description="List of object labels that qualify as alerts (for example: car, person).",
default=DEFAULT_ALERT_OBJECTS, title="Labels to create alerts for."
)
required_zones: Union[str, list[str]] = Field(
default_factory=list,
title="Required zones",
description="Zones that an object must enter to be considered an alert; leave empty to allow any zone.",
title="List of required zones to be entered in order to save the event as an alert.",
)
enabled_in_config: Optional[bool] = Field(
default=None,
title="Original alerts state",
description="Tracks whether alerts were originally enabled in the static configuration.",
default=None, title="Keep track of original state of alerts."
)
cutoff_time: int = Field(
default=40,
title="Alerts cutoff time",
description="Seconds to wait after no alert-causing activity before cutting off an alert.",
title="Time to cutoff alerts after no alert-causing activity has occurred.",
)
@field_validator("required_zones", mode="before")
@ -61,32 +51,22 @@ class AlertsConfig(FrigateBaseModel):
class DetectionsConfig(FrigateBaseModel):
"""Configure detections"""
enabled: bool = Field(
default=True,
title="Enable detections",
description="Enable or disable detection events for all cameras; can be overridden per-camera.",
)
enabled: bool = Field(default=True, title="Enable detections.")
labels: Optional[list[str]] = Field(
default=None,
title="Detection labels",
description="List of object labels that qualify as detection events.",
default=None, title="Labels to create detections for."
)
required_zones: Union[str, list[str]] = Field(
default_factory=list,
title="Required zones",
description="Zones that an object must enter to be considered a detection; leave empty to allow any zone.",
title="List of required zones to be entered in order to save the event as a detection.",
)
cutoff_time: int = Field(
default=30,
title="Detections cutoff time",
description="Seconds to wait after no detection-causing activity before cutting off a detection.",
title="Time to cutoff detection after no detection-causing activity has occurred.",
)
enabled_in_config: Optional[bool] = Field(
default=None,
title="Original detections state",
description="Tracks whether detections were originally enabled in the static configuration.",
default=None, title="Keep track of original state of detections."
)
@field_validator("required_zones", mode="before")
@ -101,42 +81,27 @@ class DetectionsConfig(FrigateBaseModel):
class GenAIReviewConfig(FrigateBaseModel):
enabled: bool = Field(
default=False,
title="Enable GenAI descriptions",
description="Enable or disable GenAI-generated descriptions and summaries for review items.",
)
alerts: bool = Field(
default=True,
title="Enable GenAI for alerts",
description="Use GenAI to generate descriptions for alert items.",
)
detections: bool = Field(
default=False,
title="Enable GenAI for detections",
description="Use GenAI to generate descriptions for detection items.",
title="Enable GenAI descriptions for review items.",
)
alerts: bool = Field(default=True, title="Enable GenAI for alerts.")
detections: bool = Field(default=False, title="Enable GenAI for detections.")
image_source: ImageSourceEnum = Field(
default=ImageSourceEnum.preview,
title="Review image source",
description="Source of images sent to GenAI ('preview' or 'recordings'); 'recordings' uses higher quality frames but more tokens.",
title="Image source for review descriptions.",
)
additional_concerns: list[str] = Field(
default=[],
title="Additional concerns",
description="A list of additional concerns or notes the GenAI should consider when evaluating activity on this camera.",
title="Additional concerns that GenAI should make note of on this camera.",
)
debug_save_thumbnails: bool = Field(
default=False,
title="Save thumbnails",
description="Save thumbnails that are sent to the GenAI provider for debugging and review.",
title="Save thumbnails sent to generative AI for debugging purposes.",
)
enabled_in_config: Optional[bool] = Field(
default=None,
title="Original GenAI state",
description="Tracks whether GenAI review was originally enabled in the static configuration.",
default=None, title="Keep track of original state of generative AI."
)
preferred_language: str | None = Field(
title="Preferred language",
description="Preferred language to request from the GenAI provider for generated responses.",
title="Preferred language for GenAI Response",
default=None,
)
activity_context_prompt: str = Field(
@ -174,24 +139,19 @@ Evaluate in this order:
3. **Escalate to Level 2 if:** Weapons, break-in tools, forced entry in progress, violence, or active property damage visible (escalates from Level 0 or 1)
The mere presence of an unidentified person in private areas during late night hours is inherently suspicious and warrants human review, regardless of what activity they appear to be doing or how brief the sequence is.""",
title="Activity context prompt",
description="Custom prompt describing what is and is not suspicious activity to provide context for GenAI summaries.",
title="Custom activity context prompt defining normal and suspicious activity patterns for this property.",
)
class ReviewConfig(FrigateBaseModel):
"""Configure reviews"""
alerts: AlertsConfig = Field(
default_factory=AlertsConfig,
title="Alerts config",
description="Settings for which tracked objects generate alerts and how alerts are retained.",
default_factory=AlertsConfig, title="Review alerts config."
)
detections: DetectionsConfig = Field(
default_factory=DetectionsConfig,
title="Detections config",
description="Settings for creating detection events (non-alert) and how long to keep them.",
default_factory=DetectionsConfig, title="Review detections config."
)
genai: GenAIReviewConfig = Field(
default_factory=GenAIReviewConfig,
title="GenAI config",
description="Controls use of generative AI for producing descriptions and summaries of review items.",
default_factory=GenAIReviewConfig, title="Review description genai config."
)
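
ReviewConfig composes its sections with default_factory, so omitted sections still materialize with their defaults. A minimal sketch assuming pydantic v2 (sketch classes, not Frigate's):

from pydantic import BaseModel, Field


class AlertsSketch(BaseModel):
    enabled: bool = Field(default=True)
    cutoff_time: int = Field(default=40)


class ReviewSketch(BaseModel):
    # default_factory builds a fully-defaulted AlertsSketch when omitted.
    alerts: AlertsSketch = Field(default_factory=AlertsSketch)


print(ReviewSketch().alerts.cutoff_time)  # -> 40, with no config supplied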

View File

@ -9,68 +9,36 @@ __all__ = ["SnapshotsConfig", "RetainConfig"]
class RetainConfig(FrigateBaseModel):
default: float = Field(
default=10,
title="Default retention",
description="Default number of days to retain snapshots.",
)
mode: RetainModeEnum = Field(
default=RetainModeEnum.motion,
title="Retention mode",
description="Mode for retention: all (save all segments), motion (save segments with motion), or active_objects (save segments with active objects).",
)
default: float = Field(default=10, title="Default retention period.")
mode: RetainModeEnum = Field(default=RetainModeEnum.motion, title="Retain mode.")
objects: dict[str, float] = Field(
default_factory=dict,
title="Object retention",
description="Per-object overrides for snapshot retention days.",
default_factory=dict, title="Object retention period."
)
class SnapshotsConfig(FrigateBaseModel):
enabled: bool = Field(
default=False,
title="Snapshots enabled",
description="Enable or disable saving snapshots for all cameras; can be overridden per-camera.",
)
enabled: bool = Field(default=False, title="Snapshots enabled.")
clean_copy: bool = Field(
default=True,
title="Save clean copy",
description="Save an unannotated clean copy of snapshots in addition to annotated ones.",
default=True, title="Create a clean copy of the snapshot image."
)
timestamp: bool = Field(
default=False,
title="Timestamp overlay",
description="Overlay a timestamp on saved snapshots.",
default=False, title="Add a timestamp overlay on the snapshot."
)
bounding_box: bool = Field(
default=True,
title="Bounding box overlay",
description="Draw bounding boxes for tracked objects on saved snapshots.",
)
crop: bool = Field(
default=False,
title="Crop snapshot",
description="Crop saved snapshots to the detected object's bounding box.",
default=True, title="Add a bounding box overlay on the snapshot."
)
crop: bool = Field(default=False, title="Crop the snapshot to the detected object.")
required_zones: list[str] = Field(
default_factory=list,
title="Required zones",
description="Zones an object must enter for a snapshot to be saved.",
)
height: Optional[int] = Field(
default=None,
title="Snapshot height",
description="Height (pixels) to resize saved snapshots to; leave empty to preserve original size.",
title="List of required zones to be entered in order to save a snapshot.",
)
height: Optional[int] = Field(default=None, title="Snapshot image height.")
retain: RetainConfig = Field(
default_factory=RetainConfig,
title="Snapshot retention",
description="Retention settings for saved snapshots including default days and per-object overrides.",
default_factory=RetainConfig, title="Snapshot retention."
)
quality: int = Field(
default=70,
title="JPEG quality",
description="JPEG encode quality for saved snapshots (0-100).",
title="Quality of the encoded jpeg (0-100).",
ge=0,
le=100,
)
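
A hedged sketch of how the per-object retention overrides above could resolve against the default (days_for is an illustrative helper, not a Frigate method):

from pydantic import BaseModel, Field


class RetainSketch(BaseModel):
    default: float = Field(default=10)
    objects: dict[str, float] = Field(default_factory=dict)

    def days_for(self, label: str) -> float:
        # Per-object override wins; otherwise fall back to the default.
        return self.objects.get(label, self.default)


retain = RetainSketch(objects={"person": 30})
print(retain.days_for("person"), retain.days_for("car"))  # -> 30.0 10.0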

View File

@ -27,27 +27,9 @@ class TimestampPositionEnum(str, Enum):
class ColorConfig(FrigateBaseModel):
red: int = Field(
default=255,
ge=0,
le=255,
title="Red",
description="Red component (0-255) for timestamp color.",
)
green: int = Field(
default=255,
ge=0,
le=255,
title="Green",
description="Green component (0-255) for timestamp color.",
)
blue: int = Field(
default=255,
ge=0,
le=255,
title="Blue",
description="Blue component (0-255) for timestamp color.",
)
red: int = Field(default=255, ge=0, le=255, title="Red")
green: int = Field(default=255, ge=0, le=255, title="Green")
blue: int = Field(default=255, ge=0, le=255, title="Blue")
class TimestampEffectEnum(str, Enum):
@ -57,27 +39,11 @@ class TimestampEffectEnum(str, Enum):
class TimestampStyleConfig(FrigateBaseModel):
position: TimestampPositionEnum = Field(
default=TimestampPositionEnum.tl,
title="Timestamp position",
description="Position of the timestamp on the image (tl/tr/bl/br).",
)
format: str = Field(
default=DEFAULT_TIME_FORMAT,
title="Timestamp format",
description="Datetime format string used for timestamps (Python datetime format codes).",
)
color: ColorConfig = Field(
default_factory=ColorConfig,
title="Timestamp color",
description="RGB color values for the timestamp text (all values 0-255).",
)
thickness: int = Field(
default=2,
title="Timestamp thickness",
description="Line thickness of the timestamp text.",
default=TimestampPositionEnum.tl, title="Timestamp position."
)
format: str = Field(default=DEFAULT_TIME_FORMAT, title="Timestamp format.")
color: ColorConfig = Field(default_factory=ColorConfig, title="Timestamp color.")
thickness: int = Field(default=2, title="Timestamp thickness.")
effect: Optional[TimestampEffectEnum] = Field(
default=None,
title="Timestamp effect",
description="Visual effect for the timestamp text (none, solid, shadow).",
default=None, title="Timestamp effect."
)
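
An illustrative sketch of consuming this timestamp style: render the configured format string and read the color as an RGB tuple (sketch classes; DEFAULT_TIME_FORMAT is assumed to be a strftime string like the one below):

from datetime import datetime

from pydantic import BaseModel, Field


class ColorSketch(BaseModel):
    red: int = Field(default=255, ge=0, le=255)
    green: int = Field(default=255, ge=0, le=255)
    blue: int = Field(default=255, ge=0, le=255)


class TimestampSketch(BaseModel):
    format: str = Field(default="%m/%d/%Y %H:%M:%S")
    color: ColorSketch = Field(default_factory=ColorSketch)


style = TimestampSketch()
print(datetime.now().strftime(style.format))               # formatted timestamp
print((style.color.red, style.color.green, style.color.blue))  # -> (255, 255, 255)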

View File

@ -6,13 +6,7 @@ __all__ = ["CameraUiConfig"]
class CameraUiConfig(FrigateBaseModel):
order: int = Field(
default=0,
title="UI order",
description="Numeric order used to sort the camera in the UI (default dashboard and lists); larger numbers appear later.",
)
order: int = Field(default=0, title="Order of camera in UI.")
dashboard: bool = Field(
default=True,
title="Show in UI",
description="Toggle whether this camera is visible everywhere in the Frigate UI. Disabling this will require manually editing the config to view this camera in the UI again.",
default=True, title="Show this camera in Frigate dashboard UI."
)
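
Illustrative one-liner: the order field above is effectively a sort key for camera layout (hypothetical camera names):

ui_order = {"yard": 2, "porch": 0, "driveway": 1}  # camera -> ui.order (assumed shape)
print(sorted(ui_order, key=ui_order.get))  # -> ['porch', 'driveway', 'yard']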

View File

@ -17,7 +17,6 @@ class CameraConfigUpdateEnum(str, Enum):
birdseye = "birdseye"
detect = "detect"
enabled = "enabled"
ffmpeg = "ffmpeg"
motion = "motion" # includes motion and motion masks
notifications = "notifications"
objects = "objects"
@ -81,8 +80,8 @@ class CameraConfigUpdateSubscriber:
self.camera_configs[camera] = updated_config
return
elif update_type == CameraConfigUpdateEnum.remove:
self.config.cameras.pop(camera, None)
self.camera_configs.pop(camera, None)
self.config.cameras.pop(camera)
self.camera_configs.pop(camera)
return
config = self.camera_configs.get(camera)
@ -92,9 +91,6 @@ class CameraConfigUpdateSubscriber:
if update_type == CameraConfigUpdateEnum.audio:
config.audio = updated_config
elif update_type == CameraConfigUpdateEnum.ffmpeg:
config.ffmpeg = updated_config
config.recreate_ffmpeg_cmds()
elif update_type == CameraConfigUpdateEnum.audio_transcription:
config.audio_transcription = updated_config
elif update_type == CameraConfigUpdateEnum.birdseye:
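
The fix above swaps dict.pop(key) for dict.pop(key, None); a quick illustration of why (plain Python, hypothetical camera names):

cameras = {"porch": object()}
cameras.pop("yard", None)  # safe no-op when the camera was never registered
try:
    cameras.pop("yard")    # without a default this raises
except KeyError:
    print("pop without a default raised KeyError")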

Some files were not shown because too many files have changed in this diff