Compare commits


1 Commit

Author SHA1 Message Date
dependabot[bot]
0e31394a20
Bump lodash-es from 4.17.23 to 4.18.1 in /web
Bumps [lodash-es](https://github.com/lodash/lodash) from 4.17.23 to 4.18.1.
- [Release notes](https://github.com/lodash/lodash/releases)
- [Commits](https://github.com/lodash/lodash/compare/4.17.23...4.18.1)

---
updated-dependencies:
- dependency-name: lodash-es
  dependency-version: 4.18.1
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
2026-04-02 13:46:10 +00:00
438 changed files with 2646 additions and 28023 deletions

View File

@@ -16,7 +16,7 @@ jobs:
uses: actions/github-script@v7 uses: actions/github-script@v7
with: with:
script: | script: |
const maintainers = ['blakeblackshear', 'NickM-27', 'hawkeye217', 'dependabot[bot]', 'weblate']; const maintainers = ['blakeblackshear', 'NickM-27', 'hawkeye217', 'dependabot[bot]'];
const author = context.payload.pull_request.user.login; const author = context.payload.pull_request.user.login;
if (maintainers.includes(author)) { if (maintainers.includes(author)) {

View File

@@ -50,37 +50,6 @@ jobs:
# run: npm run test # run: npm run test
# working-directory: ./web # working-directory: ./web
web_e2e:
name: Web - E2E Tests
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v6
with:
persist-credentials: false
- uses: actions/setup-node@v6
with:
node-version: 20.x
- run: npm install
working-directory: ./web
- name: Install Playwright Chromium
run: npx playwright install chromium --with-deps
working-directory: ./web
- name: Build web for E2E
run: npm run e2e:build
working-directory: ./web
- name: Run E2E tests
run: npm run e2e
working-directory: ./web
- name: Upload test artifacts
uses: actions/upload-artifact@v4
if: failure()
with:
name: playwright-report
path: |
web/test-results/
web/playwright-report/
retention-days: 7
python_checks: python_checks:
runs-on: ubuntu-latest runs-on: ubuntu-latest
name: Python Checks name: Python Checks

View File

@@ -14,8 +14,6 @@ services:
dockerfile: docker/main/Dockerfile dockerfile: docker/main/Dockerfile
# Use target devcontainer-trt for TensorRT dev # Use target devcontainer-trt for TensorRT dev
target: devcontainer target: devcontainer
cache_from:
- ghcr.io/blakeblackshear/frigate:cache-amd64
## Uncomment this block for nvidia gpu support ## Uncomment this block for nvidia gpu support
# deploy: # deploy:
# resources: # resources:

View File

@@ -52,14 +52,6 @@ RUN --mount=type=tmpfs,target=/tmp --mount=type=tmpfs,target=/var/cache/apt \
--mount=type=cache,target=/root/.ccache \ --mount=type=cache,target=/root/.ccache \
/deps/build_sqlite_vec.sh /deps/build_sqlite_vec.sh
# Build intel-media-driver from source against bookworm's system libva so it
# works with Debian 12's glibc/libstdc++ (pre-built noble/trixie packages
# require glibc 2.38 which is not available on bookworm).
FROM base AS intel-media-driver
ARG DEBIAN_FRONTEND
RUN --mount=type=bind,source=docker/main/build_intel_media_driver.sh,target=/deps/build_intel_media_driver.sh \
/deps/build_intel_media_driver.sh
FROM scratch AS go2rtc FROM scratch AS go2rtc
ARG TARGETARCH ARG TARGETARCH
WORKDIR /rootfs/usr/local/go2rtc/bin WORKDIR /rootfs/usr/local/go2rtc/bin
@@ -208,7 +200,6 @@ RUN --mount=type=bind,source=docker/main/install_hailort.sh,target=/deps/install
FROM scratch AS deps-rootfs FROM scratch AS deps-rootfs
COPY --from=nginx /usr/local/nginx/ /usr/local/nginx/ COPY --from=nginx /usr/local/nginx/ /usr/local/nginx/
COPY --from=sqlite-vec /usr/local/lib/ /usr/local/lib/ COPY --from=sqlite-vec /usr/local/lib/ /usr/local/lib/
COPY --from=intel-media-driver /rootfs/ /
COPY --from=go2rtc /rootfs/ / COPY --from=go2rtc /rootfs/ /
COPY --from=libusb-build /usr/local/lib /usr/local/lib COPY --from=libusb-build /usr/local/lib /usr/local/lib
COPY --from=tempio /rootfs/ / COPY --from=tempio /rootfs/ /

View File

@@ -1,48 +0,0 @@
#!/bin/bash
set -euxo pipefail
# Intel media driver is x86_64-only. Create empty rootfs on other arches so
# the downstream COPY --from has a valid source.
if [ "$(uname -m)" != "x86_64" ]; then
mkdir -p /rootfs
exit 0
fi
MEDIA_DRIVER_VERSION="intel-media-25.2.6"
GMMLIB_VERSION="intel-gmmlib-22.7.2"
apt-get -qq update
apt-get -qq install -y wget gnupg ca-certificates cmake g++ make pkg-config
# Use Intel's jammy repo for newer libva-dev (2.22) which provides the
# VVC/VVC-decode headers required by media-driver 25.x
wget -qO - https://repositories.intel.com/gpu/intel-graphics.key | gpg --yes --dearmor --output /usr/share/keyrings/intel-graphics.gpg
echo "deb [arch=amd64 signed-by=/usr/share/keyrings/intel-graphics.gpg] https://repositories.intel.com/gpu/ubuntu jammy client" > /etc/apt/sources.list.d/intel-gpu-jammy.list
apt-get -qq update
apt-get -qq install -y libva-dev
# Build gmmlib (required by media-driver)
wget -qO gmmlib.tar.gz "https://github.com/intel/gmmlib/archive/refs/tags/${GMMLIB_VERSION}.tar.gz"
mkdir /tmp/gmmlib
tar -xf gmmlib.tar.gz -C /tmp/gmmlib --strip-components 1
cmake -S /tmp/gmmlib -B /tmp/gmmlib/build -DCMAKE_BUILD_TYPE=Release
make -C /tmp/gmmlib/build -j"$(nproc)"
make -C /tmp/gmmlib/build install
# Build intel-media-driver
wget -qO media-driver.tar.gz "https://github.com/intel/media-driver/archive/refs/tags/${MEDIA_DRIVER_VERSION}.tar.gz"
mkdir /tmp/media-driver
tar -xf media-driver.tar.gz -C /tmp/media-driver --strip-components 1
cmake -S /tmp/media-driver -B /tmp/media-driver/build \
-DCMAKE_BUILD_TYPE=Release \
-DENABLE_KERNELS=ON \
-DENABLE_NONFREE_KERNELS=ON \
-DCMAKE_INSTALL_PREFIX=/usr \
-DCMAKE_INSTALL_LIBDIR=/usr/lib/x86_64-linux-gnu \
-DCMAKE_C_FLAGS="-Wno-error" \
-DCMAKE_CXX_FLAGS="-Wno-error"
make -C /tmp/media-driver/build -j"$(nproc)"
# Install driver to rootfs for COPY --from
make -C /tmp/media-driver/build install DESTDIR=/rootfs

View File

@@ -87,47 +87,38 @@ if [[ "${TARGETARCH}" == "amd64" ]]; then
# intel packages use zst compression so we need to update dpkg # intel packages use zst compression so we need to update dpkg
apt-get install -y dpkg apt-get install -y dpkg
# use intel apt repo for libmfx1 (legacy QSV, pre-Gen12) # use intel apt intel packages
wget -qO - https://repositories.intel.com/gpu/intel-graphics.key | gpg --yes --dearmor --output /usr/share/keyrings/intel-graphics.gpg wget -qO - https://repositories.intel.com/gpu/intel-graphics.key | gpg --yes --dearmor --output /usr/share/keyrings/intel-graphics.gpg
echo "deb [arch=amd64 signed-by=/usr/share/keyrings/intel-graphics.gpg] https://repositories.intel.com/gpu/ubuntu jammy client" | tee /etc/apt/sources.list.d/intel-gpu-jammy.list echo "deb [arch=amd64 signed-by=/usr/share/keyrings/intel-graphics.gpg] https://repositories.intel.com/gpu/ubuntu jammy client" | tee /etc/apt/sources.list.d/intel-gpu-jammy.list
apt-get -qq update apt-get -qq update
# intel-media-va-driver-non-free is built from source in the
# intel-media-driver Dockerfile stage for Battlemage (Xe2) support
apt-get -qq install --no-install-recommends --no-install-suggests -y \ apt-get -qq install --no-install-recommends --no-install-suggests -y \
libmfx1 intel-media-va-driver-non-free libmfx1 libmfxgen1 libvpl2
rm -f /usr/share/keyrings/intel-graphics.gpg
rm -f /etc/apt/sources.list.d/intel-gpu-jammy.list
# upgrade libva2, oneVPL runtime, and libvpl2 from trixie for Battlemage support
echo "deb http://deb.debian.org/debian trixie main" > /etc/apt/sources.list.d/trixie.list
apt-get -qq update
apt-get -qq install -y -t trixie libva2 libva-drm2 libzstd1
apt-get -qq install -y -t trixie libmfx-gen1.2 libvpl2
rm -f /etc/apt/sources.list.d/trixie.list
apt-get -qq update
apt-get -qq install -y ocl-icd-libopencl1 apt-get -qq install -y ocl-icd-libopencl1
# install libtbb12 for NPU support # install libtbb12 for NPU support
apt-get -qq install -y libtbb12 apt-get -qq install -y libtbb12
# install legacy and standard intel compute packages rm -f /usr/share/keyrings/intel-graphics.gpg
rm -f /etc/apt/sources.list.d/intel-gpu-jammy.list
# install legacy and standard intel icd and level-zero-gpu
# see https://github.com/intel/compute-runtime/blob/master/LEGACY_PLATFORMS.md for more info # see https://github.com/intel/compute-runtime/blob/master/LEGACY_PLATFORMS.md for more info
# needed core package # needed core package
wget https://github.com/intel/compute-runtime/releases/download/26.14.37833.4/libigdgmm12_22.9.0_amd64.deb wget https://github.com/intel/compute-runtime/releases/download/25.13.33276.19/libigdgmm12_22.7.0_amd64.deb
dpkg -i libigdgmm12_22.9.0_amd64.deb dpkg -i libigdgmm12_22.7.0_amd64.deb
rm libigdgmm12_22.9.0_amd64.deb rm libigdgmm12_22.7.0_amd64.deb
# legacy compute-runtime packages # legacy packages
wget https://github.com/intel/compute-runtime/releases/download/24.35.30872.36/intel-opencl-icd-legacy1_24.35.30872.36_amd64.deb wget https://github.com/intel/compute-runtime/releases/download/24.35.30872.36/intel-opencl-icd-legacy1_24.35.30872.36_amd64.deb
wget https://github.com/intel/compute-runtime/releases/download/24.35.30872.36/intel-level-zero-gpu-legacy1_1.5.30872.36_amd64.deb wget https://github.com/intel/compute-runtime/releases/download/24.35.30872.36/intel-level-zero-gpu-legacy1_1.5.30872.36_amd64.deb
wget https://github.com/intel/intel-graphics-compiler/releases/download/igc-1.0.17537.24/intel-igc-opencl_1.0.17537.24_amd64.deb wget https://github.com/intel/intel-graphics-compiler/releases/download/igc-1.0.17537.24/intel-igc-opencl_1.0.17537.24_amd64.deb
wget https://github.com/intel/intel-graphics-compiler/releases/download/igc-1.0.17537.24/intel-igc-core_1.0.17537.24_amd64.deb wget https://github.com/intel/intel-graphics-compiler/releases/download/igc-1.0.17537.24/intel-igc-core_1.0.17537.24_amd64.deb
# standard compute-runtime packages # standard packages
wget https://github.com/intel/compute-runtime/releases/download/26.14.37833.4/intel-opencl-icd_26.14.37833.4-0_amd64.deb wget https://github.com/intel/compute-runtime/releases/download/25.13.33276.19/intel-opencl-icd_25.13.33276.19_amd64.deb
wget https://github.com/intel/compute-runtime/releases/download/26.14.37833.4/libze-intel-gpu1_26.14.37833.4-0_amd64.deb wget https://github.com/intel/compute-runtime/releases/download/25.13.33276.19/intel-level-zero-gpu_1.6.33276.19_amd64.deb
wget https://github.com/intel/intel-graphics-compiler/releases/download/v2.32.7/intel-igc-opencl-2_2.32.7+21184_amd64.deb wget https://github.com/intel/intel-graphics-compiler/releases/download/v2.10.10/intel-igc-opencl-2_2.10.10+18926_amd64.deb
wget https://github.com/intel/intel-graphics-compiler/releases/download/v2.32.7/intel-igc-core-2_2.32.7+21184_amd64.deb wget https://github.com/intel/intel-graphics-compiler/releases/download/v2.10.10/intel-igc-core-2_2.10.10+18926_amd64.deb
# npu packages # npu packages
wget https://github.com/oneapi-src/level-zero/releases/download/v1.28.2/level-zero_1.28.2+u22.04_amd64.deb wget https://github.com/oneapi-src/level-zero/releases/download/v1.28.2/level-zero_1.28.2+u22.04_amd64.deb
wget https://github.com/intel/linux-npu-driver/releases/download/v1.19.0/intel-driver-compiler-npu_1.19.0.20250707-16111289554_ubuntu22.04_amd64.deb wget https://github.com/intel/linux-npu-driver/releases/download/v1.19.0/intel-driver-compiler-npu_1.19.0.20250707-16111289554_ubuntu22.04_amd64.deb
@@ -137,10 +128,6 @@ if [[ "${TARGETARCH}" == "amd64" ]]; then
dpkg -i *.deb dpkg -i *.deb
rm *.deb rm *.deb
apt-get -qq install -f -y apt-get -qq install -f -y
# Battlemage uses the xe kernel driver, but the VA-API driver is still iHD.
# The oneVPL runtime may look for a driver named after the kernel module.
ln -sf /usr/lib/x86_64-linux-gnu/dri/iHD_drv_video.so /usr/lib/x86_64-linux-gnu/dri/xe_drv_video.so
fi fi
if [[ "${TARGETARCH}" == "arm64" ]]; then if [[ "${TARGETARCH}" == "arm64" ]]; then

View File

@@ -11,7 +11,7 @@ joserfc == 1.2.*
cryptography == 44.0.* cryptography == 44.0.*
pathvalidate == 3.3.* pathvalidate == 3.3.*
markupsafe == 3.0.* markupsafe == 3.0.*
python-multipart == 0.0.26 python-multipart == 0.0.20
# Classification Model Training # Classification Model Training
tensorflow == 2.19.* ; platform_machine == 'aarch64' tensorflow == 2.19.* ; platform_machine == 'aarch64'
tensorflow-cpu == 2.19.* ; platform_machine == 'x86_64' tensorflow-cpu == 2.19.* ; platform_machine == 'x86_64'
@@ -42,7 +42,7 @@ opencv-python-headless == 4.11.0.*
opencv-contrib-python == 4.11.0.* opencv-contrib-python == 4.11.0.*
scipy == 1.16.* scipy == 1.16.*
# OpenVino & ONNX # OpenVino & ONNX
openvino == 2025.4.* openvino == 2025.3.*
onnxruntime == 1.22.* onnxruntime == 1.22.*
# Embeddings # Embeddings
transformers == 4.45.* transformers == 4.45.*

View File

@@ -9,7 +9,6 @@ from typing import Any
from ruamel.yaml import YAML from ruamel.yaml import YAML
sys.path.insert(0, "/opt/frigate") sys.path.insert(0, "/opt/frigate")
from frigate.config.env import substitute_frigate_vars
from frigate.const import ( from frigate.const import (
BIRDSEYE_PIPE, BIRDSEYE_PIPE,
DEFAULT_FFMPEG_VERSION, DEFAULT_FFMPEG_VERSION,
@@ -48,6 +47,14 @@ ALLOW_ARBITRARY_EXEC = allow_arbitrary_exec is not None and str(
allow_arbitrary_exec allow_arbitrary_exec
).lower() in ("true", "1", "yes") ).lower() in ("true", "1", "yes")
FRIGATE_ENV_VARS = {k: v for k, v in os.environ.items() if k.startswith("FRIGATE_")}
# read docker secret files as env vars too
if os.path.isdir("/run/secrets"):
for secret_file in os.listdir("/run/secrets"):
if secret_file.startswith("FRIGATE_"):
FRIGATE_ENV_VARS[secret_file] = (
Path(os.path.join("/run/secrets", secret_file)).read_text().strip()
)
config_file = find_config_file() config_file = find_config_file()
@@ -96,13 +103,13 @@ if go2rtc_config["webrtc"].get("candidates") is None:
go2rtc_config["webrtc"]["candidates"] = default_candidates go2rtc_config["webrtc"]["candidates"] = default_candidates
if go2rtc_config.get("rtsp", {}).get("username") is not None: if go2rtc_config.get("rtsp", {}).get("username") is not None:
go2rtc_config["rtsp"]["username"] = substitute_frigate_vars( go2rtc_config["rtsp"]["username"] = go2rtc_config["rtsp"]["username"].format(
go2rtc_config["rtsp"]["username"] **FRIGATE_ENV_VARS
) )
if go2rtc_config.get("rtsp", {}).get("password") is not None: if go2rtc_config.get("rtsp", {}).get("password") is not None:
go2rtc_config["rtsp"]["password"] = substitute_frigate_vars( go2rtc_config["rtsp"]["password"] = go2rtc_config["rtsp"]["password"].format(
go2rtc_config["rtsp"]["password"] **FRIGATE_ENV_VARS
) )
# ensure ffmpeg path is set correctly # ensure ffmpeg path is set correctly
@@ -138,7 +145,7 @@ for name in list(go2rtc_config.get("streams", {})):
if isinstance(stream, str): if isinstance(stream, str):
try: try:
formatted_stream = substitute_frigate_vars(stream) formatted_stream = stream.format(**FRIGATE_ENV_VARS)
if not ALLOW_ARBITRARY_EXEC and is_restricted_source(formatted_stream): if not ALLOW_ARBITRARY_EXEC and is_restricted_source(formatted_stream):
print( print(
f"[ERROR] Stream '{name}' uses a restricted source (echo/expr/exec) which is disabled by default for security. " f"[ERROR] Stream '{name}' uses a restricted source (echo/expr/exec) which is disabled by default for security. "
@@ -157,7 +164,7 @@ for name in list(go2rtc_config.get("streams", {})):
filtered_streams = [] filtered_streams = []
for i, stream_item in enumerate(stream): for i, stream_item in enumerate(stream):
try: try:
formatted_stream = substitute_frigate_vars(stream_item) formatted_stream = stream_item.format(**FRIGATE_ENV_VARS)
if not ALLOW_ARBITRARY_EXEC and is_restricted_source(formatted_stream): if not ALLOW_ARBITRARY_EXEC and is_restricted_source(formatted_stream):
print( print(
f"[ERROR] Stream '{name}' item {i + 1} uses a restricted source (echo/expr/exec) which is disabled by default for security. " f"[ERROR] Stream '{name}' item {i + 1} uses a restricted source (echo/expr/exec) which is disabled by default for security. "

View File

@@ -227,6 +227,16 @@ http {
include proxy.conf; include proxy.conf;
} }
# frontend uses this to fetch the version
location /api/go2rtc/api {
include auth_request.conf;
limit_except GET {
deny all;
}
proxy_pass http://go2rtc/api;
include proxy.conf;
}
# integration uses this to add webrtc candidate # integration uses this to add webrtc candidate
location /api/go2rtc/webrtc { location /api/go2rtc/webrtc {
include auth_request.conf; include auth_request.conf;

View File

@@ -119,12 +119,6 @@ audio:
Frigate supports fully local audio transcription using either `sherpa-onnx` or OpenAI's open-source Whisper models via `faster-whisper`. The goal of this feature is to support Semantic Search for `speech` audio events. Frigate is not intended to act as a continuous, fully-automatic speech transcription service — automatically transcribing all speech (or queuing many audio events for transcription) requires substantial CPU (or GPU) resources and is impractical on most systems. For this reason, transcriptions for events are initiated manually from the UI or the API rather than being run continuously in the background. Frigate supports fully local audio transcription using either `sherpa-onnx` or OpenAI's open-source Whisper models via `faster-whisper`. The goal of this feature is to support Semantic Search for `speech` audio events. Frigate is not intended to act as a continuous, fully-automatic speech transcription service — automatically transcribing all speech (or queuing many audio events for transcription) requires substantial CPU (or GPU) resources and is impractical on most systems. For this reason, transcriptions for events are initiated manually from the UI or the API rather than being run continuously in the background.
:::info
Audio transcription requires a one-time internet connection to download the Whisper or Sherpa-ONNX model on first use. Once cached, transcription runs fully offline. See [Network Requirements](/frigate/network_requirements#one-time-model-downloads) for details.
:::
Transcription accuracy also depends heavily on the quality of your camera's microphone and recording conditions. Many cameras use inexpensive microphones, and distance to the speaker, low audio bitrate, or background noise can significantly reduce transcription quality. If you need higher accuracy, more robust long-running queues, or large-scale automatic transcription, consider using the HTTP API in combination with an automation platform and a cloud transcription service. Transcription accuracy also depends heavily on the quality of your camera's microphone and recording conditions. Many cameras use inexpensive microphones, and distance to the speaker, low audio bitrate, or background noise can significantly reduce transcription quality. If you need higher accuracy, more robust long-running queues, or large-scale automatic transcription, consider using the HTTP API in combination with an automation platform and a cloud transcription service.
#### Configuration #### Configuration

View File

@@ -9,12 +9,6 @@ import NavPath from "@site/src/components/NavPath";
Bird classification identifies known birds using a quantized Tensorflow model. When a known bird is recognized, its common name will be added as a `sub_label`. This information is included in the UI, filters, as well as in notifications. Bird classification identifies known birds using a quantized Tensorflow model. When a known bird is recognized, its common name will be added as a `sub_label`. This information is included in the UI, filters, as well as in notifications.
:::info
Bird classification requires a one-time internet connection to download the classification model and label map from GitHub. Once cached, models work fully offline. See [Network Requirements](/frigate/network_requirements#one-time-model-downloads) for details.
:::
## Minimum System Requirements ## Minimum System Requirements
Bird classification runs a lightweight tflite model on the CPU, so system requirements are not significantly different from running Frigate itself. Bird classification runs a lightweight tflite model on the CPU, so system requirements are not significantly different from running Frigate itself.

View File

@@ -9,12 +9,6 @@ import NavPath from "@site/src/components/NavPath";
Object classification allows you to train a custom MobileNetV2 classification model to run on tracked objects (persons, cars, animals, etc.) to identify a finer category or attribute for that object. Classification results are visible in the Tracked Object Details pane in Explore, through the `frigate/tracked_object_details` MQTT topic, in Home Assistant sensors via the official Frigate integration, or through the event endpoints in the HTTP API. Object classification allows you to train a custom MobileNetV2 classification model to run on tracked objects (persons, cars, animals, etc.) to identify a finer category or attribute for that object. Classification results are visible in the Tracked Object Details pane in Explore, through the `frigate/tracked_object_details` MQTT topic, in Home Assistant sensors via the official Frigate integration, or through the event endpoints in the HTTP API.
:::info
Training a custom object classification model requires a one-time internet connection to download MobileNetV2 base weights. Once trained, the model runs fully offline. See [Network Requirements](/frigate/network_requirements#one-time-model-downloads) for details.
:::
## Minimum System Requirements ## Minimum System Requirements
Object classification models are lightweight and run very fast on CPU. Object classification models are lightweight and run very fast on CPU.
@@ -164,7 +158,7 @@ Enable debug logs for classification models by adding `frigate.data_processing.r
Navigate to <NavPath path="Settings > System > Logging" />. Navigate to <NavPath path="Settings > System > Logging" />.
- Set **Logging level** to `debug` - Set **Logging level** to `debug`
- Set **Per-process log level > `frigate.data_processing.real_time.custom_classification`** to `debug` for verbose classification logging - Set **Per-process log level > Frigate.Data Processing.Real Time.Custom Classification** to `debug` for verbose classification logging
</TabItem> </TabItem>
<TabItem value="yaml"> <TabItem value="yaml">

View File

@@ -9,12 +9,6 @@ import NavPath from "@site/src/components/NavPath";
State classification allows you to train a custom MobileNetV2 classification model on a fixed region of your camera frame(s) to determine a current state. The model can be configured to run on a schedule and/or when motion is detected in that region. Classification results are available through the `frigate/<camera_name>/classification/<model_name>` MQTT topic and in Home Assistant sensors via the official Frigate integration. State classification allows you to train a custom MobileNetV2 classification model on a fixed region of your camera frame(s) to determine a current state. The model can be configured to run on a schedule and/or when motion is detected in that region. Classification results are available through the `frigate/<camera_name>/classification/<model_name>` MQTT topic and in Home Assistant sensors via the official Frigate integration.
:::info
Training a custom state classification model requires a one-time internet connection to download MobileNetV2 base weights. Once trained, the model runs fully offline. See [Network Requirements](/frigate/network_requirements#one-time-model-downloads) for details.
:::
## Minimum System Requirements ## Minimum System Requirements
State classification models are lightweight and run very fast on CPU. State classification models are lightweight and run very fast on CPU.

View File

@@ -9,12 +9,6 @@ import NavPath from "@site/src/components/NavPath";
Face recognition identifies known individuals by matching detected faces with previously learned facial data. When a known `person` is recognized, their name will be added as a `sub_label`. This information is included in the UI, filters, as well as in notifications. Face recognition identifies known individuals by matching detected faces with previously learned facial data. When a known `person` is recognized, their name will be added as a `sub_label`. This information is included in the UI, filters, as well as in notifications.
:::info
Face recognition requires a one-time internet connection to download detection and embedding models from GitHub. Once cached, models work fully offline. See [Network Requirements](/frigate/network_requirements#one-time-model-downloads) for details.
:::
## Model Requirements ## Model Requirements
### Face Detection ### Face Detection

View File

@@ -29,11 +29,11 @@ You must use a vision-capable model with Frigate. The following models are recom
| Model | Notes | | Model | Notes |
| ------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | ------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `qwen3-vl` | Strong visual and situational understanding, enhanced ability to identify smaller objects and interactions with objects. | | `qwen3-vl` | Strong visual and situational understanding, strong ability to identify smaller objects and interactions with objects. |
| `qwen3.5` | Strong situational understanding, but missing DeepStack from qwen3-vl leading to worse performance for identifying objects in people's hands and other small details. | | `qwen3.5` | Strong situational understanding, but missing DeepStack from qwen3-vl leading to worse performance for identifying objects in people's hands and other small details. |
| `gemma4` | Strong situational understanding, sometimes resorts to more vague terms like 'interacts' instead of assigning a specific action. |
| `Intern3.5VL` | Relatively fast with good vision comprehension | | `Intern3.5VL` | Relatively fast with good vision comprehension |
| `gemma3` | Slower model with good vision and temporal understanding | | `gemma3` | Slower model with good vision and temporal understanding |
| `qwen2.5-vl` | Fast but capable model with good vision comprehension |
:::info :::info
@@ -193,12 +193,6 @@ To use a different OpenAI-compatible API endpoint, set the `OPENAI_BASE_URL` env
Cloud providers run on remote infrastructure and require an API key for authentication. These services handle all model inference on their servers. Cloud providers run on remote infrastructure and require an API key for authentication. These services handle all model inference on their servers.
:::info
Cloud Generative AI providers require an active internet connection to send images and prompts for processing. Local providers like llama.cpp and Ollama (with local models) do not require internet. See [Network Requirements](/frigate/network_requirements#generative-ai) for details.
:::
### Ollama Cloud ### Ollama Cloud
Ollama also supports [cloud models](https://ollama.com/cloud), where your local Ollama instance handles requests from Frigate, but model inference is performed in the cloud. Set up Ollama locally, sign in with your Ollama account, and specify the cloud model name in your Frigate config. For more details, see the Ollama cloud model [docs](https://docs.ollama.com/cloud). Ollama also supports [cloud models](https://ollama.com/cloud), where your local Ollama instance handles requests from Frigate, but model inference is performed in the cloud. Set up Ollama locally, sign in with your Ollama account, and specify the cloud model name in your Frigate config. For more details, see the Ollama cloud model [docs](https://docs.ollama.com/cloud).

View File

@@ -60,13 +60,12 @@ Frigate can utilize most Intel integrated GPUs and Arc GPUs to accelerate video
**Recommended hwaccel Preset** **Recommended hwaccel Preset**
| CPU Generation | Intel Driver | Recommended Preset | Notes | | CPU Generation | Intel Driver | Recommended Preset | Notes |
| ------------------ | ------------ | ------------------- | ------------------------------------------- | | -------------- | ------------ | ------------------- | ------------------------------------------- |
| gen1 - gen5 | i965 | preset-vaapi | qsv is not supported, may not support H.265 | | gen1 - gen5 | i965 | preset-vaapi | qsv is not supported, may not support H.265 |
| gen6 - gen7 | iHD | preset-vaapi | qsv is not supported | | gen6 - gen7 | iHD | preset-vaapi | qsv is not supported |
| gen8 - gen12 | iHD | preset-vaapi | preset-intel-qsv-\* can also be used | | gen8 - gen12 | iHD | preset-vaapi | preset-intel-qsv-\* can also be used |
| gen13+ | iHD / Xe | preset-intel-qsv-\* | | | gen13+ | iHD / Xe | preset-intel-qsv-\* | |
| Intel Arc A-series | iHD / Xe | preset-intel-qsv-\* | | | Intel Arc GPU | iHD / Xe | preset-intel-qsv-\* | |
| Intel Arc B-series | iHD / Xe | preset-intel-qsv-\* | Requires host kernel 6.12+ |
::: :::

View File

@@ -11,12 +11,6 @@ Frigate can recognize license plates on vehicles and automatically add the detec
LPR works best when the license plate is clearly visible to the camera. For moving vehicles, Frigate continuously refines the recognition process, keeping the most confident result. When a vehicle becomes stationary, LPR continues to run for a short time afterward to attempt recognition. LPR works best when the license plate is clearly visible to the camera. For moving vehicles, Frigate continuously refines the recognition process, keeping the most confident result. When a vehicle becomes stationary, LPR continues to run for a short time afterward to attempt recognition.
:::info
License plate recognition requires a one-time internet connection to download OCR and detection models from GitHub. Once cached, models work fully offline. See [Network Requirements](/frigate/network_requirements#one-time-model-downloads) for details.
:::
When a plate is recognized, the details are: When a plate is recognized, the details are:
- Added as a `sub_label` (if [known](#matching)) or the `recognized_license_plate` field (if unknown) to a tracked object. - Added as a `sub_label` (if [known](#matching)) or the `recognized_license_plate` field (if unknown) to a tracked object.

View File

@@ -21,12 +21,6 @@ The jsmpeg live view will use more browser and client GPU resources. Using go2rt
| mse | native | native | yes (depends on audio codec) | yes | iPhone requires iOS 17.1+, Firefox is h.264 only. This is Frigate's default when go2rtc is configured. | | mse | native | native | yes (depends on audio codec) | yes | iPhone requires iOS 17.1+, Firefox is h.264 only. This is Frigate's default when go2rtc is configured. |
| webrtc | native | native | yes (depends on audio codec) | yes | Requires extra configuration. Frigate attempts to use WebRTC when MSE fails or when using a camera's two-way talk feature. | | webrtc | native | native | yes (depends on audio codec) | yes | Requires extra configuration. Frigate attempts to use WebRTC when MSE fails or when using a camera's two-way talk feature. |
:::info
WebRTC may use an external STUN server for NAT traversal. MSE and HLS streaming do not require any internet access. See [Network Requirements](/frigate/network_requirements#webrtc-stun) for details.
:::
### Camera Settings Recommendations ### Camera Settings Recommendations
If you are using go2rtc, you should adjust the following settings in your camera's firmware for the best experience with Live view: If you are using go2rtc, you should adjust the following settings in your camera's firmware for the best experience with Live view:

View File

@@ -11,12 +11,6 @@ import NavPath from "@site/src/components/NavPath";
Frigate offers native notifications using the [WebPush Protocol](https://web.dev/articles/push-notifications-web-push-protocol) which uses the [VAPID spec](https://tools.ietf.org/html/draft-thomson-webpush-vapid) to deliver notifications to web apps using encryption. Frigate offers native notifications using the [WebPush Protocol](https://web.dev/articles/push-notifications-web-push-protocol) which uses the [VAPID spec](https://tools.ietf.org/html/draft-thomson-webpush-vapid) to deliver notifications to web apps using encryption.
:::info
Push notifications require internet access from the Frigate server to the browser vendor's push service (e.g., Google FCM, Mozilla autopush). See [Network Requirements](/frigate/network_requirements#push-notifications) for details.
:::
## Setting up Notifications ## Setting up Notifications
In order to use notifications the following requirements must be met: In order to use notifications the following requirements must be met:

View File

@@ -288,12 +288,6 @@ This detector is available for use with both Hailo-8 and Hailo-8L AI Acceleratio
See the [installation docs](../frigate/installation.md#hailo-8) for information on configuring the Hailo hardware. See the [installation docs](../frigate/installation.md#hailo-8) for information on configuring the Hailo hardware.
:::info
If no custom model is provided, the Hailo detector downloads a default model from the Hailo Model Zoo on first startup. Once cached, the model works fully offline. See [Network Requirements](/frigate/network_requirements#hardware-specific-detector-models) for details.
:::
### Configuration ### Configuration
When configuring the Hailo detector, you have two options to specify the model: a local **path** or a **URL**. When configuring the Hailo detector, you have two options to specify the model: a local **path** or a **URL**.
@@ -1799,12 +1793,6 @@ Hardware accelerated object detection is supported on the following SoCs:
This implementation uses [Rockchip's RKNN-Toolkit2](https://github.com/airockchip/rknn-toolkit2/), version v2.3.2. This implementation uses [Rockchip's RKNN-Toolkit2](https://github.com/airockchip/rknn-toolkit2/), version v2.3.2.
:::info
If no custom model is provided, the RKNN detector downloads a default model from GitHub on first startup. Once cached, the model works fully offline. See [Network Requirements](/frigate/network_requirements#hardware-specific-detector-models) for details.
:::
:::tip :::tip
When using many cameras one detector may not be enough to keep up. Multiple detectors can be defined assuming NPU resources are available. An example configuration would be: When using many cameras one detector may not be enough to keep up. Multiple detectors can be defined assuming NPU resources are available. An example configuration would be:
@@ -2188,12 +2176,6 @@ This implementation uses the [AXera Pulsar2 Toolchain](https://huggingface.co/AX
See the [installation docs](../frigate/installation.md#axera) for information on configuring the AXEngine hardware. See the [installation docs](../frigate/installation.md#axera) for information on configuring the AXEngine hardware.
:::info
The AXEngine detector downloads its default model from HuggingFace on first startup. Once cached, the model works fully offline. See [Network Requirements](/frigate/network_requirements#hardware-specific-detector-models) for details.
:::
### Configuration ### Configuration
When configuring the AXEngine detector, you have to specify the model name. When configuring the AXEngine detector, you have to specify the model name.

View File

@@ -24,12 +24,6 @@ For object filters, any single detection below `min_score` will be ignored as a
In frame 2, the score is below the `min_score` value, so Frigate ignores it and it becomes a 0.0. The computed score is the median of the score history (padding to at least 3 values), and only when that computed score crosses the `threshold` is the object marked as a true positive. That happens in frame 4 in the example. In frame 2, the score is below the `min_score` value, so Frigate ignores it and it becomes a 0.0. The computed score is the median of the score history (padding to at least 3 values), and only when that computed score crosses the `threshold` is the object marked as a true positive. That happens in frame 4 in the example.
The **top score** is the highest computed score the tracked object has ever reached during its lifetime. Because the computed score rises and falls as new frames come in, the top score can be thought of as the peak confidence Frigate had in the object. In Frigate's UI (such as the Tracking Details pane in Explore), you may see all three values:
- **Score** — the raw detector score for that single frame.
- **Computed Score** — the median of the most recent score history at that moment. This is the value compared against `threshold`.
- **Top Score** — the highest computed score reached so far for the tracked object.
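
For concreteness, a hypothetical sketch of how such a computed score could be derived from the score history; the exact padding and windowing in Frigate's tracker may differ.

```python
from statistics import median

def computed_score(history: list[float], min_score: float) -> float:
    # Detections below min_score are zeroed rather than dropped,
    # so they still pull the median down (frame 2 above).
    scores = [s if s >= min_score else 0.0 for s in history]
    # Pad to at least 3 values so a single early detection cannot
    # immediately push the object over the threshold.
    while len(scores) < 3:
        scores.append(0.0)
    return median(scores)

# Illustrative frame scores with min_score=0.6:
history: list[float] = []
for frame_score in (0.75, 0.55, 0.85, 0.90):
    history.append(frame_score)
    print(f"raw={frame_score:.2f} computed={computed_score(history, 0.6):.2f}")
```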
### Minimum Score ### Minimum Score
Any detection below `min_score` will be immediately thrown out and never tracked because it is considered a false positive. If `min_score` is too low then false positives may be detected and tracked which can confuse the object tracker and may lead to wasted resources. If `min_score` is too high then lower scoring true positives like objects that are further away or partially occluded may be thrown out which can also confuse the tracker and cause valid tracked objects to be lost or disjointed. Any detection below `min_score` will be immediately thrown out and never tracked because it is considered a false positive. If `min_score` is too low then false positives may be detected and tracked which can confuse the object tracker and may lead to wasted resources. If `min_score` is too high then lower scoring true positives like objects that are further away or partially occluded may be thrown out which can also confuse the tracker and cause valid tracked objects to be lost or disjointed.

View File

@@ -20,7 +20,7 @@ When a profile is activated, Frigate merges each camera's profile overrides on t
:::info :::info
Profile changes are applied in-memory and take effect immediately — no restart is required. The active profile is persisted across Frigate restarts (stored in the `/config/.profiles` file). Profile changes are applied in-memory and take effect immediately — no restart is required. The active profile is persisted across Frigate restarts (stored in the `/config/.active_profile` file).
::: :::
@@ -120,7 +120,7 @@ The following camera configuration sections can be overridden in a profile:
:::note :::note
Only the fields you explicitly set in a profile override are applied. All other fields retain their base configuration values. For masks and zones, profile zones **override** the camera's base masks and zones. If configuring profiles via YAML, you should not define masks or zones in profiles that are not defined in the base config. Only the fields you explicitly set in a profile override are applied. All other fields retain their base configuration values. For zones, profile zones are merged with the camera's base zones — any zone defined in the profile will override or add to the base zones.
::: :::
@@ -130,14 +130,14 @@ Profiles can be activated and deactivated from the Frigate UI. Open the Settings
## Example: Home / Away Setup ## Example: Home / Away Setup
A common use case is having different detection and notification settings based on whether you are home or away. The example below is for a system with two cameras, `front_door` and `indoor_cam`. A common use case is having different detection and notification settings based on whether you are home or away.
<ConfigTabs> <ConfigTabs>
<TabItem value="ui"> <TabItem value="ui">
1. Navigate to <NavPath path="Settings > Camera configuration > Profiles" /> and create two profiles: **Home** and **Away**. 1. Navigate to <NavPath path="Settings > Camera configuration > Profiles" /> and create two profiles: **Home** and **Away**.
2. From the Camera configuration section in Settings, choose the **front_door** camera, and select the **Away** profile from the profile dropdown. Then, enable notifications from the Notifications pane, and set alert labels to `person` and `car` from the Review pane. Then, from the profile dropdown, choose the **Home** profile and navigate to Notifications to disable notifications. 2. For the **front_door** camera, configure the **Away** profile to enable notifications and set alert labels to `person` and `car`. Configure the **Home** profile to disable notifications.
3. For the **indoor_cam** camera, perform similar steps: configure the **Away** profile to enable the camera, detection, and recording. Configure the **Home** profile to disable the camera entirely for privacy. 3. For the **indoor_cam** camera, configure the **Away** profile to enable the camera, detection, and recording. Configure the **Home** profile to disable the camera entirely for privacy.
4. Activate the desired profile from <NavPath path="Settings > Camera configuration > Profiles" /> or from the **Profiles** option in Frigate's main menu. 4. Activate the desired profile from <NavPath path="Settings > Camera configuration > Profiles" /> or from the **Profiles** option in Frigate's main menu.
</TabItem> </TabItem>

View File

@@ -123,76 +123,6 @@ record:
</TabItem> </TabItem>
</ConfigTabs> </ConfigTabs>
## Pre-capture and Post-capture
The `pre_capture` and `post_capture` settings control how many seconds of video are included before and after an alert or detection. These can be configured independently for alerts and detections, and can be set globally or overridden per camera.
<ConfigTabs>
<TabItem value="ui">
Navigate to <NavPath path="Settings > Global configuration > Recording" /> for global defaults, or <NavPath path="Settings > Camera configuration > (select camera) > Recording" /> to override for a specific camera.
| Field | Description |
| ---------------------------------------------- | ---------------------------------------------------- |
| **Alert retention > Pre-capture seconds** | Seconds of video to include before an alert event |
| **Alert retention > Post-capture seconds** | Seconds of video to include after an alert event |
| **Detection retention > Pre-capture seconds** | Seconds of video to include before a detection event |
| **Detection retention > Post-capture seconds** | Seconds of video to include after a detection event |
</TabItem>
<TabItem value="yaml">
```yaml
record:
enabled: True
alerts:
pre_capture: 5 # seconds before the alert to include
post_capture: 5 # seconds after the alert to include
detections:
pre_capture: 5 # seconds before the detection to include
post_capture: 5 # seconds after the detection to include
```
</TabItem>
</ConfigTabs>
- **Default**: 5 seconds for both pre and post capture.
- **Pre-capture maximum**: 60 seconds.
- These settings apply per review category (alerts and detections), not per object type.
### How pre/post capture interacts with retention mode
The `pre_capture` and `post_capture` values define the **time window** around a review item, but only recording segments that also match the configured **retention mode** are actually kept on disk.
- **`mode: all`** — Retains every segment within the capture window, regardless of whether motion was detected.
- **`mode: motion`** (default) — Only retains segments within the capture window that contain motion. This includes segments with active tracked objects, since object motion implies motion. Segments without any motion are discarded even if they fall within the pre/post capture range.
- **`mode: active_objects`** — Only retains segments within the capture window where tracked objects were actively moving. Segments with general motion but no active objects are discarded.
This means that with the default `motion` mode, you may see less footage than the configured pre/post capture duration if parts of the capture window had no motion.
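
The retention decision can be pictured as a small predicate. This is an illustrative sketch of the rules above, not Frigate's actual segment cleanup code.

```python
def keep_segment(mode: str, in_capture_window: bool,
                 has_motion: bool, has_active_objects: bool) -> bool:
    """Decide whether a 10-second recording segment is retained."""
    if not in_capture_window:
        return False
    if mode == "all":
        return True
    if mode == "motion":
        # Active tracked objects imply motion.
        return has_motion or has_active_objects
    if mode == "active_objects":
        return has_active_objects
    return False

# A still pre-capture segment is kept under "all" but dropped under "motion":
print(keep_segment("all", True, False, False))     # True
print(keep_segment("motion", True, False, False))  # False
```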
To guarantee the full pre/post capture duration is always retained:
```yaml
record:
enabled: True
alerts:
pre_capture: 10
post_capture: 10
retain:
days: 30
mode: all # retains all segments within the capture window
```
:::note
Because recording segments are written in 10 second chunks, pre-capture timing depends on segment boundaries. The actual pre-capture footage may be slightly shorter or longer than the exact configured value.
:::
### Where to view pre/post capture footage
Pre and post capture footage is included in the **recording timeline**, visible in the History view. Note that pre/post capture settings only affect which recording segments are **retained on disk** — they do not change the start and end points shown in the UI. The History view will still center on the review item's actual time range, but you can scrub backward and forward through the retained pre/post capture footage on the timeline. The Explore view shows object-specific clips that are trimmed to when the tracked object was actually visible, so pre/post capture time will not be reflected there.
## Will Frigate delete old recordings if my storage runs out? ## Will Frigate delete old recordings if my storage runs out?
As of Frigate 0.12 if there is less than an hour left of storage, the oldest 2 hours of recordings will be deleted. As of Frigate 0.12 if there is less than an hour left of storage, the oldest 2 hours of recordings will be deleted.
@@ -281,52 +211,31 @@ Using Frigate UI, Home Assistant, or MQTT, cameras can be automated to only reco
Footage can be exported from Frigate by right-clicking (desktop) or long pressing (mobile) on a review item in the Review pane or by clicking the Export button in the History view. Exported footage is then organized and searchable through the Export view, accessible from the main navigation bar. Footage can be exported from Frigate by right-clicking (desktop) or long pressing (mobile) on a review item in the Review pane or by clicking the Export button in the History view. Exported footage is then organized and searchable through the Export view, accessible from the main navigation bar.
### Custom export with FFmpeg arguments ### Time-lapse export
For advanced use cases, the [custom export HTTP API](../integrations/api/export-recording-custom-export-custom-camera-name-start-start-time-end-end-time-post.api.mdx) lets you pass custom FFmpeg arguments when exporting a recording: Time lapse exporting is available only via the [HTTP API](../integrations/api/export-recording-export-camera-name-start-start-time-end-end-time-post.api.mdx).
``` When exporting a time-lapse the default speed-up is 25x with 30 FPS. This means that every 25 seconds of (real-time) recording is condensed into 1 second of time-lapse video (always without audio) with a smoothness of 30 FPS.
POST /export/custom/{camera_name}/start/{start_time}/end/{end_time}
To configure the speed-up factor, the frame rate and further custom settings, use the `timelapse_args` parameter. The below configuration example would change the time-lapse speed to 60x (for fitting 1 hour of recording into 1 minute of time-lapse) with 25 FPS:
```yaml {3-4}
record:
enabled: True
export:
timelapse_args: "-vf setpts=PTS/60 -r 25"
``` ```
The request body accepts `ffmpeg_input_args` and `ffmpeg_output_args` to control encoding, frame rate, filters, and other FFmpeg options. If neither is provided, Frigate defaults to time-lapse output settings (25x speed, 30 FPS). :::tip
The following example exports a time-lapse at 60x speed with 25 FPS: When using `hwaccel_args`, hardware encoding is used for timelapse generation. This setting can be overridden for a specific camera (e.g., when camera resolution exceeds hardware encoder limits); set the camera-level export hwaccel_args with the appropriate settings. Using an unrecognized value or empty string will fall back to software encoding (libx264).
```json
{
"name": "Front Door Time-lapse",
"ffmpeg_output_args": "-vf setpts=PTS/60 -r 25"
}
```
#### CPU fallback
If hardware acceleration is configured and the export fails (e.g., the GPU is unavailable), set `cpu_fallback: true` in the request body to automatically retry using software encoding.
```json
{
"name": "My Export",
"ffmpeg_output_args": "-c:v libx264 -crf 23",
"cpu_fallback": true
}
```
:::note
Non-admin users are restricted from using FFmpeg arguments that can access the filesystem (e.g., `-filter_complex`, file paths, and protocol references). Admin users have full control over FFmpeg arguments.
::: :::
:::tip :::tip
When `hwaccel_args` is configured, hardware encoding is used for exports. This can be overridden per camera (e.g., when camera resolution exceeds hardware encoder limits) by setting a camera-level `hwaccel_args`. Using an unrecognized value or empty string falls back to software encoding (libx264). The encoder determines its own behavior so the resulting file size may be undesirably large.
To reduce the output file size the ffmpeg parameter `-qp n` can be utilized (where `n` stands for the value of the quantisation parameter). The value can be adjusted to get an acceptable tradeoff between quality and file size for the given scenario.
:::
:::tip
To reduce output file size, add the FFmpeg parameter `-qp n` to `ffmpeg_output_args` (where `n` is the quantization parameter). Adjust the value to balance quality and file size for your scenario.
::: :::

View File

@@ -13,12 +13,6 @@ Frigate uses models from [Jina AI](https://huggingface.co/jinaai) to create and
Semantic Search is accessed via the _Explore_ view in the Frigate UI. Semantic Search is accessed via the _Explore_ view in the Frigate UI.
:::info
Semantic search requires a one-time internet connection to download embedding models from HuggingFace. Once cached, models work fully offline. See [Network Requirements](/frigate/network_requirements#one-time-model-downloads) for details.
:::
## Minimum System Requirements ## Minimum System Requirements
Semantic Search works by running a large AI model locally on your system. Small or underpowered systems like a Raspberry Pi will not run Semantic Search reliably or at all. Semantic Search works by running a large AI model locally on your system. Small or underpowered systems like a Raspberry Pi will not run Semantic Search reliably or at all.

View File

@@ -146,11 +146,17 @@ A single Coral can handle many cameras using the default model and will be suffi
The OpenVINO detector type is able to run on: The OpenVINO detector type is able to run on:
- 6th Gen Intel Platforms and newer that have an iGPU - 6th Gen Intel Platforms and newer that have an iGPU
- x86 hosts with an Intel Arc GPU (including Arc A-series and B-series Battlemage) - x86 hosts with an Intel Arc GPU
- Intel NPUs - Intel NPUs
- Most modern AMD CPUs (though this is officially not supported by Intel) - Most modern AMD CPUs (though this is officially not supported by Intel)
- x86 & Arm64 hosts via CPU (generally not recommended) - x86 & Arm64 hosts via CPU (generally not recommended)
:::note
Intel B-series (Battlemage) GPUs are not officially supported with Frigate 0.17, though a user has [provided steps to rebuild the Frigate container](https://github.com/blakeblackshear/frigate/discussions/21257) with support for them.
:::
More information is available [in the detector docs](/configuration/object_detectors#openvino-detector) More information is available [in the detector docs](/configuration/object_detectors#openvino-detector)
Inference speeds vary greatly depending on the CPU or GPU used, some known examples of GPU inference times are below: Inference speeds vary greatly depending on the CPU or GPU used, some known examples of GPU inference times are below:

View File

@@ -482,8 +482,7 @@ services:
- /dev/apex_0:/dev/apex_0 # Passes a PCIe Coral, follow driver instructions here https://github.com/jnicolson/gasket-builder - /dev/apex_0:/dev/apex_0 # Passes a PCIe Coral, follow driver instructions here https://github.com/jnicolson/gasket-builder
- /dev/video11:/dev/video11 # For Raspberry Pi 4B - /dev/video11:/dev/video11 # For Raspberry Pi 4B
- /dev/dri/renderD128:/dev/dri/renderD128 # AMD / Intel GPU, needs to be updated for your hardware - /dev/dri/renderD128:/dev/dri/renderD128 # AMD / Intel GPU, needs to be updated for your hardware
- /dev/kfd:/dev/kfd # AMD Kernel Fusion Driver for ROCm - /dev/accel:/dev/accel # Intel NPU
- /dev/accel:/dev/accel # AMD / Intel NPU
volumes: volumes:
- /etc/localtime:/etc/localtime:ro - /etc/localtime:/etc/localtime:ro
- /path/to/your/config:/config - /path/to/your/config:/config

View File

@@ -1,155 +0,0 @@
---
id: network_requirements
title: Network Requirements
---
# Network Requirements
Frigate is designed to run locally and does not require a persistent internet connection for core functionality. However, certain features need internet access for initial setup or ongoing operation. This page describes what connects to the internet, when, and how to control it.
## How Frigate Uses the Internet
Frigate's internet usage falls into three categories:
1. **One-time model downloads** — ML models are downloaded the first time a feature is enabled, then cached locally. No internet is needed on subsequent startups.
2. **Optional cloud services** — Features like Frigate+ and Generative AI connect to external APIs only when explicitly configured.
3. **Build-time dependencies** — Components bundled into the Docker image during the build process. These require no internet at runtime.
:::tip
After initial setup, Frigate can run fully offline as long as all required models have been downloaded and no cloud-dependent features are enabled.
:::
## One-Time Model Downloads
The following models are downloaded automatically the first time their associated feature is enabled. Once cached in `/config/model_cache/`, they do not require internet again.
| Feature | Models Downloaded | Source |
| --------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------- |
| [Semantic search](/configuration/semantic_search) | Jina CLIP v1 or v2 (ONNX) + tokenizer | HuggingFace |
| [Face recognition](/configuration/face_recognition) | FaceNet, ArcFace, face detection model | GitHub |
| [License plate recognition](/configuration/license_plate_recognition) | PaddleOCR (detection, classification, recognition) + YOLOv9 plate detector | GitHub |
| [Bird classification](/configuration/bird_classification) | MobileNetV2 bird model + label map | GitHub |
| [Custom classification](/configuration/custom_classification/state_classification) (training) | MobileNetV2 ImageNet base weights (via Keras) | Google storage |
| [Audio transcription](/configuration/advanced) | Whisper or Sherpa-ONNX streaming model | HuggingFace / OpenAI |
### Hardware-Specific Detector Models
If you are using one of the following hardware detectors and have not provided your own model file, a default model will be downloaded on first startup:
| Detector | Model Downloaded | Source |
| ------------------------------------------------------------------ | -------------------- | ------------------------ |
| [Rockchip RKNN](/configuration/object_detectors#rockchip-platform) | RKNN detection model | GitHub |
| [Hailo 8 / 8L](/configuration/object_detectors#hailo-8) | YOLOv6n (.hef) | Hailo Model Zoo (AWS S3) |
| [AXERA AXEngine](/configuration/object_detectors) | Detection model | HuggingFace |
:::note
The default CPU, EdgeTPU, and OpenVINO object detection models are bundled into the Docker image and do not require any download at runtime.
:::
### Preventing Model Downloads
If you have already downloaded all required models and want to prevent the HuggingFace Hub and Transformers libraries from attempting any outbound network requests, set the following environment variables on your Frigate container:
```yaml
environment:
HF_HUB_OFFLINE: "1"
TRANSFORMERS_OFFLINE: "1"
```
:::warning
Setting these variables without having the correct model files already cached in `/config/model_cache/` will cause failures. Only use these after a successful initial setup with internet access.
:::
### Mirror Support
If your Frigate instance has restricted internet access, you can point model downloads at internal mirrors using environment variables:
| Environment Variable | Default | Used By |
| ----------------------------------- | ----------------------------------- | --------------------------------------------- |
| `HF_ENDPOINT` | `https://huggingface.co` | Semantic search, Sherpa-ONNX, AXEngine models |
| `GITHUB_ENDPOINT` | `https://github.com` | Face recognition, LPR, RKNN models |
| `GITHUB_RAW_ENDPOINT` | `https://raw.githubusercontent.com` | Bird classification |
| `TF_KERAS_MOBILENET_V2_WEIGHTS_URL` | Google storage (Keras default) | Custom classification training |
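
As an illustration of how such a mirror variable would be consumed, the sketch below rewrites a default GitHub release URL. The helper and example URL are hypothetical, not Frigate's actual download code.

```python
import os

# Fall back to the public endpoint when no mirror is configured.
GITHUB_ENDPOINT = os.environ.get("GITHUB_ENDPOINT", "https://github.com")

def mirrored(url: str) -> str:
    # Rewrite only the host prefix; the path stays the same on the mirror.
    return url.replace("https://github.com", GITHUB_ENDPOINT, 1)

print(mirrored("https://github.com/example/models/releases/download/v1.0/model.onnx"))
```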
## Optional Cloud Services
These features connect to external services during normal operation and require internet whenever they are active.
### Frigate+
When a Frigate+ API key is configured, Frigate communicates with `https://api.frigate.video` to download models, upload snapshots for training, submit annotations, and report false positives. Remove the API key to disable all Frigate+ network activity.
See [Frigate+](/integrations/plus) for details.
### Generative AI
When a Generative AI provider is configured, Frigate sends images and prompts to the configured provider for event descriptions, chat, and camera monitoring. Available providers:
| Provider | Internet Required |
| ------------- | ---------------------------------------------------------------- |
| OpenAI | Yes — connects to OpenAI API (or custom base URL) |
| Google Gemini | Yes — connects to Google Generative AI API |
| Azure OpenAI | Yes — connects to your Azure endpoint |
| Ollama | Depends — typically local (`localhost:11434`), but can be remote |
| llama.cpp | No — runs entirely locally |
Disable Generative AI by removing the `genai` configuration from your cameras. See [Generative AI](/configuration/genai/genai_config) for details.
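For reference, a fully local setup can use Ollama. A minimal sketch, assuming an Ollama instance on the same host with a vision-capable model already pulled (option names can vary between Frigate versions):

```yaml
genai:
  enabled: true
  provider: ollama
  base_url: http://localhost:11434
  model: llava
```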
### Version Check
Frigate checks GitHub for the latest release version on startup by querying `https://api.github.com`. This can be disabled:
```yaml
telemetry:
version_check: false
```
### Push Notifications
When [notifications](/configuration/notifications) are enabled and users have registered for push notifications in the web UI, Frigate sends push messages through the browser vendor's push service (e.g., Google FCM, Mozilla autopush). This requires internet access from the Frigate server to these push endpoints.
### MQTT
If an [MQTT broker](/integrations/mqtt) is configured, Frigate maintains a connection to the broker's host and port. This is typically a local network connection, but will require internet if you use a cloud-hosted MQTT broker.
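A typical local broker configuration is a short sketch like the following; the host and credentials are examples, and `{FRIGATE_MQTT_PASSWORD}` assumes you pass the secret as a `FRIGATE_`-prefixed environment variable:

```yaml
mqtt:
  host: 192.168.1.50
  port: 1883
  user: frigate
  password: "{FRIGATE_MQTT_PASSWORD}"
```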
### DeepStack / CodeProject.AI
When using the [DeepStack detector plugin](/configuration/object_detectors), Frigate sends images to the configured API endpoint for inference. This is typically local but depends on where the service is hosted.
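A sketch of such a detector entry, assuming a DeepStack-compatible service on the local network (the URL is an example and option names may differ by version):

```yaml
detectors:
  deepstack:
    type: deepstack
    api_url: http://192.168.1.60:5000/v1/vision/detection
    api_timeout: 0.1
```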
## WebRTC (STUN)
For [WebRTC live streaming](/configuration/live), Frigate uses STUN for NAT traversal:
- **go2rtc** defaults to a local STUN listener (`stun:8555`) — no internet required.
- **The web UI's WebRTC player** includes a fallback to Google's public STUN server (`stun:stun.l.google.com:19302`), which requires internet.
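To keep WebRTC fully local, you can rely on go2rtc's local listener and avoid the external fallback. A sketch, assuming your go2rtc version supports overriding `ice_servers` (clearing the list is intended to prevent lookups to public STUN hosts):

```yaml
go2rtc:
  webrtc:
    listen: ":8555"
    # Assumption: an empty ice_servers list disables external STUN in go2rtc
    ice_servers: []
```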
## Home Assistant Supervisor
When running as a Home Assistant add-on, the go2rtc startup script queries the local Supervisor API (`http://supervisor/`) to discover the host IP address and WebRTC port. This is a local network call to the Home Assistant host, not an internet connection.
## What Does NOT Require Internet
- **Object detection** — CPU, EdgeTPU, OpenVINO, and other bundled detector models are included in the Docker image.
- **Recording and playback** — All video is stored and served locally.
- **Live streaming** — Camera streams are pulled over your local network. MSE and HLS streaming work without any external connections.
- **The web interface** — Fully self-contained with no external fonts, scripts, analytics, or CDN dependencies. All translations are bundled locally.
- **Custom classification inference** — After training, custom models run entirely locally.
- **Audio detection** — The YAMNet audio classification model is bundled in the Docker image.
## Running Frigate Offline
To run Frigate in an air-gapped or offline environment:
1. **Pre-download models** — Start Frigate once with internet access and all desired features enabled. Models will be cached in `/config/model_cache/`.
2. **Disable version check** — Set `telemetry.version_check: false` in your configuration.
3. **Block outbound model requests** — Set the `HF_HUB_OFFLINE=1` and `TRANSFORMERS_OFFLINE=1` environment variables to prevent HuggingFace and Transformers from attempting any network requests.
4. **Avoid cloud features** — Do not configure Frigate+, Generative AI providers that require internet, or cloud MQTT brokers.
5. **Use local model mirrors** — If limited internet is available, set the `HF_ENDPOINT`, `GITHUB_ENDPOINT`, and `GITHUB_RAW_ENDPOINT` environment variables to point to local mirrors.
After these steps, Frigate will operate with no outbound internet connections.
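Putting the steps together, an offline deployment might look like this compose sketch (image tag and host paths are illustrative):

```yaml
services:
  frigate:
    image: ghcr.io/blakeblackshear/frigate:stable
    environment:
      # Block HuggingFace/Transformers network access after the initial online run
      HF_HUB_OFFLINE: "1"
      TRANSFORMERS_OFFLINE: "1"
    volumes:
      - ./config:/config          # contains model_cache from the online run
      - ./storage:/media/frigate  # local recordings
```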

View File

@@ -5,12 +5,6 @@ title: MQTT
These are the MQTT messages generated by Frigate. The default topic_prefix is `frigate`, but can be changed in the config file.
:::info
MQTT requires a network connection to your broker. This is typically local, but will require internet if using a cloud-hosted MQTT broker. See [Network Requirements](/frigate/network_requirements#mqtt) for details.
:::
## General Frigate Topics
### `frigate/available`

View File

@@ -5,12 +5,6 @@ title: Frigate+
For more information about how to use Frigate+ to improve your model, see the [Frigate+ docs](/plus/).
:::info
Frigate+ requires an active internet connection to communicate with `https://api.frigate.video` for model downloads, image uploads, and annotations. See [Network Requirements](/frigate/network_requirements#frigate) for details.
:::
## Setup
### Create an account

View File

@@ -17,10 +17,6 @@ Please use your own knowledge to assess and vet them before you install anything
The [Advanced Camera Card](https://card.camera/#/README) is a Home Assistant dashboard card with deep Frigate integration.
## [cctvQL](https://github.com/arunrajiah/cctvql)
[cctvQL](https://github.com/arunrajiah/cctvql) is a natural language query layer for Frigate and other CCTV systems. It connects to Frigate's REST API and MQTT broker to let you ask conversational questions about cameras and events (e.g. "Was there motion at the front door last night?"), with support for real-time event streaming, anomaly detection, PTZ control, alert rules, and a Home Assistant custom component.
## [Double Take](https://github.com/skrashevich/double-take)
[Double Take](https://github.com/skrashevich/double-take) provides a unified UI and API for processing and training images for facial recognition.

View File

@@ -110,17 +110,3 @@ No. Frigate uses the TCP protocol to connect to your camera's RTSP URL. VLC auto
TCP ensures that all data packets arrive in the correct order. This is crucial for video recording, decoding, and stream processing, which is why Frigate enforces a TCP connection. UDP is faster but less reliable, as it does not guarantee packet delivery or order, and VLC does not have the same requirements as Frigate.
You can still configure Frigate to use UDP by using ffmpeg input args or the preset `preset-rtsp-udp`. See the [ffmpeg presets](/configuration/ffmpeg_presets) documentation.
### Frigate is slow to start up with a "probing detect stream" message in the logs
When `detect.width` and `detect.height` are not set, Frigate probes each camera's detect stream on startup (and when saving the config) to auto-detect its resolution. For RTSP streams Frigate probes with ffprobe and automatically retries over TCP if UDP doesn't respond, with a 5 second timeout per attempt. A camera that cannot be reached over either transport will add up to ~10 seconds to startup before Frigate falls back to default dimensions, which may show up as width `0` and height `0` in Camera Probe Info under System Metrics.
To skip the probe entirely and make startup instant, set `detect.width` and `detect.height` explicitly in your camera config:
```yaml
cameras:
my_camera:
detect:
width: 1280
height: 720
```

View File

@@ -80,85 +80,3 @@ Some users found that mounting a drive via `fstab` with the `sync` option caused
#### Copy Times < 1 second
If the storage is working quickly, this error may be caused by CPU load on the machine being too high for Frigate to have the resources to keep up. Try temporarily shutting down other services to see if the issue improves.
## I see the message: WARNING : Too many unprocessed recording segments in cache for camera. This likely indicates an issue with the detect stream...
This warning means that the detect stream for the affected camera has fallen behind or stopped processing frames. Frigate's recording cache holds segments waiting to be analyzed by the detector — when more than 6 segments pile up without being processed, Frigate discards the oldest ones to prevent the cache from filling up.
:::warning
This error is a **symptom**, not the root cause. The actual cause is always logged **before** these messages start appearing. You must review the full logs from Frigate startup through the first occurrence of this warning to identify the real issue.
:::
### Step 1: Get the full logs
Collect complete Frigate logs from startup through the first occurrence of the error. Look for errors or warnings that appear **before** the "Too many unprocessed" messages begin — that is where the root cause will be found.
### Step 2: Check the cache directory
Exec into the Frigate container and inspect the recording cache:
```
docker exec -it frigate ls -la /tmp/cache
```
Each camera should have a small number of `.mp4` segment files. If one camera has significantly more files than others, that camera is the source of the problem. A problem with a single camera can cascade and cause all cameras to show this error.
### Step 3: Verify segment duration
Recording segments should be approximately 10 seconds long. Run `ffprobe` on segments in the cache to check:
```
docker exec -it frigate ffprobe -v error -show_entries format=duration -of default=noprint_wrappers=1 /tmp/cache/<camera>@<segment>.mp4
```
If segments are only ~1 second instead of ~10 seconds, the camera is sending corrupt timestamp data, causing segments to be split too frequently and filling the cache 10x faster than expected.
**Common causes of short segments:**
- **"Smart Codec" or "Smart+" enabled on the camera** — These features dynamically change encoding parameters mid-stream, which corrupts timestamps. Disable them in your camera's settings.
- **Changing codec, bitrate, or resolution mid-stream** — Any encoding changes during an active stream can cause unpredictable segment splitting.
- **Camera firmware bugs** — Check for firmware updates from your camera manufacturer.
### Step 4: Check for a stuck detector
If the detect stream is not processing frames, segments will accumulate. Common causes (an example configuration follows this list):
- **Detection resolution too high** — Use a substream for detection, not the full resolution main stream.
- **Detection FPS too high** — 5 fps is the recommended maximum for detection.
- **Model too large** — Use smaller model variants (e.g., YOLO `s` or `t` size, not `e` or `x`). Use 320x320 input size rather than 640x640 unless you have a powerful dedicated detector.
- **Virtualization** — Running Frigate in a VM (especially Proxmox) can cause the detector to hang or stall. This is a known issue with GPU/TPU passthrough in virtualized environments and is not something Frigate can fix. Running Frigate in Docker on bare metal is recommended.
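A camera configuration applying the first three recommendations might look like this sketch (stream URLs and the camera name are placeholders):

```yaml
cameras:
  back_yard:
    ffmpeg:
      inputs:
        # Low-resolution substream used only for detection
        - path: rtsp://192.168.1.20:554/substream
          roles:
            - detect
        # Full-resolution main stream used only for recording
        - path: rtsp://192.168.1.20:554/mainstream
          roles:
            - record
    detect:
      width: 1280
      height: 720
      fps: 5
```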
### Step 5: Check for GPU hangs
On the host machine, check `dmesg` for GPU-related errors:
```
dmesg | grep -i -E "gpu|drm|reset|hang"
```
Messages like `trying reset from guc_exec_queue_timedout_job` or similar GPU reset/hang messages indicate a driver or hardware issue. Ensure your kernel and GPU drivers (especially Intel) are up to date.
### Step 6: Verify hardware acceleration configuration
An incorrect `hwaccel_args` preset can cause ffmpeg to fail silently or consume excessive CPU, starving the detector of resources.
- After upgrading Frigate, verify your preset matches your hardware (e.g., `preset-intel-qsv-h264` instead of the deprecated `preset-vaapi`).
- For h265 cameras, use the corresponding h265 preset (e.g., `preset-intel-qsv-h265`).
- Note that `hwaccel_args` are only relevant for the detect stream — Frigate does not decode the record stream.
### Step 7: Verify go2rtc stream configuration
Ensure that the ffmpeg source names in your go2rtc configuration match the correct camera stream. A misconfigured stream name (e.g., copying a config from one camera to another without updating the stream reference) will cause the wrong stream to be used or the stream to fail entirely.
### Step 8: Check system resources
If none of the above apply, the issue may be a general resource constraint. Monitor the following on your host:
- **CPU usage** — An overloaded CPU can prevent the detector from keeping up.
- **RAM and swap** — Excessive swapping dramatically slows all I/O operations.
- **Disk I/O** — Use `iotop` or `iostat` to check for saturation.
- **Storage space** — Verify you have free space on the Frigate storage volume (check the Storage page in the Frigate UI).
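A few quick host-side checks for the items above (standard Linux tools; `iostat` comes from the sysstat package):

```
# CPU and memory pressure at a glance
top -bn1 | head -n 20
free -h

# Disk I/O saturation, sampled 5 times at 1 second intervals
iostat -x 1 5

# Free space on the Frigate storage volume
df -h /media/frigate
```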
Try temporarily disabling resource-intensive features like `genai` and `face_recognition` to see if the issue resolves. This can help isolate whether the detector is being starved of resources.

View File

@@ -10897,9 +10897,9 @@
"license": "MIT"
},
"node_modules/express/node_modules/path-to-regexp": {
"version": "0.1.13",
"version": "0.1.12",
"resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.13.tgz",
"resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.12.tgz",
"integrity": "sha512-A/AGNMFN3c8bOlvV9RreMdrv7jsmF9XIfDeCd87+I8RNg6s78BhJxMu69NEMHBSJFxKidViTEdruRwEk/WIKqA==",
"integrity": "sha512-RA1GjUVMnvYFxuqovrEqZoxxW5NUZqbwKtYz/Tt7nXerk0LbLblQmrsgdeOxV5SFHf0UDggjS/bSeOZwt1pmEQ==",
"license": "MIT"
},
"node_modules/express/node_modules/range-parser": {

View File

@@ -12,7 +12,6 @@ const sidebars: SidebarsConfig = {
"frigate/updating",
"frigate/camera_setup",
"frigate/video_pipeline",
"frigate/network_requirements",
"frigate/glossary",
],
Guides: [

View File

@@ -2724,135 +2724,6 @@ paths:
application/json:
schema:
$ref: "#/components/schemas/HTTPValidationError"
/exports/batch:
post:
tags:
- Export
summary: Start recording export batch
description: >-
Starts recording exports for a batch of items, each with its own camera
and time range. Optionally assigns them to a new or existing export case.
When neither export_case_id nor new_case_name is provided, exports are
added as uncategorized. Attaching to an existing case is admin-only.
operationId: export_recordings_batch_exports_batch_post
requestBody:
required: true
content:
application/json:
schema:
$ref: "#/components/schemas/BatchExportBody"
responses:
"202":
description: Successful Response
content:
application/json:
schema:
$ref: "#/components/schemas/BatchExportResponse"
"400":
description: Bad Request
content:
application/json:
schema:
$ref: "#/components/schemas/GenericResponse"
"403":
description: Forbidden
content:
application/json:
schema:
$ref: "#/components/schemas/GenericResponse"
"404":
description: Not Found
content:
application/json:
schema:
$ref: "#/components/schemas/GenericResponse"
"503":
description: Service Unavailable
content:
application/json:
schema:
$ref: "#/components/schemas/GenericResponse"
"422":
description: Validation Error
content:
application/json:
schema:
$ref: "#/components/schemas/HTTPValidationError"
/exports/delete:
post:
tags:
- Export
summary: Bulk delete exports
description: >-
Deletes one or more exports by ID. All IDs must exist and none can be
in-progress. Admin-only.
operationId: bulk_delete_exports_exports_delete_post
requestBody:
required: true
content:
application/json:
schema:
$ref: "#/components/schemas/ExportBulkDeleteBody"
responses:
"200":
description: Successful Response
content:
application/json:
schema:
$ref: "#/components/schemas/GenericResponse"
"400":
description: Bad Request - one or more exports are in-progress
content:
application/json:
schema:
$ref: "#/components/schemas/GenericResponse"
"404":
description: Not Found - one or more export IDs do not exist
content:
application/json:
schema:
$ref: "#/components/schemas/GenericResponse"
"422":
description: Validation Error
content:
application/json:
schema:
$ref: "#/components/schemas/HTTPValidationError"
/exports/reassign:
post:
tags:
- Export
summary: Bulk reassign exports to a case
description: >-
Assigns or unassigns one or more exports to/from a case. All IDs must
exist. Pass export_case_id as null to unassign (move to uncategorized).
Admin-only.
operationId: bulk_reassign_exports_exports_reassign_post
requestBody:
required: true
content:
application/json:
schema:
$ref: "#/components/schemas/ExportBulkReassignBody"
responses:
"200":
description: Successful Response
content:
application/json:
schema:
$ref: "#/components/schemas/GenericResponse"
"404":
description: Not Found - one or more export IDs or the target case do not exist
content:
application/json:
schema:
$ref: "#/components/schemas/GenericResponse"
"422":
description: Validation Error
content:
application/json:
schema:
$ref: "#/components/schemas/HTTPValidationError"
/cases:
get:
tags:
@@ -2982,6 +2853,39 @@ paths:
application/json:
schema:
$ref: "#/components/schemas/HTTPValidationError"
"/export/{export_id}/case":
patch:
tags:
- Export
summary: Assign export to case
description: "Assigns an export to a case, or unassigns it if export_case_id is null."
operationId: assign_export_case_export__export_id__case_patch
parameters:
- name: export_id
in: path
required: true
schema:
type: string
title: Export Id
requestBody:
required: true
content:
application/json:
schema:
$ref: "#/components/schemas/ExportCaseAssignBody"
responses:
"200":
description: Successful Response
content:
application/json:
schema:
$ref: "#/components/schemas/GenericResponse"
"422":
description: Validation Error
content:
application/json:
schema:
$ref: "#/components/schemas/HTTPValidationError"
"/export/{camera_name}/start/{start_time}/end/{end_time}": "/export/{camera_name}/start/{start_time}/end/{end_time}":
post: post:
tags: tags:
@ -3069,6 +2973,32 @@ paths:
application/json: application/json:
schema: schema:
$ref: "#/components/schemas/HTTPValidationError" $ref: "#/components/schemas/HTTPValidationError"
"/export/{event_id}":
delete:
tags:
- Export
summary: Delete export
operationId: export_delete_export__event_id__delete
parameters:
- name: event_id
in: path
required: true
schema:
type: string
title: Event Id
responses:
"200":
description: Successful Response
content:
application/json:
schema:
$ref: "#/components/schemas/GenericResponse"
"422":
description: Validation Error
content:
application/json:
schema:
$ref: "#/components/schemas/HTTPValidationError"
"/export/custom/{camera_name}/start/{start_time}/end/{end_time}": "/export/custom/{camera_name}/start/{start_time}/end/{end_time}":
post: post:
tags: tags:
@ -6571,149 +6501,6 @@ components:
required: required:
- recognizedLicensePlate - recognizedLicensePlate
title: EventsLPRBody title: EventsLPRBody
BatchExportBody:
properties:
items:
items:
$ref: "#/components/schemas/BatchExportItem"
type: array
minItems: 1
maxItems: 50
title: Items
description: List of export items. Each item has its own camera and time range.
export_case_id:
anyOf:
- type: string
maxLength: 30
- type: "null"
title: Export case ID
description: Existing export case ID to assign all exports to. Attaching to an existing case is temporarily admin-only until case-level ACLs exist.
new_case_name:
anyOf:
- type: string
maxLength: 100
- type: "null"
title: New case name
description: Name of a new export case to create when export_case_id is omitted
new_case_description:
anyOf:
- type: string
- type: "null"
title: New case description
description: Optional description for a newly created export case
type: object
required:
- items
title: BatchExportBody
BatchExportItem:
properties:
camera:
type: string
title: Camera name
start_time:
type: number
title: Start time
end_time:
type: number
title: End time
image_path:
anyOf:
- type: string
- type: "null"
title: Existing thumbnail path
description: Optional existing image to use as the export thumbnail
friendly_name:
anyOf:
- type: string
maxLength: 256
- type: "null"
title: Friendly name
description: Optional friendly name for this specific export item
client_item_id:
anyOf:
- type: string
maxLength: 128
- type: "null"
title: Client item ID
description: Optional opaque client identifier echoed back in results
type: object
required:
- camera
- start_time
- end_time
title: BatchExportItem
BatchExportResponse:
properties:
export_case_id:
anyOf:
- type: string
- type: "null"
title: Export Case Id
description: Export case ID associated with the batch
export_ids:
items:
type: string
type: array
title: Export Ids
description: Export IDs successfully queued
results:
items:
$ref: "#/components/schemas/BatchExportResultModel"
type: array
title: Results
description: Per-item batch export results
type: object
required:
- export_ids
- results
title: BatchExportResponse
description: Response model for starting an export batch.
BatchExportResultModel:
properties:
camera:
type: string
title: Camera
description: Camera name for this export attempt
export_id:
anyOf:
- type: string
- type: "null"
title: Export Id
description: The export ID when the export was successfully queued
success:
type: boolean
title: Success
description: Whether the export was successfully queued
status:
anyOf:
- type: string
- type: "null"
title: Status
description: Queue status for this camera export
error:
anyOf:
- type: string
- type: "null"
title: Error
description: Validation or queueing error for this item, if any
item_index:
anyOf:
- type: integer
- type: "null"
title: Item Index
description: Zero-based index of this result within the request items list
client_item_id:
anyOf:
- type: string
- type: "null"
title: Client Item Id
description: Opaque client-supplied item identifier echoed from the request
type: object
required:
- camera
- success
title: BatchExportResultModel
description: Per-item result for a batch export request.
EventsSubLabelBody:
properties:
subLabel:
@@ -6736,41 +6523,18 @@ components:
required:
- subLabel
title: EventsSubLabelBody
ExportBulkDeleteBody:
properties:
ids:
items:
type: string
minLength: 1
type: array
minItems: 1
title: Ids
type: object
required:
- ids
title: ExportBulkDeleteBody
description: Request body for bulk deleting exports.
ExportBulkReassignBody:
properties:
ids:
items:
type: string
minLength: 1
type: array
minItems: 1
title: Ids
export_case_id:
anyOf:
- type: string
maxLength: 30
- type: "null"
title: Export Case Id
description: "Case ID to assign to, or null to unassign from current case"
type: object
required:
- ids
title: ExportBulkReassignBody
description: Request body for bulk reassigning exports to a case.
ExportCaseAssignBody:
properties:
export_case_id:
anyOf:
- type: string
maxLength: 30
- type: "null"
title: Export Case Id
description: "Case ID to assign to the export, or null to unassign"
type: object
title: ExportCaseAssignBody
description: Request body for assigning or unassigning an export to a case.
ExportCaseCreateBody:
properties:
name:

View File

@@ -125,16 +125,6 @@ def metrics(request: Request):
return Response(content=content, media_type=content_type)
@router.get(
"/genai/models",
dependencies=[Depends(allow_any_authenticated())],
summary="List available GenAI models",
description="Returns available models for each configured GenAI provider.",
)
def genai_models(request: Request):
return JSONResponse(content=request.app.genai_manager.list_models())
@router.get("/config", dependencies=[Depends(allow_any_authenticated())]) @router.get("/config", dependencies=[Depends(allow_any_authenticated())])
def config(request: Request): def config(request: Request):
config_obj: FrigateConfig = request.app.frigate_config config_obj: FrigateConfig = request.app.frigate_config
@@ -694,9 +684,6 @@ def config_set(request: Request, body: AppConfigSetBody):
if request.app.stats_emitter is not None:
request.app.stats_emitter.config = config
if request.app.dispatcher is not None:
request.app.dispatcher.config = config
if body.update_topic:
if body.update_topic.startswith("config/cameras/"):
_, _, camera, field = body.update_topic.split("/")

View File

@@ -64,7 +64,6 @@ def require_admin_by_default():
"/logout",
# Authenticated user endpoints (allow_any_authenticated)
"/profile",
"/profiles",
# Public info endpoints (allow_public)
"/",
"/version",
@@ -88,9 +87,7 @@
"/go2rtc/streams",
"/event_ids",
"/events",
"/cases",
"/exports", "/exports",
"/jobs/export",
}
# Path prefixes that should be exempt (for paths with parameters)
@@ -103,9 +100,7 @@
"/go2rtc/streams/", # /go2rtc/streams/{camera}
"/users/", # /users/{username}/password (has own auth)
"/preview/", # /preview/{file}/thumbnail.jpg
"/cases/", # /cases/{case_id}
"/exports/", # /exports/{export_id} "/exports/", # /exports/{export_id}
"/jobs/export/", # /jobs/export/{export_id}
"/vod/", # /vod/{camera_name}/... "/vod/", # /vod/{camera_name}/...
"/notifications/", # /notifications/pubkey, /notifications/register "/notifications/", # /notifications/pubkey, /notifications/register
) )

View File

@@ -30,7 +30,7 @@ from frigate.config.camera.updater import (
CameraConfigUpdateEnum,
CameraConfigUpdateTopic,
)
from frigate.config.env import substitute_frigate_vars
from frigate.config.env import FRIGATE_ENV_VARS
from frigate.util.builtin import clean_camera_user_pass
from frigate.util.camera_cleanup import cleanup_camera_db, cleanup_camera_files
from frigate.util.config import find_config_file
@@ -126,7 +126,7 @@ def go2rtc_add_stream(request: Request, stream_name: str, src: str = ""):
params = {"name": stream_name}
if src:
try:
params["src"] = substitute_frigate_vars(src)
params["src"] = src.format(**FRIGATE_ENV_VARS)
except KeyError:
params["src"] = src
@@ -1224,15 +1224,6 @@
status_code=400,
)
if not sub_command and feature in _SUB_COMMAND_FEATURES:
return JSONResponse(
content={
"success": False,
"message": f"Feature '{feature}' requires a sub-command (e.g. mask or zone name)",
},
status_code=400,
)
if camera_name == "*": if camera_name == "*":
cameras = list(frigate_config.cameras.keys()) cameras = list(frigate_config.cameras.keys())
elif camera_name not in frigate_config.cameras: elif camera_name not in frigate_config.cameras:

View File

@@ -3,11 +3,9 @@
import base64
import json
import logging
import operator
import time
from datetime import datetime
from functools import reduce
from typing import Any, Dict, List, Optional
from typing import Any, Dict, Generator, List, Optional
import cv2
from fastapi import APIRouter, Body, Depends, Request
@@ -19,14 +17,6 @@ from frigate.api.auth import (
get_allowed_cameras_for_filter,
require_camera_access,
)
from frigate.api.chat_util import (
chunk_content,
distance_to_score,
format_events_with_local_time,
fuse_scores,
hydrate_event,
parse_iso_to_timestamp,
)
from frigate.api.defs.query.events_query_parameters import EventsQueryParams
from frigate.api.defs.request.chat_body import ChatCompletionRequest
from frigate.api.defs.response.chat_response import (
@@ -42,13 +32,55 @@ from frigate.jobs.vlm_watch import (
start_vlm_watch_job,
stop_vlm_watch_job,
)
from frigate.models import Event
logger = logging.getLogger(__name__)
router = APIRouter(tags=[Tags.chat])
def _chunk_content(content: str, chunk_size: int = 80) -> Generator[str, None, None]:
"""Yield content in word-aware chunks for streaming."""
if not content:
return
words = content.split(" ")
current: List[str] = []
current_len = 0
for w in words:
current.append(w)
current_len += len(w) + 1
if current_len >= chunk_size:
yield " ".join(current) + " "
current = []
current_len = 0
if current:
yield " ".join(current)
def _format_events_with_local_time(
events_list: List[Dict[str, Any]],
) -> List[Dict[str, Any]]:
"""Add human-readable local start/end times to each event for the LLM."""
result = []
for evt in events_list:
if not isinstance(evt, dict):
result.append(evt)
continue
copy_evt = dict(evt)
try:
start_ts = evt.get("start_time")
end_ts = evt.get("end_time")
if start_ts is not None:
dt_start = datetime.fromtimestamp(start_ts)
copy_evt["start_time_local"] = dt_start.strftime("%Y-%m-%d %I:%M:%S %p")
if end_ts is not None:
dt_end = datetime.fromtimestamp(end_ts)
copy_evt["end_time_local"] = dt_end.strftime("%Y-%m-%d %I:%M:%S %p")
except (TypeError, ValueError, OSError):
pass
result.append(copy_evt)
return result
class ToolExecuteRequest(BaseModel):
"""Request model for tool execution."""
@@ -126,76 +158,6 @@ def get_tool_definitions() -> List[Dict[str, Any]]:
"required": [],
},
},
{
"type": "function",
"function": {
"name": "find_similar_objects",
"description": (
"Find tracked objects that are visually and semantically similar "
"to a specific past event. Use this when the user references a "
"particular object they have seen and wants to find other "
"sightings of the same or similar one ('that green car', 'the "
"person in the red jacket', 'the package that was delivered'). "
"Prefer this over search_objects whenever the user's intent is "
"'find more like this specific one.' Use search_objects first "
"only if you need to locate the anchor event. Requires semantic "
"search to be enabled."
),
"parameters": {
"type": "object",
"properties": {
"event_id": {
"type": "string",
"description": "The id of the anchor event to find similar objects to.",
},
"after": {
"type": "string",
"description": "Start time in ISO 8601 format (e.g., '2024-01-01T00:00:00Z').",
},
"before": {
"type": "string",
"description": "End time in ISO 8601 format (e.g., '2024-01-01T23:59:59Z').",
},
"cameras": {
"type": "array",
"items": {"type": "string"},
"description": "Optional list of cameras to restrict to. Defaults to all.",
},
"labels": {
"type": "array",
"items": {"type": "string"},
"description": "Optional list of labels to restrict to. Defaults to the anchor event's label.",
},
"sub_labels": {
"type": "array",
"items": {"type": "string"},
"description": "Optional list of sub_labels (names) to restrict to.",
},
"zones": {
"type": "array",
"items": {"type": "string"},
"description": "Optional list of zones. An event matches if any of its zones overlap.",
},
"similarity_mode": {
"type": "string",
"enum": ["visual", "semantic", "fused"],
"description": "Which similarity signal(s) to use. 'fused' (default) combines visual and semantic.",
"default": "fused",
},
"min_score": {
"type": "number",
"description": "Drop matches with a similarity score below this threshold (0.0-1.0).",
},
"limit": {
"type": "integer",
"description": "Maximum number of matches to return (default: 10).",
"default": 10,
},
},
"required": ["event_id"],
},
},
},
{
"type": "function",
"function": {
@@ -445,7 +407,7 @@ async def _execute_search_objects(
query_params = EventsQueryParams(
cameras=arguments.get("camera", "all"),
labels=arguments.get("label", "all"),
sub_labels=arguments.get("sub_label", "all"), # case-insensitive on the backend
sub_labels=arguments.get("sub_label", "all").lower(),
zones=zones,
zone=zones,
after=after,
@@ -472,166 +434,6 @@
)
async def _execute_find_similar_objects(
request: Request,
arguments: Dict[str, Any],
allowed_cameras: List[str],
) -> Dict[str, Any]:
"""Execute the find_similar_objects tool.
Returns a plain dict (not JSONResponse) so the chat loop can embed it
directly in tool-result messages.
"""
# 1. Semantic search enabled?
config = request.app.frigate_config
if not getattr(config.semantic_search, "enabled", False):
return {
"error": "semantic_search_disabled",
"message": (
"Semantic search must be enabled to find similar objects. "
"Enable it in the Frigate config under semantic_search."
),
}
context = request.app.embeddings
if context is None:
return {
"error": "semantic_search_disabled",
"message": "Embeddings context is not available.",
}
# 2. Anchor lookup.
event_id = arguments.get("event_id")
if not event_id:
return {"error": "missing_event_id", "message": "event_id is required."}
try:
anchor = Event.get(Event.id == event_id)
except Event.DoesNotExist:
return {
"error": "anchor_not_found",
"message": f"Could not find event {event_id}.",
}
# 3. Parse params.
after = parse_iso_to_timestamp(arguments.get("after"))
before = parse_iso_to_timestamp(arguments.get("before"))
cameras = arguments.get("cameras")
if cameras:
# Respect RBAC: intersect with the user's allowed cameras.
cameras = [c for c in cameras if c in allowed_cameras]
else:
cameras = list(allowed_cameras) if allowed_cameras else None
labels = arguments.get("labels") or [anchor.label]
sub_labels = arguments.get("sub_labels")
zones = arguments.get("zones")
similarity_mode = arguments.get("similarity_mode", "fused")
if similarity_mode not in ("visual", "semantic", "fused"):
similarity_mode = "fused"
min_score = arguments.get("min_score")
limit = int(arguments.get("limit", 10))
limit = max(1, min(limit, 50))
# 4. Run similarity searches. We deliberately do NOT pass event_ids into
# the vec queries — the IN filter on sqlite-vec is broken in the installed
# version (see frigate/embeddings/__init__.py). Mirror the pattern used by
# frigate/api/event.py events_search: fetch top-k globally, then intersect
# with the structured filters via Peewee.
visual_distances: Dict[str, float] = {}
description_distances: Dict[str, float] = {}
try:
if similarity_mode in ("visual", "fused"):
rows = context.search_thumbnail(anchor)
visual_distances = {row[0]: row[1] for row in rows}
if similarity_mode in ("semantic", "fused"):
query_text = (
(anchor.data or {}).get("description")
or anchor.sub_label
or anchor.label
)
rows = context.search_description(query_text)
description_distances = {row[0]: row[1] for row in rows}
except Exception:
logger.exception("Similarity search failed")
return {
"error": "similarity_search_failed",
"message": "Failed to run similarity search.",
}
vec_ids = set(visual_distances) | set(description_distances)
vec_ids.discard(anchor.id)
# vec layer returns up to k=100 per modality; flag when we hit that ceiling
# so the LLM can mention there may be more matches beyond what we saw.
candidate_truncated = (
len(visual_distances) >= 100 or len(description_distances) >= 100
)
if not vec_ids:
return {
"anchor": hydrate_event(anchor),
"results": [],
"similarity_mode": similarity_mode,
"candidate_truncated": candidate_truncated,
}
# 5. Apply structured filters, intersected with vec hits.
clauses = [Event.id.in_(list(vec_ids))]
if after is not None:
clauses.append(Event.start_time >= after)
if before is not None:
clauses.append(Event.start_time <= before)
if cameras:
clauses.append(Event.camera.in_(cameras))
if labels:
clauses.append(Event.label.in_(labels))
if sub_labels:
clauses.append(Event.sub_label.in_(sub_labels))
if zones:
# Mirror the pattern used by frigate/api/event.py for JSON-array zone match.
zone_clauses = [Event.zones.cast("text") % f'*"{zone}"*' for zone in zones]
clauses.append(reduce(operator.or_, zone_clauses))
eligible = {e.id: e for e in Event.select().where(reduce(operator.and_, clauses))}
# 6. Fuse and rank.
scored: List[tuple[str, float]] = []
for eid in eligible:
v_score = (
distance_to_score(visual_distances[eid], context.thumb_stats)
if eid in visual_distances
else None
)
d_score = (
distance_to_score(description_distances[eid], context.desc_stats)
if eid in description_distances
else None
)
fused = fuse_scores(v_score, d_score)
if fused is None:
continue
if min_score is not None and fused < min_score:
continue
scored.append((eid, fused))
scored.sort(key=lambda pair: pair[1], reverse=True)
scored = scored[:limit]
results = [hydrate_event(eligible[eid], score=score) for eid, score in scored]
return {
"anchor": hydrate_event(anchor),
"results": results,
"similarity_mode": similarity_mode,
"candidate_truncated": candidate_truncated,
}
@router.post(
"/chat/execute",
dependencies=[Depends(allow_any_authenticated())],
@@ -657,13 +459,6 @@ async def execute_tool(
if tool_name == "search_objects":
return await _execute_search_objects(arguments, allowed_cameras)
if tool_name == "find_similar_objects":
result = await _execute_find_similar_objects(
request, arguments, allowed_cameras
)
status_code = 200 if "error" not in result else 400
return JSONResponse(content=result, status_code=status_code)
if tool_name == "set_camera_state": if tool_name == "set_camera_state":
result = await _execute_set_camera_state(request, arguments) result = await _execute_set_camera_state(request, arguments)
return JSONResponse( return JSONResponse(
@ -725,14 +520,45 @@ async def _execute_get_live_context(
"detections": list(tracked_objects_dict.values()), "detections": list(tracked_objects_dict.values()),
} }
# Grab live frame when the chat model supports vision # Grab live frame and handle based on provider configuration
image_url = await _get_live_frame_image_url(request, camera, allowed_cameras) image_url = await _get_live_frame_image_url(request, camera, allowed_cameras)
if image_url: if image_url:
chat_client = request.app.genai_manager.chat_client genai_manager = request.app.genai_manager
if chat_client is not None and chat_client.supports_vision: if genai_manager.tool_client is genai_manager.vision_client:
# Pass image URL so it can be injected as a user message # Same provider handles both roles — pass image URL so it can
# (images can't be in tool results) # be injected as a user message (images can't be in tool results)
result["_image_url"] = image_url result["_image_url"] = image_url
elif genai_manager.vision_client is not None:
# Separate vision provider — have it describe the image,
# providing detection context so it knows what to focus on
frame_bytes = _decode_data_url(image_url)
if frame_bytes:
detections = result.get("detections", [])
if detections:
detection_lines = []
for d in detections:
parts = [d.get("label", "unknown")]
if d.get("sub_label"):
parts.append(f"({d['sub_label']})")
if d.get("zones"):
parts.append(f"in {', '.join(d['zones'])}")
detection_lines.append(" ".join(parts))
context = (
"The following objects are currently being tracked: "
+ "; ".join(detection_lines)
+ "."
)
else:
context = "No objects are currently being tracked."
description = genai_manager.vision_client._send(
f"Describe what you see in this security camera image. "
f"{context} Focus on the scene, any visible activity, "
f"and details about the tracked objects.",
[frame_bytes],
)
if description:
result["image_description"] = description
return result
@@ -783,6 +609,17 @@
return None
def _decode_data_url(data_url: str) -> Optional[bytes]:
"""Decode a base64 data URL to raw bytes."""
try:
# Format: data:image/jpeg;base64,<data>
_, encoded = data_url.split(",", 1)
return base64.b64decode(encoded)
except (ValueError, Exception) as e:
logger.debug("Failed to decode data URL: %s", e)
return None
async def _execute_set_camera_state(
request: Request,
arguments: Dict[str, Any],
@@ -847,8 +684,6 @@ async def _execute_tool_internal(
except (json.JSONDecodeError, AttributeError) as e:
logger.warning(f"Failed to extract tool result: {e}")
return {"error": "Failed to parse tool result"}
elif tool_name == "find_similar_objects":
return await _execute_find_similar_objects(request, arguments, allowed_cameras)
elif tool_name == "set_camera_state": elif tool_name == "set_camera_state":
return await _execute_set_camera_state(request, arguments) return await _execute_set_camera_state(request, arguments)
elif tool_name == "get_live_context": elif tool_name == "get_live_context":
@ -871,9 +706,8 @@ async def _execute_tool_internal(
return _execute_get_recap(arguments, allowed_cameras) return _execute_get_recap(arguments, allowed_cameras)
else: else:
logger.error( logger.error(
"Tool call failed: unknown tool %r. Expected one of: search_objects, find_similar_objects, " "Tool call failed: unknown tool %r. Expected one of: search_objects, get_live_context, "
"get_live_context, start_camera_watch, stop_camera_watch, get_profile_status, get_recap. " "start_camera_watch, stop_camera_watch, get_profile_status, get_recap. Arguments received: %s",
"Arguments received: %s",
tool_name, tool_name,
json.dumps(arguments), json.dumps(arguments),
) )
@@ -900,9 +734,9 @@ async def _execute_start_camera_watch(
await require_camera_access(camera, request=request)
genai_manager = request.app.genai_manager
chat_client = genai_manager.chat_client
vision_client = genai_manager.vision_client or genai_manager.tool_client
if chat_client is None or not chat_client.supports_vision:
if vision_client is None:
return {"error": "VLM watch requires a chat model with vision support."}
return {"error": "No vision/GenAI provider configured."}
try:
job_id = start_vlm_watch_job(
@@ -1135,7 +969,7 @@ async def _execute_pending_tools(
json.dumps(tool_args),
)
if tool_name == "search_objects" and isinstance(tool_result, list):
tool_result = format_events_with_local_time(tool_result)
tool_result = _format_events_with_local_time(tool_result)
_keys = {
"id",
"camera",
@@ -1236,7 +1070,7 @@ async def chat_completion(
6. Repeats until final answer
7. Returns response to user
"""
genai_client = request.app.genai_manager.chat_client
genai_client = request.app.genai_manager.tool_client
if not genai_client:
return JSONResponse(
content={
@@ -1288,9 +1122,7 @@ Do not start your response with phrases like "I will check...", "Let me see...",
Always present times to the user in the server's local timezone. When tool results include start_time_local and end_time_local, use those exact strings when listing or describing detection times—do not convert or invent timestamps. Do not use UTC or ISO format with Z for the user-facing answer unless the tool result only provides Unix timestamps without local time fields.
When users ask about "today", "yesterday", "this week", etc., use the current date above as reference.
When searching for objects or events, use ISO 8601 format for dates (e.g., {current_date_str}T00:00:00Z for the start of today).
Always be accurate with time calculations based on the current date provided.
When a user refers to a specific object they have seen or describe with identifying details ("that green car", "the person in the red jacket", "a package left today"), prefer the find_similar_objects tool over search_objects. Use search_objects first only to locate the anchor event, then pass its id to find_similar_objects. For generic queries like "show me all cars today", keep using search_objects. If a user message begins with [attached_event:<id>], treat that event id as the anchor for any similarity or "tell me more" request in the same message and call find_similar_objects with that id.{cameras_section}"""
Always be accurate with time calculations based on the current date provided.{cameras_section}"""
conversation.append(
{
@@ -1328,9 +1160,6 @@ When a user refers to a specific object they have seen or describe with identify
async def stream_body_llm():
nonlocal conversation, stream_tool_calls, stream_iterations
while stream_iterations < max_iterations:
if await request.is_disconnected():
logger.debug("Client disconnected, stopping chat stream")
return
logger.debug(
f"Streaming LLM (iteration {stream_iterations + 1}/{max_iterations}) "
f"with {len(conversation)} message(s)"
@@ -1340,9 +1169,6 @@ When a user refers to a specific object they have seen or describe with identify
tools=tools if tools else None,
tool_choice="auto",
):
if await request.is_disconnected():
logger.debug("Client disconnected, stopping chat stream")
return
kind, value = event
if kind == "content_delta":
yield (
@@ -1372,11 +1198,6 @@ When a user refers to a specific object they have seen or describe with identify
msg.get("content"), pending
)
)
if await request.is_disconnected():
logger.debug(
"Client disconnected before tool execution"
)
return
(
executed_calls,
tool_results,
@@ -1461,7 +1282,7 @@ When a user refers to a specific object they have seen or describe with identify
+ b"\n"
)
# Stream content in word-sized chunks for smooth UX
for part in chunk_content(final_content):
for part in _chunk_content(final_content):
yield (
json.dumps({"type": "content", "delta": part}).encode(
"utf-8"
@@ -1560,12 +1381,12 @@ async def start_vlm_monitor(
await require_camera_access(body.camera, request=request)
chat_client = genai_manager.chat_client
vision_client = genai_manager.vision_client or genai_manager.tool_client
if chat_client is None or not chat_client.supports_vision:
if vision_client is None:
return JSONResponse(
content={
"success": False,
"message": "VLM watch requires a chat model with vision support.",
"message": "No vision/GenAI provider configured.",
},
status_code=400,
)

View File

@@ -1,135 +0,0 @@
"""Pure, stateless helpers used by the chat tool dispatchers.
These were extracted from frigate/api/chat.py to keep that module focused on
route handlers, tool dispatchers, and streaming loop internals. Nothing in
this file touches the FastAPI request, the embeddings context, or the chat
loop state; all inputs and outputs are plain data.
"""
import logging
import math
import time
from datetime import datetime
from typing import Any, Dict, Generator, List, Optional
from frigate.embeddings.util import ZScoreNormalization
from frigate.models import Event
logger = logging.getLogger(__name__)
# Similarity fusion weights for find_similar_objects.
# Visual dominates because the feature's primary use case is "same specific object."
# If these change, update the test in test_chat_find_similar_objects.py.
VISUAL_WEIGHT = 0.65
DESCRIPTION_WEIGHT = 0.35
def chunk_content(content: str, chunk_size: int = 80) -> Generator[str, None, None]:
"""Yield content in word-aware chunks for streaming."""
if not content:
return
words = content.split(" ")
current: List[str] = []
current_len = 0
for w in words:
current.append(w)
current_len += len(w) + 1
if current_len >= chunk_size:
yield " ".join(current) + " "
current = []
current_len = 0
if current:
yield " ".join(current)
def format_events_with_local_time(
events_list: List[Dict[str, Any]],
) -> List[Dict[str, Any]]:
"""Add human-readable local start/end times to each event for the LLM."""
result = []
for evt in events_list:
if not isinstance(evt, dict):
result.append(evt)
continue
copy_evt = dict(evt)
try:
start_ts = evt.get("start_time")
end_ts = evt.get("end_time")
if start_ts is not None:
dt_start = datetime.fromtimestamp(start_ts)
copy_evt["start_time_local"] = dt_start.strftime("%Y-%m-%d %I:%M:%S %p")
if end_ts is not None:
dt_end = datetime.fromtimestamp(end_ts)
copy_evt["end_time_local"] = dt_end.strftime("%Y-%m-%d %I:%M:%S %p")
except (TypeError, ValueError, OSError):
pass
result.append(copy_evt)
return result
def distance_to_score(distance: float, stats: ZScoreNormalization) -> float:
"""Convert a cosine distance to a [0, 1] similarity score.
Uses the existing ZScoreNormalization stats maintained by EmbeddingsContext
to normalize across deployments, then a bounded sigmoid. Lower distance ->
higher score. If stats are uninitialized (stddev == 0), returns a neutral
0.5 so the fallback ordering by raw distance still dominates.
"""
if stats.stddev == 0:
return 0.5
z = (distance - stats.mean) / stats.stddev
# Sigmoid on -z so that small distance (good) -> high score.
return 1.0 / (1.0 + math.exp(z))
def fuse_scores(
visual_score: Optional[float],
description_score: Optional[float],
) -> Optional[float]:
"""Weighted fusion of visual and description similarity scores.
If one side is missing (e.g., no description embedding for this event),
the other side's score is returned alone with no penalty. If both are
missing, returns None and the caller should drop the event.
"""
if visual_score is None and description_score is None:
return None
if visual_score is None:
return description_score
if description_score is None:
return visual_score
return VISUAL_WEIGHT * visual_score + DESCRIPTION_WEIGHT * description_score
def parse_iso_to_timestamp(value: Optional[str]) -> Optional[float]:
"""Parse an ISO-8601 string as server-local time -> unix timestamp.
Mirrors the parsing _execute_search_objects uses so both tools accept the
same format from the LLM.
"""
if value is None:
return None
try:
s = value.replace("Z", "").strip()[:19]
dt = datetime.strptime(s, "%Y-%m-%dT%H:%M:%S")
return time.mktime(dt.timetuple())
except (ValueError, AttributeError, TypeError):
logger.warning("Invalid timestamp format: %s", value)
return None
def hydrate_event(event: Event, score: Optional[float] = None) -> Dict[str, Any]:
"""Convert an Event row into the dict shape returned by find_similar_objects."""
data: Dict[str, Any] = {
"id": event.id,
"camera": event.camera,
"label": event.label,
"sub_label": event.sub_label,
"start_time": event.start_time,
"end_time": event.end_time,
"zones": event.zones,
}
if score is not None:
data["score"] = score
return data

View File

@@ -1,65 +0,0 @@
from typing import List, Optional
from pydantic import BaseModel, Field, model_validator
MAX_BATCH_EXPORT_ITEMS = 50
class BatchExportItem(BaseModel):
camera: str = Field(title="Camera name")
start_time: float = Field(title="Start time")
end_time: float = Field(title="End time")
image_path: Optional[str] = Field(
default=None,
title="Existing thumbnail path",
description="Optional existing image to use as the export thumbnail",
)
friendly_name: Optional[str] = Field(
default=None,
title="Friendly name",
max_length=256,
description="Optional friendly name for this specific export item",
)
client_item_id: Optional[str] = Field(
default=None,
title="Client item ID",
max_length=128,
description="Optional opaque client identifier echoed back in results",
)
class BatchExportBody(BaseModel):
items: List[BatchExportItem] = Field(
title="Items",
min_length=1,
max_length=MAX_BATCH_EXPORT_ITEMS,
description="List of export items. Each item has its own camera and time range.",
)
export_case_id: Optional[str] = Field(
default=None,
title="Export case ID",
max_length=30,
description=(
"Existing export case ID to assign all exports to. Attaching to an "
"existing case is temporarily admin-only until case-level ACLs exist."
),
)
new_case_name: Optional[str] = Field(
default=None,
title="New case name",
max_length=100,
description="Name of a new export case to create when export_case_id is omitted",
)
new_case_description: Optional[str] = Field(
default=None,
title="New case description",
description="Optional description for a newly created export case",
)
@model_validator(mode="after")
def validate_case_target(self) -> "BatchExportBody":
for item in self.items:
if item.end_time <= item.start_time:
raise ValueError("end_time must be after start_time")
return self

View File

@@ -1,24 +0,0 @@
"""Request bodies for bulk export operations."""
from typing import Optional
from pydantic import BaseModel, Field, conlist, constr
class ExportBulkDeleteBody(BaseModel):
"""Request body for bulk deleting exports."""
# List of export IDs with at least one element and each element with at least one char
ids: conlist(constr(min_length=1), min_length=1)
class ExportBulkReassignBody(BaseModel):
"""Request body for bulk reassigning exports to a case."""
# List of export IDs with at least one element and each element with at least one char
ids: conlist(constr(min_length=1), min_length=1)
export_case_id: Optional[str] = Field(
default=None,
max_length=30,
description="Case ID to assign to, or null to unassign from current case",
)

View File

@@ -23,3 +23,13 @@ class ExportCaseUpdateBody(BaseModel):
description: Optional[str] = Field(
default=None, description="Updated description of the export case"
)
class ExportCaseAssignBody(BaseModel):
"""Request body for assigning or unassigning an export to a case."""
export_case_id: Optional[str] = Field(
default=None,
max_length=30,
description="Case ID to assign to the export, or null to unassign",
)

View File

@ -1,4 +1,4 @@
from typing import Any, List, Optional from typing import List, Optional
from pydantic import BaseModel, Field from pydantic import BaseModel, Field
@ -28,96 +28,6 @@ class StartExportResponse(BaseModel):
export_id: Optional[str] = Field( export_id: Optional[str] = Field(
default=None, description="The export ID if successfully started" default=None, description="The export ID if successfully started"
) )
status: Optional[str] = Field(
default=None,
description="Queue status for the export job",
)
class BatchExportResultModel(BaseModel):
"""Per-item result for a batch export request."""
camera: str = Field(description="Camera name for this export attempt")
export_id: Optional[str] = Field(
default=None,
description="The export ID when the export was successfully queued",
)
success: bool = Field(description="Whether the export was successfully queued")
status: Optional[str] = Field(
default=None,
description="Queue status for this camera export",
)
error: Optional[str] = Field(
default=None,
description="Validation or queueing error for this item, if any",
)
item_index: Optional[int] = Field(
default=None,
description="Zero-based index of this result within the request items list",
)
client_item_id: Optional[str] = Field(
default=None,
description="Opaque client-supplied item identifier echoed from the request",
)
class BatchExportResponse(BaseModel):
"""Response model for starting an export batch."""
export_case_id: Optional[str] = Field(
default=None,
description="Export case ID associated with the batch",
)
export_ids: List[str] = Field(description="Export IDs successfully queued")
results: List[BatchExportResultModel] = Field(
description="Per-item batch export results"
)
class ExportJobModel(BaseModel):
"""Model representing a queued or running export job."""
id: str = Field(description="Unique identifier for the export job")
job_type: str = Field(description="Job type")
status: str = Field(description="Current job status")
camera: str = Field(description="Camera associated with this export job")
name: Optional[str] = Field(
default=None,
description="Friendly name for the export",
)
export_case_id: Optional[str] = Field(
default=None,
description="ID of the export case this export belongs to",
)
request_start_time: float = Field(description="Requested export start time")
request_end_time: float = Field(description="Requested export end time")
start_time: Optional[float] = Field(
default=None,
description="Unix timestamp when execution started",
)
end_time: Optional[float] = Field(
default=None,
description="Unix timestamp when execution completed",
)
error_message: Optional[str] = Field(
default=None,
description="Error message for failed jobs",
)
results: Optional[dict[str, Any]] = Field(
default=None,
description="Result metadata for completed jobs",
)
current_step: str = Field(
default="queued",
description="Current execution step (queued, preparing, encoding, encoding_retry, finalizing)",
)
progress_percent: float = Field(
default=0.0,
description="Progress percentage of the current step (0.0 - 100.0)",
)
ExportJobsResponse = List[ExportJobModel]
ExportsResponse = List[ExportModel] ExportsResponse = List[ExportModel]
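A hedged sketch of the payload shape BatchExportResponse serializes to (all IDs hypothetical); item_index and client_item_id let callers correlate each result with the request item that produced it:
# Hypothetical values for illustration only.
batch_export_response = {
    "export_case_id": "case_abc123",
    "export_ids": ["exp_1"],
    "results": [
        {
            "camera": "front",
            "export_id": "exp_1",
            "success": True,
            "status": "queued",
            "error": None,
            "item_index": 0,
            "client_item_id": "ui-row-7",
        }
    ],
}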

View File

@ -199,18 +199,13 @@ def events(
sub_label_clauses.append((Event.sub_label.is_null())) sub_label_clauses.append((Event.sub_label.is_null()))
for label in filtered_sub_labels: for label in filtered_sub_labels:
lowered = label.lower()
sub_label_clauses.append( sub_label_clauses.append(
(fn.LOWER(Event.sub_label.cast("text")) == lowered) (Event.sub_label.cast("text") == label)
) # include exact matches (case-insensitive) ) # include exact matches
# include this label when part of a list (LIKE is case-insensitive in sqlite for ASCII) # include this label when part of a list
sub_label_clauses.append( sub_label_clauses.append((Event.sub_label.cast("text") % f"*{label},*"))
(fn.LOWER(Event.sub_label.cast("text")) % f"*{lowered},*") sub_label_clauses.append((Event.sub_label.cast("text") % f"*, {label}*"))
)
sub_label_clauses.append(
(fn.LOWER(Event.sub_label.cast("text")) % f"*, {lowered}*")
)
sub_label_clause = reduce(operator.or_, sub_label_clauses) sub_label_clause = reduce(operator.or_, sub_label_clauses)
clauses.append((sub_label_clause)) clauses.append((sub_label_clause))
@ -614,18 +609,13 @@ def events_search(
sub_label_clauses.append((Event.sub_label.is_null())) sub_label_clauses.append((Event.sub_label.is_null()))
for label in filtered_sub_labels: for label in filtered_sub_labels:
lowered = label.lower()
sub_label_clauses.append( sub_label_clauses.append(
(fn.LOWER(Event.sub_label.cast("text")) == lowered) (Event.sub_label.cast("text") == label)
) # include exact matches (case-insensitive) ) # include exact matches
# include this label when part of a list (LIKE is case-insensitive in sqlite for ASCII) # include this label when part of a list
sub_label_clauses.append( sub_label_clauses.append((Event.sub_label.cast("text") % f"*{label},*"))
(fn.LOWER(Event.sub_label.cast("text")) % f"*{lowered},*") sub_label_clauses.append((Event.sub_label.cast("text") % f"*, {label}*"))
)
sub_label_clauses.append(
(fn.LOWER(Event.sub_label.cast("text")) % f"*, {lowered}*")
)
event_filters.append((reduce(operator.or_, sub_label_clauses))) event_filters.append((reduce(operator.or_, sub_label_clauses)))
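In plain Python terms, the clause lists built in both hunks above implement the following matching semantics for a stored sub_label that may be a single value or a comma-separated list (a hedged sketch; the real filters are ORM expressions, and a separate is_null clause handles the "None" filter):
def sub_label_matches(stored: str, label: str) -> bool:
    # Mirrors the three case-insensitive clauses: exact match, first
    # entry in a comma-separated list, and any later entry in the list.
    lowered = label.lower()
    s = stored.lower()
    return s == lowered or f"{lowered}," in s or f", {lowered}" in s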

File diff suppressed because it is too large

View File

@ -746,7 +746,7 @@ async def set_not_reviewed(
description="Use GenAI to summarize review items over a period of time.", description="Use GenAI to summarize review items over a period of time.",
) )
def generate_review_summary(request: Request, start_ts: float, end_ts: float): def generate_review_summary(request: Request, start_ts: float, end_ts: float):
if not request.app.genai_manager.description_client: if not request.app.genai_manager.vision_client:
return JSONResponse( return JSONResponse(
content=( content=(
{ {

View File

@ -52,7 +52,6 @@ from frigate.embeddings import EmbeddingProcess, EmbeddingsContext
from frigate.events.audio import AudioProcessor from frigate.events.audio import AudioProcessor
from frigate.events.cleanup import EventCleanup from frigate.events.cleanup import EventCleanup
from frigate.events.maintainer import EventProcessor from frigate.events.maintainer import EventProcessor
from frigate.jobs.export import reap_stale_exports
from frigate.jobs.motion_search import stop_all_motion_search_jobs from frigate.jobs.motion_search import stop_all_motion_search_jobs
from frigate.log import _stop_logging from frigate.log import _stop_logging
from frigate.models import ( from frigate.models import (
@ -189,6 +188,17 @@ class FrigateApp:
except PermissionError: except PermissionError:
logger.error("Unable to write to /config to save DB state") logger.error("Unable to write to /config to save DB state")
def cleanup_timeline_db(db: SqliteExtDatabase) -> None:
db.execute_sql(
"DELETE FROM timeline WHERE source_id NOT IN (SELECT id FROM event);"
)
try:
with open(f"{CONFIG_DIR}/.timeline", "w") as f:
f.write(str(datetime.datetime.now().timestamp()))
except PermissionError:
logger.error("Unable to write to /config to save DB state")
# Migrate DB schema # Migrate DB schema
migrate_db = SqliteExtDatabase(self.config.database.path) migrate_db = SqliteExtDatabase(self.config.database.path)
@ -205,6 +215,11 @@ class FrigateApp:
router.run() router.run()
# this is a temporary check to clean up user DB from beta
# will be removed before final release
if not os.path.exists(f"{CONFIG_DIR}/.timeline"):
cleanup_timeline_db(migrate_db)
# check if vacuum needs to be run # check if vacuum needs to be run
if os.path.exists(f"{CONFIG_DIR}/.vacuum"): if os.path.exists(f"{CONFIG_DIR}/.vacuum"):
with open(f"{CONFIG_DIR}/.vacuum") as f: with open(f"{CONFIG_DIR}/.vacuum") as f:
@ -596,11 +611,6 @@ class FrigateApp:
# Clean up any stale replay camera artifacts (filesystem + DB) # Clean up any stale replay camera artifacts (filesystem + DB)
cleanup_replay_cameras() cleanup_replay_cameras()
# Reap any Export rows still marked in_progress from a previous
# session (crash, kill, broken migration). Runs synchronously before
# uvicorn binds so no API request can observe a stale row.
reap_stale_exports()
self.init_inter_process_communicator() self.init_inter_process_communicator()
self.start_detectors() self.start_detectors()
self.init_dispatcher() self.init_dispatcher()

View File

@ -118,21 +118,10 @@ class Dispatcher:
try: try:
if command_type == "set": if command_type == "set":
# Commands that require a sub-command (mask/zone name)
sub_command_required = {
"motion_mask",
"object_mask",
"zone",
}
if sub_command: if sub_command:
self._camera_settings_handlers[command]( self._camera_settings_handlers[command](
camera_name, sub_command, payload camera_name, sub_command, payload
) )
elif command in sub_command_required:
logger.error(
"Command %s requires a sub-command (mask/zone name)",
command,
)
else: else:
self._camera_settings_handlers[command](camera_name, payload) self._camera_settings_handlers[command](camera_name, payload)
elif command_type == "ptz": elif command_type == "ptz":

View File

@ -18,8 +18,8 @@ class GenAIProviderEnum(str, Enum):
class GenAIRoleEnum(str, Enum): class GenAIRoleEnum(str, Enum):
chat = "chat" tools = "tools"
descriptions = "descriptions" vision = "vision"
embeddings = "embeddings" embeddings = "embeddings"
@ -49,21 +49,21 @@ class GenAIConfig(FrigateBaseModel):
roles: list[GenAIRoleEnum] = Field( roles: list[GenAIRoleEnum] = Field(
default_factory=lambda: [ default_factory=lambda: [
GenAIRoleEnum.embeddings, GenAIRoleEnum.embeddings,
GenAIRoleEnum.descriptions, GenAIRoleEnum.vision,
GenAIRoleEnum.chat, GenAIRoleEnum.tools,
], ],
title="Roles", title="Roles",
description="GenAI roles (chat, descriptions, embeddings); one provider per role.", description="GenAI roles (tools, vision, embeddings); one provider per role.",
) )
provider_options: dict[str, Any] = Field( provider_options: dict[str, Any] = Field(
default={}, default={},
title="Provider options", title="Provider options",
description="Additional provider-specific options to pass to the GenAI client.", description="Additional provider-specific options to pass to the GenAI client.",
json_schema_extra={"additionalProperties": {}}, json_schema_extra={"additionalProperties": {"type": "string"}},
) )
runtime_options: dict[str, Any] = Field( runtime_options: dict[str, Any] = Field(
default={}, default={},
title="Runtime options", title="Runtime options",
description="Runtime options passed to the provider for each inference call.", description="Runtime options passed to the provider for each inference call.",
json_schema_extra={"additionalProperties": {}}, json_schema_extra={"additionalProperties": {"type": "string"}},
) )

View File

@ -92,12 +92,6 @@ class RecordExportConfig(FrigateBaseModel):
title="Export hwaccel args", title="Export hwaccel args",
description="Hardware acceleration args to use for export/transcode operations.", description="Hardware acceleration args to use for export/transcode operations.",
) )
max_concurrent: int = Field(
default=3,
ge=1,
title="Maximum concurrent exports",
description="Maximum number of export jobs to process at the same time.",
)
class RecordConfig(FrigateBaseModel): class RecordConfig(FrigateBaseModel):

View File

@ -730,9 +730,6 @@ class FrigateConfig(FrigateBaseModel):
) )
if need_detect_dimensions: if need_detect_dimensions:
logger.info(
f"detect.width and detect.height not set for {camera_config.name}, probing detect stream to determine resolution."
)
stream_info = {"width": 0, "height": 0, "fourcc": None} stream_info = {"width": 0, "height": 0, "fourcc": None}
try: try:
stream_info = stream_info_retriever.get_stream_info( stream_info = stream_info_retriever.get_stream_info(

View File

@ -1,5 +1,4 @@
import os import os
import re
from pathlib import Path from pathlib import Path
from typing import Annotated from typing import Annotated
@ -16,77 +15,8 @@ if os.path.isdir(secrets_dir) and os.access(secrets_dir, os.R_OK):
) )
# Matches a FRIGATE_* identifier following an opening brace.
_FRIGATE_IDENT_RE = re.compile(r"FRIGATE_[A-Za-z0-9_]+")
def substitute_frigate_vars(value: str) -> str:
"""Substitute `{FRIGATE_*}` placeholders in *value*.
Reproduces the subset of `str.format()` brace semantics that Frigate's
config has historically supported, while leaving unrelated brace content
(e.g. ffmpeg `%{localtime\\:...}` expressions) untouched:
* `{{` and `}}` collapse to literal `{` / `}` (the documented escape).
* `{FRIGATE_NAME}` is replaced from `FRIGATE_ENV_VARS`; an unknown name
raises `KeyError` to preserve the existing "Invalid substitution"
error path.
* A `{` that begins `{FRIGATE_` but is not a well-formed
`{FRIGATE_NAME}` placeholder raises `ValueError` (malformed
placeholder). Callers that catch `KeyError` to allow unknown-var
passthrough will still surface malformed syntax as an error.
* Any other `{` or `}` is treated as a literal and passed through.
"""
out: list[str] = []
i = 0
n = len(value)
while i < n:
ch = value[i]
if ch == "{":
# Escaped literal `{{`.
if i + 1 < n and value[i + 1] == "{":
out.append("{")
i += 2
continue
# Possible `{FRIGATE_*}` placeholder.
if value.startswith("{FRIGATE_", i):
ident_match = _FRIGATE_IDENT_RE.match(value, i + 1)
if (
ident_match is not None
and ident_match.end() < n
and value[ident_match.end()] == "}"
):
key = ident_match.group(0)
if key not in FRIGATE_ENV_VARS:
raise KeyError(key)
out.append(FRIGATE_ENV_VARS[key])
i = ident_match.end() + 1
continue
# Looks like a FRIGATE placeholder but is malformed
# (no closing brace, illegal char, format spec, etc.).
raise ValueError(
f"Malformed FRIGATE_ placeholder near {value[i : i + 32]!r}"
)
# Plain `{` — pass through (e.g. `%{localtime\:...}`).
out.append("{")
i += 1
continue
if ch == "}":
# Escaped literal `}}`.
if i + 1 < n and value[i + 1] == "}":
out.append("}")
i += 2
continue
out.append("}")
i += 1
continue
out.append(ch)
i += 1
return "".join(out)
def validate_env_string(v: str) -> str: def validate_env_string(v: str) -> str:
return substitute_frigate_vars(v) return v.format(**FRIGATE_ENV_VARS)
EnvString = Annotated[str, AfterValidator(validate_env_string)] EnvString = Annotated[str, AfterValidator(validate_env_string)]
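A few hedged examples of the substitution semantics the docstring describes, assuming FRIGATE_ENV_VARS contains FRIGATE_RTSP_PASSWORD and no FRIGATE_MISSING entry:
# Assumes FRIGATE_ENV_VARS = {"FRIGATE_RTSP_PASSWORD": "hunter2"}.
substitute_frigate_vars("rtsp://user:{FRIGATE_RTSP_PASSWORD}@cam/live")
# -> "rtsp://user:hunter2@cam/live"
substitute_frigate_vars("{{escaped}} and %{localtime\\:%Y}")
# -> "{escaped} and %{localtime\:%Y}"  (escapes collapse; plain braces pass through)
substitute_frigate_vars("{FRIGATE_MISSING}")  # raises KeyError (unknown name)
substitute_frigate_vars("{FRIGATE_BAD")       # raises ValueError (malformed placeholder)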

View File

@ -44,22 +44,6 @@ DEFAULT_ATTRIBUTE_LABEL_MAP = {
], ],
"motorcycle": ["license_plate"], "motorcycle": ["license_plate"],
} }
ATTRIBUTE_LABEL_DISPLAY_MAP = {
"amazon": "Amazon",
"an_post": "An Post",
"canada_post": "Canada Post",
"dhl": "DHL",
"dpd": "DPD",
"fedex": "FedEx",
"gls": "GLS",
"nzpost": "NZ Post",
"postnl": "PostNL",
"postnord": "PostNord",
"purolator": "Purolator",
"royal_mail": "Royal Mail",
"ups": "UPS",
"usps": "USPS",
}
LABEL_CONSOLIDATION_MAP = { LABEL_CONSOLIDATION_MAP = {
"car": 0.8, "car": 0.8,
"face": 0.5, "face": 0.5,

View File

@ -16,7 +16,7 @@ from frigate.config import CameraConfig, FrigateConfig
from frigate.const import CLIPS_DIR, UPDATE_EVENT_DESCRIPTION from frigate.const import CLIPS_DIR, UPDATE_EVENT_DESCRIPTION
from frigate.data_processing.post.semantic_trigger import SemanticTriggerProcessor from frigate.data_processing.post.semantic_trigger import SemanticTriggerProcessor
from frigate.data_processing.types import PostProcessDataEnum from frigate.data_processing.types import PostProcessDataEnum
from frigate.genai.manager import GenAIClientManager from frigate.genai import GenAIClient
from frigate.models import Event from frigate.models import Event
from frigate.types import TrackedObjectUpdateTypesEnum from frigate.types import TrackedObjectUpdateTypesEnum
from frigate.util.builtin import EventsPerSecond, InferenceSpeed from frigate.util.builtin import EventsPerSecond, InferenceSpeed
@ -41,7 +41,7 @@ class ObjectDescriptionProcessor(PostProcessorApi):
embeddings: "Embeddings", embeddings: "Embeddings",
requestor: InterProcessRequestor, requestor: InterProcessRequestor,
metrics: DataProcessorMetrics, metrics: DataProcessorMetrics,
genai_manager: GenAIClientManager, client: GenAIClient,
semantic_trigger_processor: SemanticTriggerProcessor | None, semantic_trigger_processor: SemanticTriggerProcessor | None,
): ):
super().__init__(config, metrics, None) super().__init__(config, metrics, None)
@ -49,7 +49,7 @@ class ObjectDescriptionProcessor(PostProcessorApi):
self.embeddings = embeddings self.embeddings = embeddings
self.requestor = requestor self.requestor = requestor
self.metrics = metrics self.metrics = metrics
self.genai_manager = genai_manager self.genai_client = client
self.semantic_trigger_processor = semantic_trigger_processor self.semantic_trigger_processor = semantic_trigger_processor
self.tracked_events: dict[str, list[Any]] = {} self.tracked_events: dict[str, list[Any]] = {}
self.early_request_sent: dict[str, bool] = {} self.early_request_sent: dict[str, bool] = {}
@ -198,9 +198,6 @@ class ObjectDescriptionProcessor(PostProcessorApi):
if data_type != PostProcessDataEnum.tracked_object: if data_type != PostProcessDataEnum.tracked_object:
return return
if self.genai_manager.description_client is None:
return
state: str | None = frame_data.get("state", None) state: str | None = frame_data.get("state", None)
if state is not None: if state is not None:
@ -332,12 +329,7 @@ class ObjectDescriptionProcessor(PostProcessorApi):
"""Embed the description for an event.""" """Embed the description for an event."""
start = datetime.datetime.now().timestamp() start = datetime.datetime.now().timestamp()
camera_config = self.config.cameras[str(event.camera)] camera_config = self.config.cameras[str(event.camera)]
client = self.genai_manager.description_client description = self.genai_client.generate_object_description(
if client is None:
return
description = client.generate_object_description(
camera_config, thumbnails, event camera_config, thumbnails, event
) )

View File

@ -19,15 +19,9 @@ from frigate.comms.inter_process import InterProcessRequestor
from frigate.config import FrigateConfig from frigate.config import FrigateConfig
from frigate.config.camera import CameraConfig from frigate.config.camera import CameraConfig
from frigate.config.camera.review import GenAIReviewConfig, ImageSourceEnum from frigate.config.camera.review import GenAIReviewConfig, ImageSourceEnum
from frigate.const import ( from frigate.const import CACHE_DIR, CLIPS_DIR, UPDATE_REVIEW_DESCRIPTION
ATTRIBUTE_LABEL_DISPLAY_MAP,
CACHE_DIR,
CLIPS_DIR,
UPDATE_REVIEW_DESCRIPTION,
)
from frigate.data_processing.types import PostProcessDataEnum from frigate.data_processing.types import PostProcessDataEnum
from frigate.genai import GenAIClient from frigate.genai import GenAIClient
from frigate.genai.manager import GenAIClientManager
from frigate.models import Recordings, ReviewSegment from frigate.models import Recordings, ReviewSegment
from frigate.util.builtin import EventsPerSecond, InferenceSpeed from frigate.util.builtin import EventsPerSecond, InferenceSpeed
from frigate.util.image import get_image_from_recording from frigate.util.image import get_image_from_recording
@ -47,12 +41,12 @@ class ReviewDescriptionProcessor(PostProcessorApi):
config: FrigateConfig, config: FrigateConfig,
requestor: InterProcessRequestor, requestor: InterProcessRequestor,
metrics: DataProcessorMetrics, metrics: DataProcessorMetrics,
genai_manager: GenAIClientManager, client: GenAIClient,
): ):
super().__init__(config, metrics, None) super().__init__(config, metrics, None)
self.requestor = requestor self.requestor = requestor
self.metrics = metrics self.metrics = metrics
self.genai_manager = genai_manager self.genai_client = client
self.review_desc_speed = InferenceSpeed(self.metrics.review_desc_speed) self.review_desc_speed = InferenceSpeed(self.metrics.review_desc_speed)
self.review_desc_dps = EventsPerSecond() self.review_desc_dps = EventsPerSecond()
self.review_desc_dps.start() self.review_desc_dps.start()
@ -69,12 +63,7 @@ class ReviewDescriptionProcessor(PostProcessorApi):
Estimates ~1 token per 1250 pixels. Targets 98% context utilization with safety margin. Estimates ~1 token per 1250 pixels. Targets 98% context utilization with safety margin.
Capped at 20 frames. Capped at 20 frames.
""" """
client = self.genai_manager.description_client context_size = self.genai_client.get_context_size()
if client is None:
return 3
context_size = client.get_context_size()
camera_config = self.config.cameras[camera] camera_config = self.config.cameras[camera]
detect_width = camera_config.detect.width detect_width = camera_config.detect.width
@ -122,9 +111,6 @@ class ReviewDescriptionProcessor(PostProcessorApi):
if data_type != PostProcessDataEnum.review: if data_type != PostProcessDataEnum.review:
return return
if self.genai_manager.description_client is None:
return
camera = data["after"]["camera"] camera = data["after"]["camera"]
camera_config = self.config.cameras[camera] camera_config = self.config.cameras[camera]
@ -214,7 +200,7 @@ class ReviewDescriptionProcessor(PostProcessorApi):
target=run_analysis, target=run_analysis,
args=( args=(
self.requestor, self.requestor,
self.genai_manager.description_client, self.genai_client,
self.review_desc_speed, self.review_desc_speed,
camera_config, camera_config,
final_data, final_data,
@ -330,12 +316,7 @@ class ReviewDescriptionProcessor(PostProcessorApi):
os.path.join(CLIPS_DIR, "genai-requests", f"{start_ts}-{end_ts}") os.path.join(CLIPS_DIR, "genai-requests", f"{start_ts}-{end_ts}")
).mkdir(parents=True, exist_ok=True) ).mkdir(parents=True, exist_ok=True)
client = self.genai_manager.description_client return self.genai_client.generate_review_summary(
if client is None:
return None
return client.generate_review_summary(
start_ts, start_ts,
end_ts, end_ts,
events_with_context, events_with_context,
@ -561,11 +542,10 @@ def run_analysis(
if "-verified" in label: if "-verified" in label:
continue continue
elif label in labelmap_objects: elif label in labelmap_objects:
object_type = label.replace("_", " ") object_type = titlecase(label.replace("_", " "))
if label in attribute_labels: if label in attribute_labels:
display_name = ATTRIBUTE_LABEL_DISPLAY_MAP.get(label, object_type) unified_objects.append(f"{object_type} (delivery/service)")
unified_objects.append(f"{display_name} (delivery/service)")
else: else:
unified_objects.append(object_type) unified_objects.append(object_type)
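For the frame-count heuristic described earlier in this file (~1 token per 1250 pixels, a 98% context-utilization target, and a 20-frame cap), a hedged worked example with a hypothetical 1280x720 detect stream and a 32k-token context:
# Hypothetical numbers for illustration only.
context_size = 32_000
detect_width, detect_height = 1280, 720
tokens_per_frame = (detect_width * detect_height) / 1250  # ~737 tokens per frame
budget = context_size * 0.98                              # 98% utilization target
max_frames = min(int(budget // tokens_per_frame), 20)     # -> 20 (hits the cap)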

View File

@ -1,12 +1,8 @@
"""Local only processors for handling real time object processing.""" """Local only processors for handling real time object processing."""
import logging import logging
import threading
from abc import ABC, abstractmethod from abc import ABC, abstractmethod
from collections import deque from typing import Any
from concurrent.futures import Future
from queue import Empty, Full, Queue
from typing import Any, Callable
import numpy as np import numpy as np
@ -78,123 +74,3 @@ class RealTimeProcessorApi(ABC):
payload: The updated configuration object. payload: The updated configuration object.
""" """
pass pass
def drain_results(self) -> list[dict[str, Any]]:
"""Return pending results that need IPC side-effects.
Deferred processors accumulate results on a worker thread.
The maintainer calls this each loop iteration to collect them
and perform publishes on the main thread.
Synchronous processors return an empty list (default).
"""
return []
def shutdown(self) -> None:
"""Stop any background work and release resources.
Called when the processor is being removed or the maintainer
is shutting down. Default is a no-op for synchronous processors.
"""
pass
class DeferredRealtimeProcessorApi(RealTimeProcessorApi):
"""Base class for processors that offload heavy work to a background thread.
Subclasses implement:
- process_frame(): do cheap gating + crop + copy, then call _enqueue_task()
- _process_task(task): heavy work (inference, consensus) on the worker thread
- handle_request(): optionally use _enqueue_request() for sync request/response
- expire_object(): call _enqueue_task() with a control message
The worker thread owns all processor state. No locks are needed because
only the worker mutates state. Results that need IPC are placed in
_pending_results via _emit_result(), and the maintainer drains them
each loop iteration.
"""
def __init__(
self,
config: FrigateConfig,
metrics: DataProcessorMetrics,
max_queue: int = 8,
) -> None:
super().__init__(config, metrics)
self._task_queue: Queue = Queue(maxsize=max_queue)
self._pending_results: deque[dict[str, Any]] = deque()
self._results_lock = threading.Lock()
self._stop_event = threading.Event()
self._worker = threading.Thread(
target=self._drain_loop,
daemon=True,
name=f"{type(self).__name__}_worker",
)
self._worker.start()
def _drain_loop(self) -> None:
"""Worker thread main loop — drains the task queue until stopped."""
while not self._stop_event.is_set():
try:
task = self._task_queue.get(timeout=0.5)
except Empty:
continue
if (
isinstance(task, tuple)
and len(task) == 2
and isinstance(task[1], Future)
):
# Request/response: (callable_and_args, future)
(func, args), future = task
try:
result = func(args)
future.set_result(result)
except Exception as e:
future.set_exception(e)
else:
try:
self._process_task(task)
except Exception:
logger.exception("Error processing deferred task")
def _enqueue_task(self, task: Any) -> bool:
"""Enqueue a task for the worker. Returns False if queue is full (dropped)."""
try:
self._task_queue.put_nowait(task)
return True
except Full:
logger.debug("Deferred processor queue full, dropping task")
return False
def _enqueue_request(self, func: Callable, args: Any, timeout: float = 10.0) -> Any:
"""Enqueue a request and block until the worker returns a result."""
future: Future = Future()
self._task_queue.put(((func, args), future), timeout=timeout)
return future.result(timeout=timeout)
def _emit_result(self, result: dict[str, Any]) -> None:
"""Called by the worker thread to stage a result for the maintainer."""
with self._results_lock:
self._pending_results.append(result)
def drain_results(self) -> list[dict[str, Any]]:
"""Called by the maintainer on the main thread to collect pending results."""
with self._results_lock:
results = list(self._pending_results)
self._pending_results.clear()
return results
def shutdown(self) -> None:
"""Signal the worker to stop and wait for it to finish."""
self._stop_event.set()
self._worker.join(timeout=5.0)
@abstractmethod
def _process_task(self, task: Any) -> None:
"""Process a single task on the worker thread.
Subclasses implement inference, consensus, training image saves here.
Call _emit_result() to stage results for the maintainer to publish.
"""
pass

View File

@ -1,6 +1,7 @@
"""Real time processor that works with classification tflite models.""" """Real time processor that works with classification tflite models."""
import datetime import datetime
import json
import logging import logging
import os import os
from typing import Any from typing import Any
@ -9,18 +10,25 @@ import cv2
import numpy as np import numpy as np
from frigate.comms.embeddings_updater import EmbeddingsRequestEnum from frigate.comms.embeddings_updater import EmbeddingsRequestEnum
from frigate.comms.event_metadata_updater import EventMetadataPublisher from frigate.comms.event_metadata_updater import (
EventMetadataPublisher,
EventMetadataTypeEnum,
)
from frigate.comms.inter_process import InterProcessRequestor from frigate.comms.inter_process import InterProcessRequestor
from frigate.config import FrigateConfig from frigate.config import FrigateConfig
from frigate.config.classification import CustomClassificationConfig from frigate.config.classification import (
CustomClassificationConfig,
ObjectClassificationType,
)
from frigate.const import CLIPS_DIR, MODEL_CACHE_DIR from frigate.const import CLIPS_DIR, MODEL_CACHE_DIR
from frigate.log import suppress_stderr_during from frigate.log import suppress_stderr_during
from frigate.types import TrackedObjectUpdateTypesEnum
from frigate.util.builtin import EventsPerSecond, InferenceSpeed, load_labels from frigate.util.builtin import EventsPerSecond, InferenceSpeed, load_labels
from frigate.util.image import calculate_region from frigate.util.image import calculate_region
from frigate.util.object import box_overlaps from frigate.util.object import box_overlaps
from ..types import DataProcessorMetrics from ..types import DataProcessorMetrics
from .api import DeferredRealtimeProcessorApi from .api import RealTimeProcessorApi
try: try:
from tflite_runtime.interpreter import Interpreter from tflite_runtime.interpreter import Interpreter
@ -32,7 +40,7 @@ logger = logging.getLogger(__name__)
MAX_OBJECT_CLASSIFICATIONS = 16 MAX_OBJECT_CLASSIFICATIONS = 16
class CustomStateClassificationProcessor(DeferredRealtimeProcessorApi): class CustomStateClassificationProcessor(RealTimeProcessorApi):
def __init__( def __init__(
self, self,
config: FrigateConfig, config: FrigateConfig,
@ -40,7 +48,7 @@ class CustomStateClassificationProcessor(DeferredRealtimeProcessorApi):
requestor: InterProcessRequestor, requestor: InterProcessRequestor,
metrics: DataProcessorMetrics, metrics: DataProcessorMetrics,
): ):
super().__init__(config, metrics, max_queue=4) super().__init__(config, metrics)
self.model_config = model_config self.model_config = model_config
if not self.model_config.name: if not self.model_config.name:
@ -251,34 +259,14 @@ class CustomStateClassificationProcessor(DeferredRealtimeProcessorApi):
) )
return return
cropped_frame = rgb[y1:y2, x1:x2] frame = rgb[y1:y2, x1:x2]
try: try:
resized_frame = cv2.resize(cropped_frame, (224, 224)) resized_frame = cv2.resize(frame, (224, 224))
except Exception: except Exception:
logger.warning("Failed to resize image for state classification") logger.warning("Failed to resize image for state classification")
return return
# Copy for training image saves on worker thread
crop_bgr = cv2.cvtColor(cropped_frame, cv2.COLOR_RGB2BGR)
self._enqueue_task(("classify", camera, now, resized_frame, crop_bgr))
def _process_task(self, task: Any) -> None:
kind = task[0]
if kind == "classify":
_, camera, timestamp, resized_frame, crop_bgr = task
self._classify_state(camera, timestamp, resized_frame, crop_bgr)
elif kind == "reload":
self.__build_detector()
def _classify_state(
self,
camera: str,
timestamp: float,
resized_frame: np.ndarray,
crop_bgr: np.ndarray,
) -> None:
if self.interpreter is None: if self.interpreter is None:
# When interpreter is None, always save (score is 0.0, which is < 1.0) # When interpreter is None, always save (score is 0.0, which is < 1.0)
if self._should_save_image(camera, "unknown", 0.0): if self._should_save_image(camera, "unknown", 0.0):
@ -289,18 +277,15 @@ class CustomStateClassificationProcessor(DeferredRealtimeProcessorApi):
) )
write_classification_attempt( write_classification_attempt(
self.train_dir, self.train_dir,
crop_bgr, cv2.cvtColor(frame, cv2.COLOR_RGB2BGR),
"none-none", "none-none",
timestamp, now,
"unknown", "unknown",
0.0, 0.0,
max_files=save_attempts, max_files=save_attempts,
) )
return return
if not self.tensor_input_details or not self.tensor_output_details:
return
input = np.expand_dims(resized_frame, axis=0) input = np.expand_dims(resized_frame, axis=0)
self.interpreter.set_tensor(self.tensor_input_details[0]["index"], input) self.interpreter.set_tensor(self.tensor_input_details[0]["index"], input)
self.interpreter.invoke() self.interpreter.invoke()
@ -313,7 +298,7 @@ class CustomStateClassificationProcessor(DeferredRealtimeProcessorApi):
) )
best_id = int(np.argmax(probs)) best_id = int(np.argmax(probs))
score = round(probs[best_id], 2) score = round(probs[best_id], 2)
self.__update_metrics(datetime.datetime.now().timestamp() - timestamp) self.__update_metrics(datetime.datetime.now().timestamp() - now)
detected_state = self.labelmap[best_id] detected_state = self.labelmap[best_id]
@ -325,9 +310,9 @@ class CustomStateClassificationProcessor(DeferredRealtimeProcessorApi):
) )
write_classification_attempt( write_classification_attempt(
self.train_dir, self.train_dir,
crop_bgr, cv2.cvtColor(frame, cv2.COLOR_RGB2BGR),
"none-none", "none-none",
timestamp, now,
detected_state, detected_state,
score, score,
max_files=save_attempts, max_files=save_attempts,
@ -342,14 +327,9 @@ class CustomStateClassificationProcessor(DeferredRealtimeProcessorApi):
verified_state = self.verify_state_change(camera, detected_state) verified_state = self.verify_state_change(camera, detected_state)
if verified_state is not None: if verified_state is not None:
self._emit_result( self.requestor.send_data(
{ f"{camera}/classification/{self.model_config.name}",
"type": "classification", verified_state,
"processor": "state",
"model_name": self.model_config.name,
"camera": camera,
"state": verified_state,
}
) )
def handle_request( def handle_request(
@ -357,8 +337,6 @@ class CustomStateClassificationProcessor(DeferredRealtimeProcessorApi):
) -> dict[str, Any] | None: ) -> dict[str, Any] | None:
if topic == EmbeddingsRequestEnum.reload_classification_model.value: if topic == EmbeddingsRequestEnum.reload_classification_model.value:
if request_data.get("model_name") == self.model_config.name: if request_data.get("model_name") == self.model_config.name:
def _do_reload(data: dict[str, Any]) -> dict[str, Any]:
self.__build_detector() self.__build_detector()
logger.info( logger.info(
f"Successfully loaded updated model for {self.model_config.name}" f"Successfully loaded updated model for {self.model_config.name}"
@ -367,9 +345,6 @@ class CustomStateClassificationProcessor(DeferredRealtimeProcessorApi):
"success": True, "success": True,
"message": f"Loaded {self.model_config.name} model.", "message": f"Loaded {self.model_config.name} model.",
} }
result: dict[str, Any] = self._enqueue_request(_do_reload, request_data)
return result
else: else:
return None return None
else: else:
@ -379,7 +354,7 @@ class CustomStateClassificationProcessor(DeferredRealtimeProcessorApi):
pass pass
class CustomObjectClassificationProcessor(DeferredRealtimeProcessorApi): class CustomObjectClassificationProcessor(RealTimeProcessorApi):
def __init__( def __init__(
self, self,
config: FrigateConfig, config: FrigateConfig,
@ -388,7 +363,7 @@ class CustomObjectClassificationProcessor(DeferredRealtimeProcessorApi):
requestor: InterProcessRequestor, requestor: InterProcessRequestor,
metrics: DataProcessorMetrics, metrics: DataProcessorMetrics,
): ):
super().__init__(config, metrics, max_queue=8) super().__init__(config, metrics)
self.model_config = model_config self.model_config = model_config
if not self.model_config.name: if not self.model_config.name:
@ -561,41 +536,18 @@ class CustomObjectClassificationProcessor(DeferredRealtimeProcessorApi):
) )
rgb = cv2.cvtColor(frame, cv2.COLOR_YUV2RGB_I420) rgb = cv2.cvtColor(frame, cv2.COLOR_YUV2RGB_I420)
crop = rgb[y:y2, x:x2] crop = rgb[
y:y2,
x:x2,
]
if crop.shape[:2] != (224, 224):
try: try:
resized_crop = cv2.resize(crop, (224, 224)) resized_crop = cv2.resize(crop, (224, 224))
except Exception: except Exception:
logger.warning("Failed to resize image for object classification") logger.warning("Failed to resize image for state classification")
return return
# Copy crop for training images (will be used on worker thread)
crop_bgr = cv2.cvtColor(crop, cv2.COLOR_RGB2BGR)
self._enqueue_task(
("classify", object_id, obj_data["camera"], now, resized_crop, crop_bgr)
)
def _process_task(self, task: Any) -> None:
kind = task[0]
if kind == "classify":
_, object_id, camera, timestamp, resized_crop, crop_bgr = task
self._classify_object(object_id, camera, timestamp, resized_crop, crop_bgr)
elif kind == "expire":
_, object_id = task
if object_id in self.classification_history:
self.classification_history.pop(object_id)
elif kind == "reload":
self.__build_detector()
def _classify_object(
self,
object_id: str,
camera: str,
timestamp: float,
resized_crop: np.ndarray,
crop_bgr: np.ndarray,
) -> None:
if self.interpreter is None: if self.interpreter is None:
save_attempts = ( save_attempts = (
self.model_config.save_attempts self.model_config.save_attempts
@ -604,9 +556,9 @@ class CustomObjectClassificationProcessor(DeferredRealtimeProcessorApi):
) )
write_classification_attempt( write_classification_attempt(
self.train_dir, self.train_dir,
crop_bgr, cv2.cvtColor(crop, cv2.COLOR_RGB2BGR),
object_id, object_id,
timestamp, now,
"unknown", "unknown",
0.0, 0.0,
max_files=save_attempts, max_files=save_attempts,
@ -617,10 +569,7 @@ class CustomObjectClassificationProcessor(DeferredRealtimeProcessorApi):
if object_id not in self.classification_history: if object_id not in self.classification_history:
self.classification_history[object_id] = [] self.classification_history[object_id] = []
self.classification_history[object_id].append(("unknown", 0.0, timestamp)) self.classification_history[object_id].append(("unknown", 0.0, now))
return
if not self.tensor_input_details or not self.tensor_output_details:
return return
input = np.expand_dims(resized_crop, axis=0) input = np.expand_dims(resized_crop, axis=0)
@ -635,7 +584,7 @@ class CustomObjectClassificationProcessor(DeferredRealtimeProcessorApi):
) )
best_id = int(np.argmax(probs)) best_id = int(np.argmax(probs))
score = round(probs[best_id], 2) score = round(probs[best_id], 2)
self.__update_metrics(datetime.datetime.now().timestamp() - timestamp) self.__update_metrics(datetime.datetime.now().timestamp() - now)
save_attempts = ( save_attempts = (
self.model_config.save_attempts self.model_config.save_attempts
@ -644,9 +593,9 @@ class CustomObjectClassificationProcessor(DeferredRealtimeProcessorApi):
) )
write_classification_attempt( write_classification_attempt(
self.train_dir, self.train_dir,
crop_bgr, cv2.cvtColor(crop, cv2.COLOR_RGB2BGR),
object_id, object_id,
timestamp, now,
self.labelmap[best_id], self.labelmap[best_id],
score, score,
max_files=save_attempts, max_files=save_attempts,
@ -661,39 +610,76 @@ class CustomObjectClassificationProcessor(DeferredRealtimeProcessorApi):
sub_label = self.labelmap[best_id] sub_label = self.labelmap[best_id]
logger.debug( logger.debug(
f"{self.model_config.name}: Object {object_id} passed threshold with sub_label={sub_label}, score={score}" f"{self.model_config.name}: Object {object_id} (label={obj_data['label']}) passed threshold with sub_label={sub_label}, score={score}"
) )
consensus_label, consensus_score = self.get_weighted_score( consensus_label, consensus_score = self.get_weighted_score(
object_id, sub_label, score, timestamp object_id, sub_label, score, now
) )
logger.debug( logger.debug(
f"{self.model_config.name}: get_weighted_score returned consensus_label={consensus_label}, consensus_score={consensus_score} for {object_id}" f"{self.model_config.name}: get_weighted_score returned consensus_label={consensus_label}, consensus_score={consensus_score} for {object_id}"
) )
if consensus_label is not None and self.model_config.object_config is not None: if consensus_label is not None:
self._emit_result( camera = obj_data["camera"]
{ logger.debug(
"type": "classification", f"{self.model_config.name}: Publishing sub_label={consensus_label} for {obj_data['label']} object {object_id} on {camera}"
"processor": "object",
"model_name": self.model_config.name,
"classification_type": self.model_config.object_config.classification_type,
"object_id": object_id,
"camera": camera,
"timestamp": timestamp,
"label": consensus_label,
"score": consensus_score,
}
) )
def handle_request( if (
self, topic: str, request_data: dict[str, Any] self.model_config.object_config.classification_type
) -> dict[str, Any] | None: == ObjectClassificationType.sub_label
):
self.sub_label_publisher.publish(
(object_id, consensus_label, consensus_score),
EventMetadataTypeEnum.sub_label,
)
self.requestor.send_data(
"tracked_object_update",
json.dumps(
{
"type": TrackedObjectUpdateTypesEnum.classification,
"id": object_id,
"camera": camera,
"timestamp": now,
"model": self.model_config.name,
"sub_label": consensus_label,
"score": consensus_score,
}
),
)
elif (
self.model_config.object_config.classification_type
== ObjectClassificationType.attribute
):
self.sub_label_publisher.publish(
(
object_id,
self.model_config.name,
consensus_label,
consensus_score,
),
EventMetadataTypeEnum.attribute.value,
)
self.requestor.send_data(
"tracked_object_update",
json.dumps(
{
"type": TrackedObjectUpdateTypesEnum.classification,
"id": object_id,
"camera": camera,
"timestamp": now,
"model": self.model_config.name,
"attribute": consensus_label,
"score": consensus_score,
}
),
)
def handle_request(self, topic: str, request_data: dict) -> dict | None:
if topic == EmbeddingsRequestEnum.reload_classification_model.value: if topic == EmbeddingsRequestEnum.reload_classification_model.value:
if request_data.get("model_name") == self.model_config.name: if request_data.get("model_name") == self.model_config.name:
def _do_reload(data: dict[str, Any]) -> dict[str, Any]:
self.__build_detector() self.__build_detector()
logger.info( logger.info(
f"Successfully loaded updated model for {self.model_config.name}" f"Successfully loaded updated model for {self.model_config.name}"
@ -702,16 +688,14 @@ class CustomObjectClassificationProcessor(DeferredRealtimeProcessorApi):
"success": True, "success": True,
"message": f"Loaded {self.model_config.name} model.", "message": f"Loaded {self.model_config.name} model.",
} }
result: dict[str, Any] = self._enqueue_request(_do_reload, request_data)
return result
else: else:
return None return None
else: else:
return None return None
def expire_object(self, object_id: str, camera: str) -> None: def expire_object(self, object_id: str, camera: str) -> None:
self._enqueue_task(("expire", object_id)) if object_id in self.classification_history:
self.classification_history.pop(object_id)
def write_classification_attempt( def write_classification_attempt(

View File

@ -2,7 +2,6 @@
import base64 import base64
import datetime import datetime
import json
import logging import logging
import threading import threading
from multiprocessing.synchronize import Event as MpEvent from multiprocessing.synchronize import Event as MpEvent
@ -34,7 +33,6 @@ from frigate.config.camera.updater import (
CameraConfigUpdateEnum, CameraConfigUpdateEnum,
CameraConfigUpdateSubscriber, CameraConfigUpdateSubscriber,
) )
from frigate.config.classification import ObjectClassificationType
from frigate.data_processing.common.license_plate.model import ( from frigate.data_processing.common.license_plate.model import (
LicensePlateModelRunner, LicensePlateModelRunner,
) )
@ -63,7 +61,6 @@ from frigate.db.sqlitevecq import SqliteVecQueueDatabase
from frigate.events.types import EventTypeEnum, RegenerateDescriptionEnum from frigate.events.types import EventTypeEnum, RegenerateDescriptionEnum
from frigate.genai import GenAIClientManager from frigate.genai import GenAIClientManager
from frigate.models import Event, Recordings, ReviewSegment, Trigger from frigate.models import Event, Recordings, ReviewSegment, Trigger
from frigate.types import TrackedObjectUpdateTypesEnum
from frigate.util.builtin import serialize from frigate.util.builtin import serialize
from frigate.util.file import get_event_thumbnail_bytes from frigate.util.file import get_event_thumbnail_bytes
from frigate.util.image import SharedMemoryFrameManager from frigate.util.image import SharedMemoryFrameManager
@ -95,7 +92,6 @@ class EmbeddingMaintainer(threading.Thread):
CameraConfigUpdateEnum.add, CameraConfigUpdateEnum.add,
CameraConfigUpdateEnum.remove, CameraConfigUpdateEnum.remove,
CameraConfigUpdateEnum.object_genai, CameraConfigUpdateEnum.object_genai,
CameraConfigUpdateEnum.review,
CameraConfigUpdateEnum.review_genai, CameraConfigUpdateEnum.review_genai,
CameraConfigUpdateEnum.semantic_search, CameraConfigUpdateEnum.semantic_search,
], ],
@ -206,13 +202,15 @@ class EmbeddingMaintainer(threading.Thread):
# post processors # post processors
self.post_processors: list[PostProcessorApi] = [] self.post_processors: list[PostProcessorApi] = []
if any(c.review.genai.enabled_in_config for c in self.config.cameras.values()): if self.genai_manager.vision_client is not None and any(
c.review.genai.enabled_in_config for c in self.config.cameras.values()
):
self.post_processors.append( self.post_processors.append(
ReviewDescriptionProcessor( ReviewDescriptionProcessor(
self.config, self.config,
self.requestor, self.requestor,
self.metrics, self.metrics,
self.genai_manager, self.genai_manager.vision_client,
) )
) )
@ -250,14 +248,16 @@ class EmbeddingMaintainer(threading.Thread):
) )
self.post_processors.append(semantic_trigger_processor) self.post_processors.append(semantic_trigger_processor)
if any(c.objects.genai.enabled_in_config for c in self.config.cameras.values()): if self.genai_manager.vision_client is not None and any(
c.objects.genai.enabled_in_config for c in self.config.cameras.values()
):
self.post_processors.append( self.post_processors.append(
ObjectDescriptionProcessor( ObjectDescriptionProcessor(
self.config, self.config,
self.embeddings, self.embeddings,
self.requestor, self.requestor,
self.metrics, self.metrics,
self.genai_manager, self.genai_manager.vision_client,
semantic_trigger_processor, semantic_trigger_processor,
) )
) )
@ -277,15 +277,10 @@ class EmbeddingMaintainer(threading.Thread):
self._process_recordings_updates() self._process_recordings_updates()
self._process_review_updates() self._process_review_updates()
self._process_frame_updates() self._process_frame_updates()
self._process_deferred_results()
self._expire_dedicated_lpr() self._expire_dedicated_lpr()
self._process_finalized() self._process_finalized()
self._process_event_metadata() self._process_event_metadata()
# Shutdown deferred processors
for processor in self.realtime_processors:
processor.shutdown()
self.config_updater.stop() self.config_updater.stop()
self.enrichment_config_subscriber.stop() self.enrichment_config_subscriber.stop()
self.event_subscriber.stop() self.event_subscriber.stop()
@ -310,10 +305,6 @@ class EmbeddingMaintainer(threading.Thread):
self._handle_custom_classification_update(topic, payload) self._handle_custom_classification_update(topic, payload)
return return
if topic == "config/genai":
self.config.genai = payload
self.genai_manager.update_config(self.config)
# Broadcast to all processors — each decides if the topic is relevant # Broadcast to all processors — each decides if the topic is relevant
for processor in self.realtime_processors: for processor in self.realtime_processors:
processor.update_config(topic, payload) processor.update_config(topic, payload)
@ -328,9 +319,10 @@ class EmbeddingMaintainer(threading.Thread):
model_name = topic.split("/")[-1] model_name = topic.split("/")[-1]
if model_config is None: if model_config is None:
remaining = [] self.realtime_processors = [
for processor in self.realtime_processors: processor
if ( for processor in self.realtime_processors
if not (
isinstance( isinstance(
processor, processor,
( (
@ -339,11 +331,8 @@ class EmbeddingMaintainer(threading.Thread):
), ),
) )
and processor.model_config.name == model_name and processor.model_config.name == model_name
): )
processor.shutdown() ]
else:
remaining.append(processor)
self.realtime_processors = remaining
logger.info( logger.info(
f"Successfully removed classification processor for model: {model_name}" f"Successfully removed classification processor for model: {model_name}"
@ -711,68 +700,6 @@ class EmbeddingMaintainer(threading.Thread):
self.frame_manager.close(frame_name) self.frame_manager.close(frame_name)
def _process_deferred_results(self) -> None:
"""Drain results from deferred processors and perform IPC side-effects."""
for processor in self.realtime_processors:
results = processor.drain_results()
for result in results:
if result.get("type") != "classification":
continue
if result["processor"] == "state":
self.requestor.send_data(
f"{result['camera']}/classification/{result['model_name']}",
result["state"],
)
elif result["processor"] == "object":
object_id = result["object_id"]
camera = result["camera"]
timestamp = result["timestamp"]
model_name = result["model_name"]
label = result["label"]
score = result["score"]
classification_type = result["classification_type"]
if classification_type == ObjectClassificationType.sub_label:
self.event_metadata_publisher.publish(
(object_id, label, score),
EventMetadataTypeEnum.sub_label,
)
self.requestor.send_data(
"tracked_object_update",
json.dumps(
{
"type": TrackedObjectUpdateTypesEnum.classification,
"id": object_id,
"camera": camera,
"timestamp": timestamp,
"model": model_name,
"sub_label": label,
"score": score,
}
),
)
elif classification_type == ObjectClassificationType.attribute:
self.event_metadata_publisher.publish(
(object_id, model_name, label, score),
EventMetadataTypeEnum.attribute.value,
)
self.requestor.send_data(
"tracked_object_update",
json.dumps(
{
"type": TrackedObjectUpdateTypesEnum.classification,
"id": object_id,
"camera": camera,
"timestamp": timestamp,
"model": model_name,
"attribute": label,
"score": score,
}
),
)
def _embed_thumbnail(self, event_id: str, thumbnail: bytes) -> None: def _embed_thumbnail(self, event_id: str, thumbnail: bytes) -> None:
"""Embed the thumbnail for an event.""" """Embed the thumbnail for an event."""
if not self.config.semantic_search.enabled: if not self.config.semantic_search.enabled:

View File

@ -106,8 +106,8 @@ When forming your description:
## Response Field Guidelines ## Response Field Guidelines
Respond with a JSON object matching the provided schema. Field-specific guidance: Respond with a JSON object matching the provided schema. Field-specific guidance:
- `scene`: Describe how the sequence begins, then the progression of events: all significant movements and actions in order. For example, if a vehicle arrives and then a person exits, describe both sequentially. For named subjects (those with a `` separator in "Objects in Scene"), always use their name; do not replace them with generic terms. For unnamed objects (e.g., "person", "car"), refer to them naturally with articles (e.g., "a person", "the car"). Your description should align with and support the threat level you assign. - `scene`: Describe how the sequence begins, then the progression of events: all significant movements and actions in order. For example, if a vehicle arrives and then a person exits, describe both sequentially. Always use subject names from "Objects in Scene"; do not replace named subjects with generic terms like "a person" or "the individual". Your description should align with and support the threat level you assign.
- `title`: Characterize **what took place and where**: interpret the overall purpose or outcome, do not simply compress the scene description into fewer words. Include the relevant location (zone, area, or entry point). For named subjects, always use their name. For unnamed objects, refer to them naturally with articles. No editorial qualifiers like "routine" or "suspicious." - `title`: Characterize **what took place and where**: interpret the overall purpose or outcome, do not simply compress the scene description into fewer words. Include the relevant location (zone, area, or entry point). Always include subject names from "Objects in Scene"; do not replace named subjects with generic terms. No editorial qualifiers like "routine" or "suspicious."
- `potential_threat_level`: Must be consistent with your scene description and the activity patterns above. - `potential_threat_level`: Must be consistent with your scene description and the activity patterns above.
{get_concern_prompt()} {get_concern_prompt()}
@ -190,7 +190,6 @@ Each line represents a detection state, not necessarily unique individuals. The
if any("" in obj for obj in review_data["unified_objects"]): if any("" in obj for obj in review_data["unified_objects"]):
metadata.potential_threat_level = 0 metadata.potential_threat_level = 0
metadata.title = metadata.title[0].upper() + metadata.title[1:]
metadata.time = review_data["start"] metadata.time = review_data["start"]
return metadata return metadata
except Exception as e: except Exception as e:
@ -200,9 +199,6 @@ Each line represents a detection state, not necessarily unique individuals. The
) )
return None return None
else: else:
logger.debug(
f"Invalid response received from GenAI provider for review description on {review_data['camera']}. Response: {response}",
)
return None return None
def generate_review_summary( def generate_review_summary(
@ -324,22 +320,6 @@ Guidelines:
"""Submit a request to the provider.""" """Submit a request to the provider."""
return None return None
@property
def supports_vision(self) -> bool:
"""Whether the model supports vision/image input.
Defaults to True for cloud providers. Providers that can detect
capability at runtime (e.g. llama.cpp) should override this.
"""
return True
def list_models(self) -> list[str]:
"""Return the list of model names available from this provider.
Providers should override this to query their backend.
"""
return []
def get_context_size(self) -> int: def get_context_size(self) -> int:
"""Get the context window size for this provider in tokens.""" """Get the context window size for this provider in tokens."""
return 4096 return 4096

View File

@ -82,14 +82,6 @@ class OpenAIClient(GenAIClient):
return str(result.choices[0].message.content.strip()) return str(result.choices[0].message.content.strip())
return None return None
def list_models(self) -> list[str]:
"""Return available model IDs from Azure OpenAI."""
try:
return sorted(m.id for m in self.provider.models.list().data)
except Exception as e:
logger.warning("Failed to list Azure OpenAI models: %s", e)
return []
def get_context_size(self) -> int: def get_context_size(self) -> int:
"""Get the context window size for Azure OpenAI.""" """Get the context window size for Azure OpenAI."""
return 128000 return 128000

View File

@ -87,14 +87,6 @@ class GeminiClient(GenAIClient):
return None return None
return description return description
def list_models(self) -> list[str]:
"""Return available model names from Gemini."""
try:
return sorted(m.name or "" for m in self.provider.models.list())
except Exception as e:
logger.warning("Failed to list Gemini models: %s", e)
return []
def get_context_size(self) -> int: def get_context_size(self) -> int:
"""Get the context window size for Gemini.""" """Get the context window size for Gemini."""
# Gemini Pro Vision has a 1M token context window # Gemini Pro Vision has a 1M token context window

View File

@ -38,122 +38,18 @@ class LlamaCppClient(GenAIClient):
provider: str | None # base_url provider: str | None # base_url
provider_options: dict[str, Any] provider_options: dict[str, Any]
_context_size: int | None
_supports_vision: bool
_supports_audio: bool
_supports_tools: bool
def _init_provider(self) -> str | None: def _init_provider(self) -> str | None:
"""Initialize the client and query model metadata from the server.""" """Initialize the client."""
self.provider_options = { self.provider_options = {
**self.genai_config.provider_options, **self.genai_config.provider_options,
} }
self._context_size = None return (
self._supports_vision = False
self._supports_audio = False
self._supports_tools = False
base_url = (
self.genai_config.base_url.rstrip("/") self.genai_config.base_url.rstrip("/")
if self.genai_config.base_url if self.genai_config.base_url
else None else None
) )
if base_url is None:
return None
configured_model = self.genai_config.model
# Query /v1/models to validate the configured model exists
try:
response = requests.get(
f"{base_url}/v1/models",
timeout=10,
)
response.raise_for_status()
models_data = response.json()
model_found = False
for model in models_data.get("data", []):
model_ids = {model.get("id")}
for alias in model.get("aliases", []):
model_ids.add(alias)
if configured_model in model_ids:
model_found = True
break
if not model_found:
available = []
for m in models_data.get("data", []):
available.append(m.get("id", "unknown"))
for alias in m.get("aliases", []):
available.append(alias)
logger.error(
"Model '%s' not found on llama.cpp server. Available models: %s",
configured_model,
available,
)
return None
except Exception as e:
logger.warning(
"Failed to query llama.cpp /v1/models endpoint: %s. "
"Model validation skipped.",
e,
)
# Query /props for context size, modalities, and tool support.
# The standard /props?model=<name> endpoint works with llama-server.
# If it fails, try the llama-swap per-model passthrough endpoint which
# returns props for a specific model without requiring it to be loaded.
try:
try:
response = requests.get(
f"{base_url}/props",
params={"model": configured_model},
timeout=10,
)
response.raise_for_status()
props = response.json()
except Exception:
response = requests.get(
f"{base_url}/upstream/{configured_model}/props",
timeout=10,
)
response.raise_for_status()
props = response.json()
# Context size from server runtime config
default_settings = props.get("default_generation_settings", {})
n_ctx = default_settings.get("n_ctx")
if n_ctx:
self._context_size = int(n_ctx)
# Modalities (vision, audio)
modalities = props.get("modalities", {})
self._supports_vision = modalities.get("vision", False)
self._supports_audio = modalities.get("audio", False)
# Tool support from chat template capabilities
chat_caps = props.get("chat_template_caps", {})
self._supports_tools = chat_caps.get("supports_tools", False)
logger.info(
"llama.cpp model '%s' initialized — context: %s, vision: %s, audio: %s, tools: %s",
configured_model,
self._context_size or "unknown",
self._supports_vision,
self._supports_audio,
self._supports_tools,
)
except Exception as e:
logger.warning(
"Failed to query llama.cpp /props endpoint: %s. "
"Using defaults for context size and capabilities.",
e,
)
return base_url
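For reference, the fields read above come from a /props payload shaped roughly like this (a trimmed, illustrative sketch written as a Python dict; real llama-server responses carry many more keys):

props = {
    "default_generation_settings": {"n_ctx": 8192},
    "modalities": {"vision": True, "audio": False},
    "chat_template_caps": {"supports_tools": True},
}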
def _send( def _send(
self, self,
prompt: str, prompt: str,
@ -221,56 +117,9 @@ class LlamaCppClient(GenAIClient):
logger.warning("llama.cpp returned an error: %s", str(e)) logger.warning("llama.cpp returned an error: %s", str(e))
return None return None
@property
def supports_vision(self) -> bool:
"""Whether the loaded model supports vision/image input."""
return self._supports_vision
@property
def supports_audio(self) -> bool:
"""Whether the loaded model supports audio input."""
return self._supports_audio
@property
def supports_tools(self) -> bool:
"""Whether the loaded model supports tool/function calling."""
return self._supports_tools
def list_models(self) -> list[str]:
"""Return available model IDs from the llama.cpp server."""
base_url = self.provider or (
self.genai_config.base_url.rstrip("/")
if self.genai_config.base_url
else None
)
if base_url is None:
return []
try:
response = requests.get(f"{base_url}/v1/models", timeout=10)
response.raise_for_status()
models = []
for m in response.json().get("data", []):
models.append(m.get("id", "unknown"))
for alias in m.get("aliases", []):
models.append(alias)
return sorted(models)
except Exception as e:
logger.warning("Failed to list llama.cpp models: %s", e)
return []
def get_context_size(self) -> int: def get_context_size(self) -> int:
"""Get the context window size for llama.cpp. """Get the context window size for llama.cpp."""
return int(self.provider_options.get("context_size", 4096))
Resolution order:
1. provider_options["context_size"] (user override)
2. Value queried from llama.cpp server at init
3. Default fallback of 4096
"""
if "context_size" in self.provider_options:
return int(self.provider_options["context_size"])
if self._context_size is not None:
return self._context_size
return 4096
def _build_payload( def _build_payload(
self, self,

View File

@ -1,15 +1,15 @@
"""GenAI client manager for Frigate. """GenAI client manager for Frigate.
Manages GenAI provider clients from Frigate config. Clients are created lazily Manages GenAI provider clients from Frigate config. Configuration is read only
on first access so that providers whose roles are never used (e.g. chat when in _update_config(); no other code should read config.genai. Exposes clients
no chat feature is active) are never initialized. by role: tool_client, vision_client, embeddings_client.
""" """
import logging import logging
from typing import TYPE_CHECKING, Optional from typing import TYPE_CHECKING, Optional
from frigate.config import FrigateConfig from frigate.config import FrigateConfig
from frigate.config.camera.genai import GenAIConfig, GenAIRoleEnum from frigate.config.camera.genai import GenAIRoleEnum
if TYPE_CHECKING: if TYPE_CHECKING:
from frigate.genai import GenAIClient from frigate.genai import GenAIClient
@ -21,98 +21,68 @@ class GenAIClientManager:
"""Manages GenAI provider clients from Frigate config.""" """Manages GenAI provider clients from Frigate config."""
def __init__(self, config: FrigateConfig) -> None: def __init__(self, config: FrigateConfig) -> None:
self._configs: dict[str, GenAIConfig] = {} self._tool_client: Optional[GenAIClient] = None
self._role_map: dict[GenAIRoleEnum, str] = {} self._vision_client: Optional[GenAIClient] = None
self._clients: dict[str, "GenAIClient"] = {} self._embeddings_client: Optional[GenAIClient] = None
self.update_config(config) self.update_config(config)
def update_config(self, config: FrigateConfig) -> None: def update_config(self, config: FrigateConfig) -> None:
"""Store provider configs and build the role→name mapping. """Build role clients from current Frigate config.genai.
Called from __init__ and can be called again when config is reloaded. Called from __init__ and can be called again when config is reloaded.
Clients are not created here; they are instantiated lazily on first Each role (tools, vision, embeddings) gets the client for the provider
access via a role property or list_models(). that has that role in its roles list.
""" """
from frigate.genai import PROVIDERS, load_providers from frigate.genai import PROVIDERS, load_providers
self._configs = {} self._tool_client = None
self._role_map = {} self._vision_client = None
self._clients = {} self._embeddings_client = None
if not config.genai: if not config.genai:
return return
load_providers() load_providers()
for name, genai_cfg in config.genai.items(): for _name, genai_cfg in config.genai.items():
if not genai_cfg.provider: if not genai_cfg.provider:
continue continue
if genai_cfg.provider not in PROVIDERS: provider_cls = PROVIDERS.get(genai_cfg.provider)
if not provider_cls:
logger.warning( logger.warning(
"Unknown GenAI provider %s in config, skipping.", "Unknown GenAI provider %s in config, skipping.",
genai_cfg.provider, genai_cfg.provider,
) )
continue continue
self._configs[name] = genai_cfg
for role in genai_cfg.roles:
self._role_map[role] = name
def _get_client(self, name: str) -> "Optional[GenAIClient]":
"""Return the client for *name*, creating it on first access."""
if name in self._clients:
return self._clients[name]
from frigate.genai import PROVIDERS
genai_cfg = self._configs.get(name)
if not genai_cfg:
return None
if not genai_cfg.provider:
return None
provider_cls = PROVIDERS.get(genai_cfg.provider)
if not provider_cls:
return None
try: try:
client: "GenAIClient" = provider_cls(genai_cfg) client = provider_cls(genai_cfg)
except Exception as e: except Exception as e:
logger.exception( logger.exception(
"Failed to create GenAI client for provider %s: %s", "Failed to create GenAI client for provider %s: %s",
genai_cfg.provider, genai_cfg.provider,
e, e,
) )
return None continue
self._clients[name] = client for role in genai_cfg.roles:
return client if role == GenAIRoleEnum.tools:
self._tool_client = client
elif role == GenAIRoleEnum.vision:
self._vision_client = client
elif role == GenAIRoleEnum.embeddings:
self._embeddings_client = client
@property @property
def chat_client(self) -> "Optional[GenAIClient]": def tool_client(self) -> "Optional[GenAIClient]":
"""Client configured for the chat role (e.g. chat with function calling).""" """Client configured for the tools role (e.g. chat with function calling)."""
name = self._role_map.get(GenAIRoleEnum.chat) return self._tool_client
return self._get_client(name) if name else None
@property @property
def description_client(self) -> "Optional[GenAIClient]": def vision_client(self) -> "Optional[GenAIClient]":
"""Client configured for the descriptions role (e.g. review descriptions, object descriptions).""" """Client configured for the vision role (e.g. review descriptions, object descriptions)."""
name = self._role_map.get(GenAIRoleEnum.descriptions) return self._vision_client
return self._get_client(name) if name else None
@property @property
def embeddings_client(self) -> "Optional[GenAIClient]": def embeddings_client(self) -> "Optional[GenAIClient]":
"""Client configured for the embeddings role.""" """Client configured for the embeddings role."""
name = self._role_map.get(GenAIRoleEnum.embeddings) return self._embeddings_client
return self._get_client(name) if name else None
def list_models(self) -> dict[str, list[str]]:
"""Return available models keyed by config entry name."""
result: dict[str, list[str]] = {}
for name in self._configs:
client = self._get_client(name)
if client:
result[name] = client.list_models()
return result
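A minimal usage sketch of the lazy manager above; it assumes `config` is an already-loaded FrigateConfig whose genai section assigns the chat role to some provider:

manager = GenAIClientManager(config)

# No provider client exists yet; the first property access creates one.
chat = manager.chat_client
if chat is not None:
    print(chat.get_context_size())

# list_models() instantiates any remaining configured providers on demand.
print(manager.list_models())  # e.g. {"my_provider": ["model-a", "model-b"]}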

View File

@ -113,15 +113,6 @@ class OllamaClient(GenAIClient):
schema = response_format.get("json_schema", {}).get("schema") schema = response_format.get("json_schema", {}).get("schema")
if schema: if schema:
ollama_options["format"] = self._clean_schema_for_ollama(schema) ollama_options["format"] = self._clean_schema_for_ollama(schema)
logger.debug(
"Ollama generate request: model=%s, prompt_len=%s, image_count=%s, "
"has_format=%s, options=%s",
self.genai_config.model,
len(prompt),
len(images) if images else 0,
"format" in ollama_options,
{k: v for k, v in ollama_options.items() if k != "format"},
)
result = self.provider.generate( result = self.provider.generate(
self.genai_config.model, self.genai_config.model,
prompt, prompt,
@ -129,24 +120,9 @@ class OllamaClient(GenAIClient):
**ollama_options, **ollama_options,
) )
logger.debug( logger.debug(
"Ollama generate response: done=%s, done_reason=%s, eval_count=%s, " f"Ollama tokens used: eval_count={result.get('eval_count')}, prompt_eval_count={result.get('prompt_eval_count')}"
"prompt_eval_count=%s, response_len=%s",
result.get("done"),
result.get("done_reason"),
result.get("eval_count"),
result.get("prompt_eval_count"),
len(result.get("response", "") or ""),
) )
response_text = str(result["response"]).strip() return str(result["response"]).strip()
if not response_text:
logger.warning(
"Ollama returned a blank response for model %s (done_reason=%s, "
"eval_count=%s). Check model output, ensure thinking is disabled.",
self.genai_config.model,
result.get("done_reason"),
result.get("eval_count"),
)
return response_text
except ( except (
TimeoutException, TimeoutException,
ResponseError, ResponseError,
@ -156,29 +132,6 @@ class OllamaClient(GenAIClient):
logger.warning("Ollama returned an error: %s", str(e)) logger.warning("Ollama returned an error: %s", str(e))
return None return None
def list_models(self) -> list[str]:
"""Return available model names from the Ollama server."""
client = self.provider
if client is None:
# Provider init may have failed due to invalid model, but we can
# still list available models with a fresh client.
if not self.genai_config.base_url:
return []
try:
client = ApiClient(
host=self.genai_config.base_url, timeout=self.timeout
)
except Exception:
return []
try:
response = client.list()
return sorted(
m.get("name", m.get("model", "")) for m in response.get("models", [])
)
except Exception as e:
logger.warning("Failed to list Ollama models: %s", e)
return []
def get_context_size(self) -> int: def get_context_size(self) -> int:
"""Get the context window size for Ollama.""" """Get the context window size for Ollama."""
return int( return int(

View File

@ -80,36 +80,12 @@ class OpenAIClient(GenAIClient):
and hasattr(result, "choices") and hasattr(result, "choices")
and len(result.choices) > 0 and len(result.choices) > 0
): ):
message = result.choices[0].message return str(result.choices[0].message.content.strip())
content = message.content
if not content:
# When reasoning is enabled for some OpenAI backends the actual response
# is incorrectly placed in reasoning_content instead of content.
# This is buggy/incorrect behavior — reasoning should not be
# enabled for these models.
reasoning_content = getattr(message, "reasoning_content", None)
if reasoning_content:
logger.warning(
"Response content was empty but reasoning_content was provided; "
"reasoning appears to be enabled and should be disabled for this model."
)
content = reasoning_content
return str(content.strip()) if content else None
return None return None
except (TimeoutException, Exception) as e: except (TimeoutException, Exception) as e:
logger.warning("OpenAI returned an error: %s", str(e)) logger.warning("OpenAI returned an error: %s", str(e))
return None return None
def list_models(self) -> list[str]:
"""Return available model IDs from the OpenAI-compatible API."""
try:
return sorted(m.id for m in self.provider.models.list().data)
except Exception as e:
logger.warning("Failed to list OpenAI models: %s", e)
return []
def get_context_size(self) -> int: def get_context_size(self) -> int:
"""Get the context window size for OpenAI.""" """Get the context window size for OpenAI."""
if self.context_size is not None: if self.context_size is not None:

View File

@ -1,504 +0,0 @@
"""Export job management with queued background execution."""
import logging
import os
import threading
import time
from dataclasses import dataclass
from pathlib import Path
from queue import Full, Queue
from typing import Any, Callable, Optional
from peewee import DoesNotExist
from frigate.comms.inter_process import InterProcessRequestor
from frigate.config import FrigateConfig
from frigate.const import UPDATE_JOB_STATE
from frigate.jobs.job import Job
from frigate.models import Export
from frigate.record.export import PlaybackSourceEnum, RecordingExporter
from frigate.types import JobStatusTypesEnum
logger = logging.getLogger(__name__)
# Maximum number of jobs that can sit in the queue waiting to run.
# Prevents a runaway client from unbounded memory growth.
MAX_QUEUED_EXPORT_JOBS = 100
# Minimum interval between progress broadcasts. FFmpeg can emit progress
# events many times per second; we coalesce them so the WebSocket isn't
# flooded with redundant updates.
PROGRESS_BROADCAST_MIN_INTERVAL = 1.0
# Delay before removing a completed job from the in-memory map. Gives the
# frontend a chance to receive the final state via WebSocket before SWR
# polling takes over.
COMPLETED_JOB_CLEANUP_DELAY = 5.0
class ExportQueueFullError(RuntimeError):
"""Raised when the export queue is at capacity."""
@dataclass
class ExportJob(Job):
"""Job state for export operations."""
job_type: str = "export"
camera: str = ""
name: Optional[str] = None
image_path: Optional[str] = None
export_case_id: Optional[str] = None
request_start_time: float = 0.0
request_end_time: float = 0.0
playback_source: str = PlaybackSourceEnum.recordings.value
ffmpeg_input_args: Optional[str] = None
ffmpeg_output_args: Optional[str] = None
cpu_fallback: bool = False
current_step: str = "queued"
progress_percent: float = 0.0
def to_dict(self) -> dict[str, Any]:
"""Convert to dictionary for API responses.
Only exposes fields that are part of the public ExportJobModel schema.
Internal execution details (image_path, ffmpeg args, cpu_fallback) are
intentionally omitted so they don't leak through the API.
"""
return {
"id": self.id,
"job_type": self.job_type,
"status": self.status,
"camera": self.camera,
"name": self.name,
"export_case_id": self.export_case_id,
"request_start_time": self.request_start_time,
"request_end_time": self.request_end_time,
"start_time": self.start_time,
"end_time": self.end_time,
"error_message": self.error_message,
"results": self.results,
"current_step": self.current_step,
"progress_percent": self.progress_percent,
}
class ExportQueueWorker(threading.Thread):
"""Worker that executes queued exports."""
def __init__(self, manager: "ExportJobManager", worker_index: int) -> None:
super().__init__(
daemon=True,
name=f"export_queue_worker_{worker_index}",
)
self.manager = manager
def run(self) -> None:
while True:
job = self.manager.queue.get()
try:
self.manager.run_job(job)
except Exception:
logger.exception(
"Export queue worker failed while processing %s", job.id
)
finally:
self.manager.queue.task_done()
class JobStatePublisher:
"""Publishes a single job state payload to the dispatcher.
Each call opens a short-lived :py:class:`InterProcessRequestor`, sends
the payload, and closes the socket. The short-lived design avoids
REQ/REP state corruption that would arise from sharing a single REQ
socket across the API thread and worker threads (REQ sockets must
strictly alternate send/recv).
With the 1s broadcast throttle in place, socket creation overhead is
negligible. The class also exists so tests can substitute a no-op
instance instead of stubbing ZMQ; see ``BaseTestHttp.setUp``.
"""
def publish(self, payload: dict[str, Any]) -> None:
try:
requestor = InterProcessRequestor()
except Exception as err:
logger.warning("Failed to open job state requestor: %s", err)
return
try:
requestor.send_data(UPDATE_JOB_STATE, payload)
except Exception as err:
logger.debug("Job state broadcast failed: %s", err)
finally:
try:
requestor.stop()
except Exception:
pass
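The strict send/recv alternation the docstring refers to is easy to reproduce with pyzmq (a minimal sketch; the endpoint name is made up):

import zmq

ctx = zmq.Context.instance()
req = ctx.socket(zmq.REQ)
req.connect("ipc:///tmp/frigate_example")  # illustrative endpoint

req.send_json({"topic": "update_job_state"})
# A second send before a recv violates the REQ state machine and raises
# zmq.error.ZMQError (EFSM); two threads sharing one REQ socket hit this
# as soon as their sends interleave.
req.send_json({"topic": "update_job_state"})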
class ExportJobManager:
"""Concurrency-limited manager for queued export jobs."""
def __init__(
self,
config: FrigateConfig,
max_concurrent: int,
max_queued: int = MAX_QUEUED_EXPORT_JOBS,
publisher: Optional[JobStatePublisher] = None,
) -> None:
self.config = config
self.max_concurrent = max(1, max_concurrent)
self.queue: Queue[ExportJob] = Queue(maxsize=max(1, max_queued))
self.jobs: dict[str, ExportJob] = {}
self.lock = threading.Lock()
self.workers: list[ExportQueueWorker] = []
self.started = False
self.publisher = publisher if publisher is not None else JobStatePublisher()
self._last_broadcast_monotonic: float = 0.0
self._broadcast_throttle_lock = threading.Lock()
def _broadcast_all_jobs(self, force: bool = False) -> None:
"""Publish aggregate export job state via the job_state WS topic.
When ``force`` is False, broadcasts within
``PROGRESS_BROADCAST_MIN_INTERVAL`` of the previous one are skipped
to avoid flooding the WebSocket with rapid progress updates.
``force`` bypasses the throttle and is used for status transitions
(enqueue/start/finish) where the frontend needs the latest state.
"""
now = time.monotonic()
with self._broadcast_throttle_lock:
if (
not force
and now - self._last_broadcast_monotonic
< PROGRESS_BROADCAST_MIN_INTERVAL
):
return
self._last_broadcast_monotonic = now
with self.lock:
active = [
j
for j in self.jobs.values()
if j.status in (JobStatusTypesEnum.queued, JobStatusTypesEnum.running)
]
any_running = any(j.status == JobStatusTypesEnum.running for j in active)
payload: dict[str, Any] = {
"job_type": "export",
"status": "running" if any_running else "queued",
"results": {"jobs": [j.to_dict() for j in active]},
}
try:
self.publisher.publish(payload)
except Exception as err:
logger.warning("Publisher raised during job state broadcast: %s", err)
def _make_progress_callback(self, job: ExportJob) -> Callable[[str, float], None]:
"""Build a callback the exporter can invoke during execution."""
def on_progress(step: str, percent: float) -> None:
job.current_step = step
job.progress_percent = percent
self._broadcast_all_jobs()
return on_progress
def _schedule_job_cleanup(self, job_id: str) -> None:
"""Drop a completed job from ``self.jobs`` after a short delay."""
def cleanup() -> None:
with self.lock:
self.jobs.pop(job_id, None)
timer = threading.Timer(COMPLETED_JOB_CLEANUP_DELAY, cleanup)
timer.daemon = True
timer.start()
def ensure_started(self) -> None:
"""Ensure worker threads are started exactly once."""
with self.lock:
if self.started:
self._restart_dead_workers_locked()
return
for index in range(self.max_concurrent):
worker = ExportQueueWorker(self, index)
worker.start()
self.workers.append(worker)
self.started = True
def _restart_dead_workers_locked(self) -> None:
for index, worker in enumerate(self.workers):
if worker.is_alive():
continue
logger.error(
"Export queue worker %s died unexpectedly, restarting", worker.name
)
replacement = ExportQueueWorker(self, index)
replacement.start()
self.workers[index] = replacement
def enqueue(self, job: ExportJob) -> str:
"""Queue a job for background execution.
Raises ExportQueueFullError if the queue is at capacity.
"""
self.ensure_started()
try:
self.queue.put_nowait(job)
except Full as err:
raise ExportQueueFullError(
"Export queue is full; try again once current exports finish"
) from err
with self.lock:
self.jobs[job.id] = job
self._broadcast_all_jobs(force=True)
return job.id
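A minimal enqueue sketch. It assumes the Job base class supplies `id` and `status` (not shown in this diff) and that `config` is a loaded FrigateConfig:

job = ExportJob(
    id="export_abc123",  # hypothetical ID scheme
    camera="driveway",
    name="Front gate incident",
    request_start_time=1_700_000_000.0,
    request_end_time=1_700_000_600.0,
)

manager = ExportJobManager(config, max_concurrent=2)
try:
    manager.enqueue(job)
except ExportQueueFullError:
    # Translate to an HTTP 429 (or similar) instead of blocking the caller.
    ...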
def get_job(self, job_id: str) -> Optional[ExportJob]:
"""Get a job by ID."""
with self.lock:
return self.jobs.get(job_id)
def list_active_jobs(self) -> list[ExportJob]:
"""List queued and running jobs."""
with self.lock:
return [
job
for job in self.jobs.values()
if job.status in (JobStatusTypesEnum.queued, JobStatusTypesEnum.running)
]
def cancel_queued_jobs_for_case(self, case_id: str) -> list[ExportJob]:
"""Cancel queued export jobs assigned to a deleted case."""
cancelled_jobs: list[ExportJob] = []
with self.lock:
with self.queue.mutex:
retained_jobs: list[ExportJob] = []
while self.queue.queue:
job = self.queue.queue.popleft()
if (
job.export_case_id == case_id
and job.status == JobStatusTypesEnum.queued
):
job.status = JobStatusTypesEnum.cancelled
job.end_time = time.time()
cancelled_jobs.append(job)
continue
retained_jobs.append(job)
self.queue.queue.extend(retained_jobs)
if cancelled_jobs:
self.queue.unfinished_tasks = max(
0,
self.queue.unfinished_tasks - len(cancelled_jobs),
)
if self.queue.unfinished_tasks == 0:
self.queue.all_tasks_done.notify_all()
self.queue.not_full.notify_all()
return cancelled_jobs
def available_slots(self) -> int:
"""Approximate number of additional jobs that could be queued right now.
Uses Queue.qsize() which is best-effort; callers should treat the
result as advisory since another thread could enqueue between
checking and enqueueing.
"""
return max(0, self.queue.maxsize - self.queue.qsize())
def run_job(self, job: ExportJob) -> None:
"""Execute a queued export job."""
job.status = JobStatusTypesEnum.running
job.start_time = time.time()
self._broadcast_all_jobs(force=True)
exporter = RecordingExporter(
self.config,
job.id,
job.camera,
job.name,
job.image_path,
int(job.request_start_time),
int(job.request_end_time),
PlaybackSourceEnum(job.playback_source),
job.export_case_id,
job.ffmpeg_input_args,
job.ffmpeg_output_args,
job.cpu_fallback,
on_progress=self._make_progress_callback(job),
)
try:
exporter.run()
export = Export.get_or_none(Export.id == job.id)
if export is None:
job.status = JobStatusTypesEnum.failed
job.error_message = "Export failed"
elif export.in_progress:
job.status = JobStatusTypesEnum.failed
job.error_message = "Export did not complete"
else:
job.status = JobStatusTypesEnum.success
job.results = {
"export_id": export.id,
"export_case_id": export.export_case_id,
"video_path": export.video_path,
"thumb_path": export.thumb_path,
}
except DoesNotExist:
job.status = JobStatusTypesEnum.failed
job.error_message = "Export not found"
except Exception as err:
logger.exception("Export job %s failed: %s", job.id, err)
job.status = JobStatusTypesEnum.failed
job.error_message = str(err)
finally:
job.end_time = time.time()
self._broadcast_all_jobs(force=True)
self._schedule_job_cleanup(job.id)
_job_manager: Optional[ExportJobManager] = None
_job_manager_lock = threading.Lock()
def _get_max_concurrent(config: FrigateConfig) -> int:
return int(config.record.export.max_concurrent)
def reap_stale_exports() -> None:
"""Sweep Export rows stuck with in_progress=True from previous sessions.
On Frigate startup no export job is alive yet, so any in_progress=True
row must be a leftover from a previous session that crashed, was killed
mid-export, or returned early from RecordingExporter.run() without
flipping the flag. For each stale row we either:
- delete the row (and any thumb) if the video file is missing or empty,
since there is nothing worth recovering
- flip in_progress to False if the video file exists on disk and is
non-empty, treating it as a completed export the user can manage
through the normal UI
Must only be called when the export job manager is certain to have no
active jobs, i.e., at Frigate startup, before any worker runs.
All exceptions are caught and logged; the caller does not need to wrap
this in a try/except. A failure on a single row will not stop the rest
of the sweep, and a failure in the top-level query will log and return.
"""
try:
stale_exports = list(Export.select().where(Export.in_progress == True)) # noqa: E712
except Exception:
logger.exception("Failed to query stale in-progress exports")
return
if not stale_exports:
logger.debug("No stale in-progress exports found on startup")
return
flipped = 0
deleted = 0
errored = 0
for export in stale_exports:
try:
video_path = export.video_path
has_usable_file = False
if video_path:
try:
has_usable_file = os.path.getsize(video_path) > 0
except OSError:
has_usable_file = False
if has_usable_file:
# Unassign from any case on recovery: the user should
# re-triage a recovered export rather than have it silently
# reappear inside a case they curated.
Export.update(
{Export.in_progress: False, Export.export_case: None}
).where(Export.id == export.id).execute()
flipped += 1
logger.info(
"Recovered stale in-progress export %s (file intact on disk)",
export.id,
)
continue
if export.thumb_path:
Path(export.thumb_path).unlink(missing_ok=True)
if video_path:
Path(video_path).unlink(missing_ok=True)
Export.delete().where(Export.id == export.id).execute()
deleted += 1
logger.info(
"Deleted stale in-progress export %s (no usable file on disk)",
export.id,
)
except Exception:
errored += 1
logger.exception("Failed to reap stale export %s", export.id)
logger.info(
"Stale export cleanup complete: %d recovered, %d deleted, %d errored",
flipped,
deleted,
errored,
)
def get_export_job_manager(config: FrigateConfig) -> ExportJobManager:
"""Get or create the singleton export job manager."""
global _job_manager
with _job_manager_lock:
if _job_manager is None:
_job_manager = ExportJobManager(config, _get_max_concurrent(config))
_job_manager.ensure_started()
return _job_manager
def start_export_job(config: FrigateConfig, job: ExportJob) -> str:
"""Queue an export job and return its ID."""
return get_export_job_manager(config).enqueue(job)
def get_export_job(config: FrigateConfig, job_id: str) -> Optional[ExportJob]:
"""Get a queued or completed export job by ID."""
return get_export_job_manager(config).get_job(job_id)
def list_active_export_jobs(config: FrigateConfig) -> list[ExportJob]:
"""List queued and running export jobs."""
return get_export_job_manager(config).list_active_jobs()
def cancel_queued_export_jobs_for_case(
config: FrigateConfig, case_id: str
) -> list[ExportJob]:
"""Cancel queued export jobs that still point at a deleted case."""
return get_export_job_manager(config).cancel_queued_jobs_for_case(case_id)
def available_export_queue_slots(config: FrigateConfig) -> int:
"""Approximate number of additional export jobs that could be queued now."""
return get_export_job_manager(config).available_slots()
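Sketch of the typical API-handler call path through these module-level helpers, reusing the hypothetical `config` and `job` from the enqueue sketch above:

job_id = start_export_job(config, job)
state = get_export_job(config, job_id)
if state is not None:
    print(state.current_step, state.progress_percent)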

View File

@ -121,12 +121,11 @@ class VLMWatchRunner(threading.Thread):
def _run_iteration(self) -> float: def _run_iteration(self) -> float:
"""Run one VLM analysis iteration. Returns seconds until next run.""" """Run one VLM analysis iteration. Returns seconds until next run."""
chat_client = self.genai_manager.chat_client vision_client = (
if chat_client is None or not chat_client.supports_vision: self.genai_manager.vision_client or self.genai_manager.tool_client
logger.warning(
"VLM watch job %s: no chat client with vision support available",
self.job.id,
) )
if vision_client is None:
logger.warning("VLM watch job %s: no vision client available", self.job.id)
return 30 return 30
frame = self.frame_processor.get_current_frame(self.job.camera, {}) frame = self.frame_processor.get_current_frame(self.job.camera, {})
@ -164,7 +163,7 @@ class VLMWatchRunner(threading.Thread):
} }
) )
response = chat_client.chat_with_tools( response = vision_client.chat_with_tools(
messages=self.conversation, messages=self.conversation,
tools=None, tools=None,
tool_choice=None, tool_choice=None,

View File

@ -152,12 +152,21 @@ class OnvifController:
cam = self.camera_configs[cam_name] cam = self.camera_configs[cam_name]
try: try:
user = cam.onvif.user
password = cam.onvif.password
if user is not None and isinstance(user, bytes):
user = user.decode("utf-8")
if password is not None and isinstance(password, bytes):
password = password.decode("utf-8")
self.cams[cam_name] = { self.cams[cam_name] = {
"onvif": ONVIFCamera( "onvif": ONVIFCamera(
cam.onvif.host, cam.onvif.host,
cam.onvif.port, cam.onvif.port,
cam.onvif.user, user,
cam.onvif.password, password,
wsdl_dir=str(Path(find_spec("onvif").origin).parent / "wsdl"), wsdl_dir=str(Path(find_spec("onvif").origin).parent / "wsdl"),
adjust_time=cam.onvif.ignore_time_mismatch, adjust_time=cam.onvif.ignore_time_mismatch,
encrypt=not cam.onvif.tls_insecure, encrypt=not cam.onvif.tls_insecure,
@ -450,15 +459,15 @@ class OnvifController:
presets = [] presets = []
for preset in presets: for preset in presets:
preset_name = getattr(preset, "Name") or f"preset {preset['token']}"
# Some cameras (e.g. Reolink) return UTF-8 bytes that zeep decodes
# as latin-1, producing mojibake. Detect that and repair it by
# round-tripping through latin-1 -> utf-8.
try:
    preset_name = preset_name.encode("latin-1").decode("utf-8")
except (UnicodeEncodeError, UnicodeDecodeError):
    pass
self.cams[camera_name]["presets"][preset_name.lower()] = preset["token"]

# Ensure preset name is a Unicode string and handle UTF-8 characters correctly
preset_name = getattr(preset, "Name") or f"preset {preset['token']}"
if isinstance(preset_name, bytes):
    preset_name = preset_name.decode("utf-8")
# Convert to lowercase while preserving UTF-8 characters
preset_name_lower = preset_name.lower()
self.cams[camera_name]["presets"][preset_name_lower] = preset["token"]
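The latin-1/utf-8 round trip above is easier to verify in isolation (a self-contained illustration):

raw = "é".encode("utf-8")         # camera sends UTF-8 bytes b'\xc3\xa9'
mojibake = raw.decode("latin-1")  # zeep mis-decodes them as 'Ã©'
assert mojibake.encode("latin-1").decode("utf-8") == "é"
# A correctly decoded non-ASCII name (a real 'é') fails the utf-8 decode,
# so the except branch leaves it untouched.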
# get list of supported features # get list of supported features
supported_features = [] supported_features = []
@ -686,6 +695,9 @@ class OnvifController:
self.cams[camera_name]["active"] = False self.cams[camera_name]["active"] = False
async def _move_to_preset(self, camera_name: str, preset: str) -> None: async def _move_to_preset(self, camera_name: str, preset: str) -> None:
if isinstance(preset, bytes):
preset = preset.decode("utf-8")
preset = preset.lower() preset = preset.lower()
if preset not in self.cams[camera_name]["presets"]: if preset not in self.cams[camera_name]["presets"]:

View File

@ -4,14 +4,13 @@ import datetime
import logging import logging
import os import os
import random import random
import re
import shutil import shutil
import string import string
import subprocess as sp import subprocess as sp
import threading import threading
from enum import Enum from enum import Enum
from pathlib import Path from pathlib import Path
from typing import Callable, Optional from typing import Optional
from peewee import DoesNotExist from peewee import DoesNotExist
@ -37,24 +36,22 @@ logger = logging.getLogger(__name__)
DEFAULT_TIME_LAPSE_FFMPEG_ARGS = "-vf setpts=0.04*PTS -r 30" DEFAULT_TIME_LAPSE_FFMPEG_ARGS = "-vf setpts=0.04*PTS -r 30"
TIMELAPSE_DATA_INPUT_ARGS = "-an -skip_frame nokey" TIMELAPSE_DATA_INPUT_ARGS = "-an -skip_frame nokey"
# Matches the setpts factor used in timelapse exports (e.g. setpts=0.04*PTS). # ffmpeg flags that can read from or write to arbitrary files.
# Captures the floating-point factor so we can scale expected duration. # filter flags are blocked because source filters like movie= and
SETPTS_FACTOR_RE = re.compile(r"setpts=([0-9]*\.?[0-9]+)\*PTS") # amovie= can read arbitrary files from the filesystem.
# ffmpeg flags that can read from or write to arbitrary files
BLOCKED_FFMPEG_ARGS = frozenset( BLOCKED_FFMPEG_ARGS = frozenset(
{ {
"-i", "-i",
"-filter_script", "-filter_script",
"-vstats_file",
"-passlogfile",
"-sdp_file",
"-dump_attachment",
"-filter_complex", "-filter_complex",
"-lavfi", "-lavfi",
"-vf", "-vf",
"-af", "-af",
"-filter", "-filter",
"-vstats_file",
"-passlogfile",
"-sdp_file",
"-dump_attachment",
"-attach", "-attach",
} }
) )
@ -65,11 +62,8 @@ def validate_ffmpeg_args(args: str) -> tuple[bool, str]:
Blocks: Blocks:
- The -i flag and other flags that read/write arbitrary files - The -i flag and other flags that read/write arbitrary files
- Filter flags (can read files via movie=/amovie= source filters)
- Absolute/relative file paths (potential extra outputs) - Absolute/relative file paths (potential extra outputs)
- URLs and ffmpeg protocol references (data exfiltration) - URLs and ffmpeg protocol references (data exfiltration)
Admin users skip this validation entirely since they are trusted.
""" """
if not args or not args.strip(): if not args or not args.strip():
return True, "" return True, ""
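A hedged usage sketch; the exact rejection message comes from the body this hunk truncates, so only the boolean is asserted here:

ok, _ = validate_ffmpeg_args("-c:v libx264 -preset fast")  # expected: (True, "")
ok, reason = validate_ffmpeg_args("-i /etc/passwd")        # expected: ok is False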
@ -121,7 +115,6 @@ class RecordingExporter(threading.Thread):
ffmpeg_input_args: Optional[str] = None, ffmpeg_input_args: Optional[str] = None,
ffmpeg_output_args: Optional[str] = None, ffmpeg_output_args: Optional[str] = None,
cpu_fallback: bool = False, cpu_fallback: bool = False,
on_progress: Optional[Callable[[str, float], None]] = None,
) -> None: ) -> None:
super().__init__() super().__init__()
self.config = config self.config = config
@ -136,213 +129,10 @@ class RecordingExporter(threading.Thread):
self.ffmpeg_input_args = ffmpeg_input_args self.ffmpeg_input_args = ffmpeg_input_args
self.ffmpeg_output_args = ffmpeg_output_args self.ffmpeg_output_args = ffmpeg_output_args
self.cpu_fallback = cpu_fallback self.cpu_fallback = cpu_fallback
self.on_progress = on_progress
# ensure export thumb dir # ensure export thumb dir
Path(os.path.join(CLIPS_DIR, "export")).mkdir(exist_ok=True) Path(os.path.join(CLIPS_DIR, "export")).mkdir(exist_ok=True)
def _emit_progress(self, step: str, percent: float) -> None:
"""Invoke the progress callback if one was supplied."""
if self.on_progress is None:
return
try:
self.on_progress(step, max(0.0, min(100.0, percent)))
except Exception:
logger.exception("Export progress callback failed")
def _expected_output_duration_seconds(self) -> float:
"""Compute the expected duration of the output video in seconds.
Users often request a wide time range (e.g. a full hour) when only
a few minutes of recordings actually live on disk for that span,
so the requested range overstates the work and progress would
plateau very early. We sum the actual saved seconds from the
Recordings/Previews tables and use that as the input duration.
Timelapse exports then scale this by the setpts factor.
"""
requested_duration = max(0.0, float(self.end_time - self.start_time))
recorded = self._sum_source_duration_seconds()
input_duration = (
recorded if recorded is not None and recorded > 0 else requested_duration
)
if not self.ffmpeg_output_args:
return input_duration
match = SETPTS_FACTOR_RE.search(self.ffmpeg_output_args)
if match is None:
return input_duration
try:
factor = float(match.group(1))
except ValueError:
return input_duration
if factor <= 0:
return input_duration
return input_duration * factor
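A quick worked example of the scaling above: a one-hour request with only 20 minutes of recordings on disk, exported as a timelapse with the default "-vf setpts=0.04*PTS -r 30" args:

input_duration = 20 * 60            # 1200 s actually recorded
factor = 0.04                       # parsed by SETPTS_FACTOR_RE
expected = input_duration * factor  # 48 s of expected output video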
def _sum_source_duration_seconds(self) -> Optional[float]:
"""Sum saved-video seconds inside [start_time, end_time].
Queries Recordings or Previews depending on the playback source,
clamps each segment to the requested range, and returns the total.
Returns ``None`` on any error so the caller can fall back to the
requested range duration without losing progress reporting.
"""
try:
if self.playback_source == PlaybackSourceEnum.recordings:
rows = (
Recordings.select(Recordings.start_time, Recordings.end_time)
.where(
Recordings.start_time.between(self.start_time, self.end_time)
| Recordings.end_time.between(self.start_time, self.end_time)
| (
(self.start_time > Recordings.start_time)
& (self.end_time < Recordings.end_time)
)
)
.where(Recordings.camera == self.camera)
.iterator()
)
else:
rows = (
Previews.select(Previews.start_time, Previews.end_time)
.where(
Previews.start_time.between(self.start_time, self.end_time)
| Previews.end_time.between(self.start_time, self.end_time)
| (
(self.start_time > Previews.start_time)
& (self.end_time < Previews.end_time)
)
)
.where(Previews.camera == self.camera)
.iterator()
)
except Exception:
logger.exception(
"Failed to sum source duration for export %s", self.export_id
)
return None
total = 0.0
try:
for row in rows:
clipped_start = max(float(row.start_time), float(self.start_time))
clipped_end = min(float(row.end_time), float(self.end_time))
if clipped_end > clipped_start:
total += clipped_end - clipped_start
except Exception:
logger.exception(
"Failed to read recording rows for export %s", self.export_id
)
return None
return total
def _inject_progress_flags(self, ffmpeg_cmd: list[str]) -> list[str]:
"""Insert FFmpeg progress reporting flags before the output path.
``-progress pipe:2`` writes structured key=value lines to stderr,
``-nostats`` suppresses the noisy default stats output.
"""
if not ffmpeg_cmd:
return ffmpeg_cmd
return ffmpeg_cmd[:-1] + ["-progress", "pipe:2", "-nostats", ffmpeg_cmd[-1]]
def _run_ffmpeg_with_progress(
self,
ffmpeg_cmd: list[str],
playlist_lines: str | list[str],
step: str = "encoding",
) -> tuple[int, str]:
"""Run an FFmpeg export command, parsing progress events from stderr.
Returns ``(returncode, captured_stderr)``. Stdout is left attached to
the parent process so we don't have to drain it (and risk a deadlock
if the buffer fills). Progress percent is computed against the
expected output duration; values are clamped to [0, 100] inside
:py:meth:`_emit_progress`.
"""
cmd = ["nice", "-n", str(PROCESS_PRIORITY_LOW)] + self._inject_progress_flags(
ffmpeg_cmd
)
if isinstance(playlist_lines, list):
stdin_payload = "\n".join(playlist_lines)
else:
stdin_payload = playlist_lines
expected_duration = self._expected_output_duration_seconds()
self._emit_progress(step, 0.0)
proc = sp.Popen(
cmd,
stdin=sp.PIPE,
stderr=sp.PIPE,
text=True,
encoding="ascii",
errors="replace",
)
assert proc.stdin is not None
assert proc.stderr is not None
try:
proc.stdin.write(stdin_payload)
except (BrokenPipeError, OSError):
# FFmpeg may have rejected the input early; still wait for it
# to terminate so the returncode is meaningful.
pass
finally:
try:
proc.stdin.close()
except (BrokenPipeError, OSError):
pass
captured: list[str] = []
try:
for raw_line in proc.stderr:
captured.append(raw_line)
line = raw_line.strip()
if not line:
continue
if line.startswith("out_time_us="):
if expected_duration <= 0:
continue
try:
out_time_us = int(line.split("=", 1)[1])
except (ValueError, IndexError):
continue
if out_time_us < 0:
continue
out_seconds = out_time_us / 1_000_000.0
percent = (out_seconds / expected_duration) * 100.0
self._emit_progress(step, percent)
elif line == "progress=end":
self._emit_progress(step, 100.0)
break
except Exception:
logger.exception("Failed reading FFmpeg progress for %s", self.export_id)
proc.wait()
# Drain any remaining stderr so callers can log it on failure.
try:
remaining = proc.stderr.read()
if remaining:
captured.append(remaining)
except Exception:
pass
return proc.returncode, "".join(captured)
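For reference, the stderr stream parsed above looks roughly like this under `-progress pipe:2 -nostats` (abridged and illustrative; the key set varies by ffmpeg build):

frame=1440
fps=480.2
out_time_us=48000000
out_time=00:00:48.000000
speed=16x
progress=continue
progress=end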
def get_datetime_from_timestamp(self, timestamp: int) -> str: def get_datetime_from_timestamp(self, timestamp: int) -> str:
# return in iso format # return in iso format
return datetime.datetime.fromtimestamp(timestamp).strftime("%Y-%m-%d %H:%M:%S") return datetime.datetime.fromtimestamp(timestamp).strftime("%Y-%m-%d %H:%M:%S")
@ -615,7 +405,6 @@ class RecordingExporter(threading.Thread):
logger.debug( logger.debug(
f"Beginning export for {self.camera} from {self.start_time} to {self.end_time}" f"Beginning export for {self.camera} from {self.start_time} to {self.end_time}"
) )
self._emit_progress("preparing", 0.0)
export_name = ( export_name = (
self.user_provided_name self.user_provided_name
or f"{self.camera.replace('_', ' ')} {self.get_datetime_from_timestamp(self.start_time)} {self.get_datetime_from_timestamp(self.end_time)}" or f"{self.camera.replace('_', ' ')} {self.get_datetime_from_timestamp(self.start_time)} {self.get_datetime_from_timestamp(self.end_time)}"
@ -653,23 +442,16 @@ class RecordingExporter(threading.Thread):
except DoesNotExist: except DoesNotExist:
return return
# When neither custom ffmpeg arg is set the default path uses p = sp.run(
# `-c copy` (stream copy — no re-encoding). Report that as a ["nice", "-n", str(PROCESS_PRIORITY_LOW)] + ffmpeg_cmd,
# distinct step so the UI doesn't mislabel a remux as encoding. input="\n".join(playlist_lines),
# The retry branch below always re-encodes because cpu_fallback encoding="ascii",
# requires custom args; it stays "encoding_retry". capture_output=True,
is_stream_copy = (
self.ffmpeg_input_args is None and self.ffmpeg_output_args is None
)
initial_step = "copying" if is_stream_copy else "encoding"
returncode, stderr = self._run_ffmpeg_with_progress(
ffmpeg_cmd, playlist_lines, step=initial_step
) )
# If export failed and cpu_fallback is enabled, retry without hwaccel # If export failed and cpu_fallback is enabled, retry without hwaccel
if ( if (
returncode != 0 p.returncode != 0
and self.cpu_fallback and self.cpu_fallback
and self.ffmpeg_input_args is not None and self.ffmpeg_input_args is not None
and self.ffmpeg_output_args is not None and self.ffmpeg_output_args is not None
@ -687,21 +469,23 @@ class RecordingExporter(threading.Thread):
video_path, use_hwaccel=False video_path, use_hwaccel=False
) )
returncode, stderr = self._run_ffmpeg_with_progress( p = sp.run(
ffmpeg_cmd, playlist_lines, step="encoding_retry" ["nice", "-n", str(PROCESS_PRIORITY_LOW)] + ffmpeg_cmd,
input="\n".join(playlist_lines),
encoding="ascii",
capture_output=True,
) )
if returncode != 0: if p.returncode != 0:
logger.error( logger.error(
f"Failed to export {self.playback_source.value} for command {' '.join(ffmpeg_cmd)}" f"Failed to export {self.playback_source.value} for command {' '.join(ffmpeg_cmd)}"
) )
logger.error(stderr) logger.error(p.stderr)
Path(video_path).unlink(missing_ok=True) Path(video_path).unlink(missing_ok=True)
Export.delete().where(Export.id == self.export_id).execute() Export.delete().where(Export.id == self.export_id).execute()
Path(thumb_path).unlink(missing_ok=True) Path(thumb_path).unlink(missing_ok=True)
return return
else: else:
self._emit_progress("finalizing", 100.0)
Export.update({Export.in_progress: False}).where( Export.update({Export.in_progress: False}).where(
Export.id == self.export_id Export.id == self.export_id
).execute() ).execute()

View File

@ -372,7 +372,6 @@ class RecordingMaintainer(threading.Thread):
) )
record_config = self.config.cameras[camera].record record_config = self.config.cameras[camera].record
segment_stats: SegmentInfo | None = None
highest = None highest = None
if record_config.continuous.days > 0: if record_config.continuous.days > 0:
@ -402,18 +401,8 @@ class RecordingMaintainer(threading.Thread):
if highest == "continuous" if highest == "continuous"
else RetainModeEnum.motion else RetainModeEnum.motion
) )
segment_stats = self.segment_stats(camera, start_time, end_time)
# Here we only check if we should move the segment based on non-object recording retention
# we will always want to check for overlapping review items below before dropping the segment
if not segment_stats.should_discard_segment(record_mode):
return await self.move_segment( return await self.move_segment(
camera, camera, start_time, end_time, duration, cache_path, record_mode
start_time,
end_time,
duration,
cache_path,
segment_stats,
) )
# we fell through the continuous / motion check, so we need to check the review items # we fell through the continuous / motion check, so we need to check the review items
@ -446,11 +435,6 @@ class RecordingMaintainer(threading.Thread):
if review.severity == "alert" if review.severity == "alert"
else record_config.detections.retain.mode else record_config.detections.retain.mode
) )
if segment_stats is None:
segment_stats = self.segment_stats(camera, start_time, end_time)
if not segment_stats.should_discard_segment(record_mode):
# move from cache to recordings immediately # move from cache to recordings immediately
return await self.move_segment( return await self.move_segment(
camera, camera,
@ -458,18 +442,12 @@ class RecordingMaintainer(threading.Thread):
end_time, end_time,
duration, duration,
cache_path, cache_path,
segment_stats, record_mode,
) )
else: # if it doesn't overlap with a review item, go ahead and drop the segment
self.drop_segment(cache_path) # if it ends more than the configured pre_capture for the camera
return None # BUT only if continuous/motion is NOT enabled (otherwise wait for processing)
elif highest is None:
# if it doesn't overlap with a review item, drop the segment once it
# ends more than event_pre_capture before the most recently processed
# frame. at this point we've already decided not to keep it for
# continuous/motion retention (either disabled or segment_stats said
# discard), so waiting longer just fills the cache.
else:
camera_info = self.object_recordings_info[camera] camera_info = self.object_recordings_info[camera]
most_recently_processed_frame_time = ( most_recently_processed_frame_time = (
camera_info[-1][0] if len(camera_info) > 0 else 0 camera_info[-1][0] if len(camera_info) > 0 else 0
@ -477,7 +455,6 @@ class RecordingMaintainer(threading.Thread):
retain_cutoff = datetime.datetime.fromtimestamp( retain_cutoff = datetime.datetime.fromtimestamp(
most_recently_processed_frame_time - record_config.event_pre_capture most_recently_processed_frame_time - record_config.event_pre_capture
).astimezone(datetime.timezone.utc) ).astimezone(datetime.timezone.utc)
if end_time < retain_cutoff: if end_time < retain_cutoff:
self.drop_segment(cache_path) self.drop_segment(cache_path)
@ -601,8 +578,15 @@ class RecordingMaintainer(threading.Thread):
end_time: datetime.datetime, end_time: datetime.datetime,
duration: float, duration: float,
cache_path: str, cache_path: str,
segment_info: SegmentInfo, store_mode: RetainModeEnum,
) -> Optional[dict[str, Any]]: ) -> Optional[dict[str, Any]]:
segment_info = self.segment_stats(camera, start_time, end_time)
# check if the segment shouldn't be stored
if segment_info.should_discard_segment(store_mode):
self.drop_segment(cache_path)
return None
# directory will be in utc due to start_time being in utc # directory will be in utc due to start_time being in utc
directory = os.path.join( directory = os.path.join(
RECORD_DIR, RECORD_DIR,

View File

@ -197,7 +197,7 @@ class StorageMaintainer(threading.Thread):
# check if need to delete retained segments # check if need to delete retained segments
if deleted_segments_size < hourly_bandwidth: if deleted_segments_size < hourly_bandwidth:
logger.error( logger.error(
f"Could not clear {hourly_bandwidth} MB, currently {deleted_segments_size:.2f} MB have been cleared. Retained recordings must be deleted." f"Could not clear {hourly_bandwidth} MB, currently {deleted_segments_size} MB have been cleared. Retained recordings must be deleted."
) )
recordings = ( recordings = (
Recordings.select( Recordings.select(
@ -225,7 +225,7 @@ class StorageMaintainer(threading.Thread):
# this file was not found so we must assume no space was cleaned up # this file was not found so we must assume no space was cleaned up
pass pass
else: else:
logger.info(f"Cleaned up {deleted_segments_size:.2f} MB of recordings") logger.info(f"Cleaned up {deleted_segments_size} MB of recordings")
logger.debug(f"Expiring {len(deleted_recordings)} recordings") logger.debug(f"Expiring {len(deleted_recordings)} recordings")
# delete up to 100,000 at a time # delete up to 100,000 at a time

View File

@ -2,7 +2,6 @@ import datetime
import logging import logging
import os import os
import unittest import unittest
from unittest.mock import patch
from fastapi import Request from fastapi import Request
from fastapi.testclient import TestClient from fastapi.testclient import TestClient
@ -15,7 +14,6 @@ from frigate.api.fastapi_app import create_fastapi_app
from frigate.config import FrigateConfig from frigate.config import FrigateConfig
from frigate.const import BASE_DIR, CACHE_DIR from frigate.const import BASE_DIR, CACHE_DIR
from frigate.debug_replay import DebugReplayManager from frigate.debug_replay import DebugReplayManager
from frigate.jobs.export import JobStatePublisher
from frigate.models import Event, Recordings, ReviewSegment from frigate.models import Event, Recordings, ReviewSegment
from frigate.review.types import SeverityEnum from frigate.review.types import SeverityEnum
from frigate.test.const import TEST_DB, TEST_DB_CLEANUPS from frigate.test.const import TEST_DB, TEST_DB_CLEANUPS
@ -46,19 +44,6 @@ class BaseTestHttp(unittest.TestCase):
self.db = SqliteQueueDatabase(TEST_DB) self.db = SqliteQueueDatabase(TEST_DB)
self.db.bind(models) self.db.bind(models)
# The export job manager broadcasts via JobStatePublisher on
# enqueue/start/finish. There is no dispatcher process bound to
# the IPC socket in tests, so a real publish() would block on
# recv_json forever. Replace publish with a no-op for the
# lifetime of this test; the lookup goes through the class so any
# already-instantiated publisher (the singleton manager's) picks
# up the no-op too.
publisher_patch = patch.object(
JobStatePublisher, "publish", lambda self, payload: None
)
publisher_patch.start()
self.addCleanup(publisher_patch.stop)
self.minimal_config = { self.minimal_config = {
"mqtt": {"host": "mqtt"}, "mqtt": {"host": "mqtt"},
"cameras": { "cameras": {

File diff suppressed because it is too large

View File

@ -1,303 +0,0 @@
"""Tests for the find_similar_objects chat tool."""
import asyncio
import os
import tempfile
import unittest
from types import SimpleNamespace
from unittest.mock import MagicMock
from playhouse.sqlite_ext import SqliteExtDatabase
from frigate.api.chat import (
_execute_find_similar_objects,
get_tool_definitions,
)
from frigate.api.chat_util import (
DESCRIPTION_WEIGHT,
VISUAL_WEIGHT,
distance_to_score,
fuse_scores,
)
from frigate.embeddings.util import ZScoreNormalization
from frigate.models import Event
def _run(coro):
return asyncio.new_event_loop().run_until_complete(coro)
class TestDistanceToScore(unittest.TestCase):
def test_lower_distance_gives_higher_score(self):
stats = ZScoreNormalization()
# Seed the stats with a small distribution so stddev > 0.
stats._update([0.1, 0.2, 0.3, 0.4, 0.5])
close_score = distance_to_score(0.1, stats)
far_score = distance_to_score(0.5, stats)
self.assertGreater(close_score, far_score)
self.assertGreaterEqual(close_score, 0.0)
self.assertLessEqual(close_score, 1.0)
self.assertGreaterEqual(far_score, 0.0)
self.assertLessEqual(far_score, 1.0)
def test_uninitialized_stats_returns_neutral_score(self):
stats = ZScoreNormalization() # n == 0, stddev == 0
self.assertEqual(distance_to_score(0.3, stats), 0.5)
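The mapping itself is not shown in this diff. One plausible sketch that satisfies these tests (neutral 0.5 on empty stats, monotonically decreasing in distance, bounded in [0, 1]) is a logistic squash of the z-score; it assumes ZScoreNormalization exposes `mean` and `stddev`, which is not confirmed here:

import math

def distance_to_score_sketch(distance: float, stats: ZScoreNormalization) -> float:
    if stats.stddev == 0:  # uninitialized stats: no distribution to compare against
        return 0.5
    z = (distance - stats.mean) / stats.stddev
    return 1.0 / (1.0 + math.exp(z))  # smaller distance -> smaller z -> higher score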
class TestFuseScores(unittest.TestCase):
def test_weights_sum_to_one(self):
self.assertAlmostEqual(VISUAL_WEIGHT + DESCRIPTION_WEIGHT, 1.0)
def test_fuses_both_sides(self):
fused = fuse_scores(visual_score=0.8, description_score=0.4)
expected = VISUAL_WEIGHT * 0.8 + DESCRIPTION_WEIGHT * 0.4
self.assertAlmostEqual(fused, expected)
def test_missing_description_uses_visual_only(self):
fused = fuse_scores(visual_score=0.7, description_score=None)
self.assertAlmostEqual(fused, 0.7)
def test_missing_visual_uses_description_only(self):
fused = fuse_scores(visual_score=None, description_score=0.6)
self.assertAlmostEqual(fused, 0.6)
def test_both_missing_returns_none(self):
self.assertIsNone(fuse_scores(visual_score=None, description_score=None))
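These tests pin down the fusion contract tightly; a sketch consistent with them (the shipped fuse_scores may differ in detail):

def fuse_scores_sketch(visual_score, description_score):
    if visual_score is None and description_score is None:
        return None
    if visual_score is None:
        return description_score
    if description_score is None:
        return visual_score
    # Both present: weighted sum with VISUAL_WEIGHT + DESCRIPTION_WEIGHT == 1.0
    return VISUAL_WEIGHT * visual_score + DESCRIPTION_WEIGHT * description_score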
class TestToolDefinition(unittest.TestCase):
def test_find_similar_objects_is_registered(self):
tools = get_tool_definitions()
names = [t["function"]["name"] for t in tools]
self.assertIn("find_similar_objects", names)
def test_find_similar_objects_schema(self):
tools = get_tool_definitions()
tool = next(t for t in tools if t["function"]["name"] == "find_similar_objects")
params = tool["function"]["parameters"]["properties"]
self.assertIn("event_id", params)
self.assertIn("after", params)
self.assertIn("before", params)
self.assertIn("cameras", params)
self.assertIn("labels", params)
self.assertIn("sub_labels", params)
self.assertIn("zones", params)
self.assertIn("similarity_mode", params)
self.assertIn("min_score", params)
self.assertIn("limit", params)
self.assertEqual(tool["function"]["parameters"]["required"], ["event_id"])
self.assertEqual(
params["similarity_mode"]["enum"], ["visual", "semantic", "fused"]
)
class TestExecuteFindSimilarObjects(unittest.TestCase):
def setUp(self):
self.tmp = tempfile.NamedTemporaryFile(suffix=".db", delete=False)
self.tmp.close()
self.db = SqliteExtDatabase(self.tmp.name)
Event.bind(self.db, bind_refs=False, bind_backrefs=False)
self.db.connect()
self.db.create_tables([Event])
# Insert an anchor plus two candidates.
def make(event_id, label="car", camera="driveway", start=1_700_000_100):
Event.create(
id=event_id,
label=label,
sub_label=None,
camera=camera,
start_time=start,
end_time=start + 10,
top_score=0.9,
score=0.9,
false_positive=False,
zones=[],
thumbnail="",
has_clip=True,
has_snapshot=True,
region=[0, 0, 1, 1],
box=[0, 0, 1, 1],
area=1,
retain_indefinitely=False,
ratio=1.0,
plus_id="",
model_hash="",
detector_type="",
model_type="",
data={"description": "a green sedan"},
)
make("anchor", start=1_700_000_200)
make("cand_a", start=1_700_000_100)
make("cand_b", start=1_700_000_150)
self.make = make
def tearDown(self):
self.db.close()
os.unlink(self.tmp.name)
def _make_request(self, semantic_enabled=True, embeddings=None):
app = SimpleNamespace(
embeddings=embeddings,
frigate_config=SimpleNamespace(
semantic_search=SimpleNamespace(enabled=semantic_enabled),
),
)
return SimpleNamespace(app=app)
def test_semantic_search_disabled_returns_error(self):
req = self._make_request(semantic_enabled=False)
result = _run(
_execute_find_similar_objects(
req,
{"event_id": "anchor"},
allowed_cameras=["driveway"],
)
)
self.assertEqual(result["error"], "semantic_search_disabled")
def test_anchor_not_found_returns_error(self):
embeddings = MagicMock()
req = self._make_request(embeddings=embeddings)
result = _run(
_execute_find_similar_objects(
req,
{"event_id": "nope"},
allowed_cameras=["driveway"],
)
)
self.assertEqual(result["error"], "anchor_not_found")
def test_empty_candidates_returns_empty_results(self):
embeddings = MagicMock()
req = self._make_request(embeddings=embeddings)
# Filter to a camera with no other events.
result = _run(
_execute_find_similar_objects(
req,
{"event_id": "anchor", "cameras": ["nonexistent_cam"]},
allowed_cameras=["nonexistent_cam"],
)
)
self.assertEqual(result["results"], [])
self.assertFalse(result["candidate_truncated"])
self.assertEqual(result["anchor"]["id"], "anchor")
def test_fused_calls_both_searches_and_ranks(self):
embeddings = MagicMock()
# cand_a visually closer, cand_b semantically closer.
embeddings.search_thumbnail.return_value = [
("cand_a", 0.10),
("cand_b", 0.40),
]
embeddings.search_description.return_value = [
("cand_a", 0.50),
("cand_b", 0.20),
]
embeddings.thumb_stats = ZScoreNormalization()
embeddings.thumb_stats._update([0.1, 0.2, 0.3, 0.4, 0.5])
embeddings.desc_stats = ZScoreNormalization()
embeddings.desc_stats._update([0.1, 0.2, 0.3, 0.4, 0.5])
req = self._make_request(embeddings=embeddings)
result = _run(
_execute_find_similar_objects(
req,
{"event_id": "anchor"},
allowed_cameras=["driveway"],
)
)
embeddings.search_thumbnail.assert_called_once()
embeddings.search_description.assert_called_once()
# cand_a should rank first because visual is weighted higher.
self.assertEqual(result["results"][0]["id"], "cand_a")
self.assertIn("score", result["results"][0])
self.assertEqual(result["similarity_mode"], "fused")
def test_visual_mode_only_calls_thumbnail(self):
embeddings = MagicMock()
embeddings.search_thumbnail.return_value = [("cand_a", 0.1)]
embeddings.thumb_stats = ZScoreNormalization()
embeddings.thumb_stats._update([0.1, 0.2, 0.3])
req = self._make_request(embeddings=embeddings)
_run(
_execute_find_similar_objects(
req,
{"event_id": "anchor", "similarity_mode": "visual"},
allowed_cameras=["driveway"],
)
)
embeddings.search_thumbnail.assert_called_once()
embeddings.search_description.assert_not_called()
def test_semantic_mode_only_calls_description(self):
embeddings = MagicMock()
embeddings.search_description.return_value = [("cand_a", 0.1)]
embeddings.desc_stats = ZScoreNormalization()
embeddings.desc_stats._update([0.1, 0.2, 0.3])
req = self._make_request(embeddings=embeddings)
_run(
_execute_find_similar_objects(
req,
{"event_id": "anchor", "similarity_mode": "semantic"},
allowed_cameras=["driveway"],
)
)
embeddings.search_description.assert_called_once()
embeddings.search_thumbnail.assert_not_called()
def test_min_score_drops_low_scoring_results(self):
embeddings = MagicMock()
embeddings.search_thumbnail.return_value = [
("cand_a", 0.10),
("cand_b", 0.90),
]
embeddings.search_description.return_value = []
embeddings.thumb_stats = ZScoreNormalization()
embeddings.thumb_stats._update([0.1, 0.2, 0.3, 0.4, 0.5])
embeddings.desc_stats = ZScoreNormalization()
req = self._make_request(embeddings=embeddings)
result = _run(
_execute_find_similar_objects(
req,
{"event_id": "anchor", "similarity_mode": "visual", "min_score": 0.6},
allowed_cameras=["driveway"],
)
)
ids = [r["id"] for r in result["results"]]
self.assertIn("cand_a", ids)
self.assertNotIn("cand_b", ids)
def test_labels_defaults_to_anchor_label(self):
self.make("person_a", label="person")
embeddings = MagicMock()
embeddings.search_thumbnail.return_value = [
("cand_a", 0.1),
("cand_b", 0.2),
]
embeddings.search_description.return_value = []
embeddings.thumb_stats = ZScoreNormalization()
embeddings.thumb_stats._update([0.1, 0.2, 0.3])
embeddings.desc_stats = ZScoreNormalization()
req = self._make_request(embeddings=embeddings)
result = _run(
_execute_find_similar_objects(
req,
{"event_id": "anchor", "similarity_mode": "visual"},
allowed_cameras=["driveway"],
)
)
ids = [r["id"] for r in result["results"]]
self.assertNotIn("person_a", ids)
if __name__ == "__main__":
unittest.main()
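
A note on the fused mode exercised above: the ranking reduces to a weighted sum of z-score-normalized distances from the thumbnail and description searches. A minimal sketch of that idea, where the 0.6/0.4 split and the function name are illustrative assumptions rather than the shipped values:

def fused_score(thumb_z: float, desc_z: float, visual_weight: float = 0.6) -> float:
    # Distances are lower-is-better; z-scoring puts the visual and semantic
    # spaces on a comparable scale before mixing them.
    return visual_weight * thumb_z + (1.0 - visual_weight) * desc_z

# cand_a is visually close (low thumbnail z-score) but semantically far;
# cand_b is the reverse. With visual weighted higher, cand_a ranks first.
assert fused_score(-1.0, 1.0) < fused_score(1.0, -1.0)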

View File

@ -1,211 +0,0 @@
"""Tests for DeferredRealtimeProcessorApi."""
import sys
import time
import unittest
from typing import Any
from unittest.mock import MagicMock, patch
import numpy as np
from frigate.data_processing.real_time.api import DeferredRealtimeProcessorApi
# Mock TFLite before importing classification module
_MOCK_MODULES = [
"tflite_runtime",
"tflite_runtime.interpreter",
"ai_edge_litert",
"ai_edge_litert.interpreter",
]
for mod in _MOCK_MODULES:
if mod not in sys.modules:
sys.modules[mod] = MagicMock()
from frigate.data_processing.real_time.custom_classification import ( # noqa: E402
CustomObjectClassificationProcessor,
)
class StubDeferredProcessor(DeferredRealtimeProcessorApi):
"""Minimal concrete subclass for testing the deferred base."""
def __init__(self, max_queue: int = 8):
config = MagicMock()
metrics = MagicMock()
super().__init__(config, metrics, max_queue=max_queue)
self.processed_items: list[tuple] = []
def process_frame(self, obj_data: dict[str, Any], frame: np.ndarray) -> None:
"""Enqueue every call — no gating logic in the stub."""
self._enqueue_task(("frame", obj_data, frame.copy()))
def _process_task(self, task: tuple) -> None:
kind = task[0]
if kind == "frame":
_, obj_data, frame = task
self.processed_items.append((obj_data["id"], frame.shape))
self._emit_result(
{
"type": "test_result",
"id": obj_data["id"],
"label": "cat",
"score": 0.95,
}
)
elif kind == "expire":
_, object_id = task
self.processed_items.append(("expired", object_id))
def handle_request(
self, topic: str, request_data: dict[str, Any]
) -> dict[str, Any] | None:
if topic == "reload":
def _do_reload(data):
return {"success": True, "model": data.get("name")}
return self._enqueue_request(_do_reload, request_data)
return None
def expire_object(self, object_id: str, camera: str) -> None:
self._enqueue_task(("expire", object_id))
class TestDeferredProcessorBase(unittest.TestCase):
def test_enqueue_and_drain(self):
"""Tasks enqueued on main thread are processed by worker, results are drainable."""
proc = StubDeferredProcessor()
frame = np.zeros((100, 100, 3), dtype=np.uint8)
proc.process_frame({"id": "obj1"}, frame)
proc.process_frame({"id": "obj2"}, frame)
# Give the worker time to process
time.sleep(0.1)
results = proc.drain_results()
self.assertEqual(len(results), 2)
self.assertEqual(results[0]["id"], "obj1")
self.assertEqual(results[1]["id"], "obj2")
# Second drain should be empty
self.assertEqual(len(proc.drain_results()), 0)
def test_backpressure_drops_tasks(self):
"""When queue is full, new tasks are silently dropped."""
proc = StubDeferredProcessor(max_queue=2)
frame = np.zeros((10, 10, 3), dtype=np.uint8)
for i in range(10):
proc.process_frame({"id": f"obj{i}"}, frame)
time.sleep(0.2)
results = proc.drain_results()
# The key property: no crash, no unbounded growth
self.assertLessEqual(len(results), 10)
self.assertGreater(len(results), 0)
def test_handle_request_through_worker(self):
"""handle_request blocks until the worker processes it and returns a response."""
proc = StubDeferredProcessor()
result = proc.handle_request("reload", {"name": "my_model"})
self.assertEqual(result, {"success": True, "model": "my_model"})
def test_expire_object_serialized_with_work(self):
"""expire_object goes through the queue, serialized with inference work."""
proc = StubDeferredProcessor()
frame = np.zeros((10, 10, 3), dtype=np.uint8)
proc.process_frame({"id": "obj1"}, frame)
proc.expire_object("obj1", "front_door")
time.sleep(0.1)
# Both should have been processed in order
self.assertEqual(len(proc.processed_items), 2)
self.assertEqual(proc.processed_items[0][0], "obj1")
self.assertEqual(proc.processed_items[1], ("expired", "obj1"))
def test_shutdown_joins_worker(self):
"""shutdown() signals the worker to stop and joins the thread."""
proc = StubDeferredProcessor()
proc.shutdown()
self.assertFalse(proc._worker.is_alive())
def test_drain_results_returns_list(self):
"""drain_results returns a plain list, not a deque."""
proc = StubDeferredProcessor()
results = proc.drain_results()
self.assertIsInstance(results, list)
class TestCustomObjectClassificationDeferred(unittest.TestCase):
"""Test that CustomObjectClassificationProcessor uses the deferred pattern correctly."""
def _make_processor(self):
config = MagicMock()
model_config = MagicMock()
model_config.name = "test_breed"
model_config.object_config = MagicMock()
model_config.object_config.objects = ["dog"]
model_config.threshold = 0.5
model_config.save_attempts = 10
model_config.object_config.classification_type = "sub_label"
publisher = MagicMock()
requestor = MagicMock()
metrics = MagicMock()
metrics.classification_speeds = {}
metrics.classification_cps = {}
with patch.object(
CustomObjectClassificationProcessor,
"_CustomObjectClassificationProcessor__build_detector",
):
proc = CustomObjectClassificationProcessor(
config, model_config, publisher, requestor, metrics
)
proc.interpreter = None
proc.tensor_input_details = [{"index": 0}]
proc.tensor_output_details = [{"index": 0}]
proc.labelmap = {0: "labrador", 1: "poodle", 2: "none"}
return proc
def test_is_deferred_processor(self):
"""CustomObjectClassificationProcessor should be a DeferredRealtimeProcessorApi."""
proc = self._make_processor()
self.assertIsInstance(proc, DeferredRealtimeProcessorApi)
def test_expire_clears_history(self):
"""expire_object should clear classification history for the object."""
proc = self._make_processor()
proc.classification_history["obj1"] = [("labrador", 0.9, 1.0)]
proc.expire_object("obj1", "front")
time.sleep(0.1)
self.assertNotIn("obj1", proc.classification_history)
def test_drain_results_empty_when_no_model(self):
"""With no interpreter, process_frame saves training images but emits no results."""
proc = self._make_processor()
proc.interpreter = None
frame = np.zeros((150, 100), dtype=np.uint8)
obj_data = {
"id": "obj1",
"label": "dog",
"false_positive": False,
"end_time": None,
"box": [10, 10, 50, 50],
"camera": "front",
}
with patch(
"frigate.data_processing.real_time.custom_classification.write_classification_attempt"
):
proc.process_frame(obj_data, frame)
time.sleep(0.1)
results = proc.drain_results()
self.assertEqual(len(results), 0)
if __name__ == "__main__":
unittest.main()
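
The contract these tests pin down (enqueue on the caller's thread, process on one worker, drop on overflow, drain results later) fits in a short sketch. This mirrors the stub above rather than the real DeferredRealtimeProcessorApi internals, which may differ:

import queue
import threading
from collections import deque


class DeferredWorker:
    """Bounded task queue in, single worker thread, result deque out."""

    def __init__(self, process_task, max_queue: int = 8) -> None:
        self._tasks: queue.Queue = queue.Queue(maxsize=max_queue)
        self._results: deque = deque()
        self._process_task = process_task
        self._stop = threading.Event()
        self._worker = threading.Thread(target=self._run, daemon=True)
        self._worker.start()

    def enqueue(self, task) -> None:
        try:
            self._tasks.put_nowait(task)
        except queue.Full:
            pass  # backpressure: a full queue silently drops new work

    def drain_results(self) -> list:
        out = []
        while self._results:
            out.append(self._results.popleft())
        return out

    def shutdown(self) -> None:
        self._stop.set()
        self._worker.join()

    def _run(self) -> None:
        while not self._stop.is_set():
            try:
                task = self._tasks.get(timeout=0.1)
            except queue.Empty:
                continue
            self._results.append(self._process_task(task))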

View File

@ -2,7 +2,6 @@
import os
import unittest
-from unittest.mock import MagicMock, patch
from frigate.config.env import (
    FRIGATE_ENV_VARS,
@ -11,71 +10,6 @@ from frigate.config.env import (
)
class TestGo2RtcAddStreamSubstitution(unittest.TestCase):
"""Covers the API path: PUT /go2rtc/streams/{stream_name}.
The route shells out to go2rtc via `requests.put`; we mock the HTTP call
and assert that the substituted `src` parameter handles the same mixed
{FRIGATE_*} + literal-brace strings as the config-loading path.
"""
def setUp(self):
self._original_env_vars = dict(FRIGATE_ENV_VARS)
def tearDown(self):
FRIGATE_ENV_VARS.clear()
FRIGATE_ENV_VARS.update(self._original_env_vars)
def _call_route(self, src: str) -> str:
"""Invoke go2rtc_add_stream and return the substituted src param."""
from frigate.api import camera as camera_api
captured = {}
def fake_put(url, params=None, timeout=None):
captured["params"] = params
resp = MagicMock()
resp.ok = True
resp.text = ""
resp.status_code = 200
return resp
with patch.object(camera_api.requests, "put", side_effect=fake_put):
camera_api.go2rtc_add_stream(
request=MagicMock(), stream_name="cam1", src=src
)
return captured["params"]["src"]
def test_mixed_localtime_and_frigate_var(self):
"""%{localtime\\:...} alongside {FRIGATE_USER} substitutes only the var."""
FRIGATE_ENV_VARS["FRIGATE_USER"] = "admin"
src = (
"ffmpeg:rtsp://host/s#raw=-vf "
"drawtext=text=%{localtime\\:%Y-%m-%d}:user={FRIGATE_USER}"
)
self.assertEqual(
self._call_route(src),
"ffmpeg:rtsp://host/s#raw=-vf "
"drawtext=text=%{localtime\\:%Y-%m-%d}:user=admin",
)
def test_unknown_var_falls_back_to_raw_src(self):
"""Existing route behavior: unknown {FRIGATE_*} keeps raw src."""
src = "rtsp://host/{FRIGATE_NONEXISTENT}/stream"
self.assertEqual(self._call_route(src), src)
def test_malformed_placeholder_rejected_via_api(self):
"""Malformed FRIGATE placeholders raise (not silently passed through).
Regression: previously camera.py caught any KeyError and fell back
to the raw src, so `{FRIGATE_FOO:>5}` was silently accepted via the
API while config loading rejected it. The helper now raises
ValueError for malformed syntax to keep the two paths consistent.
"""
with self.assertRaises(ValueError):
self._call_route("rtsp://host/{FRIGATE_FOO:>5}/stream")
class TestEnvString(unittest.TestCase):
    def setUp(self):
        self._original_env_vars = dict(FRIGATE_ENV_VARS)
@ -109,72 +43,6 @@ class TestEnvString(unittest.TestCase):
        with self.assertRaises(KeyError):
            validate_env_string("{FRIGATE_NONEXISTENT_VAR}")
def test_non_frigate_braces_passthrough(self):
"""Braces that are not {FRIGATE_*} placeholders pass through untouched.
Regression test for ffmpeg drawtext expressions like
"%{localtime\\:%Y-%m-%d}" being mangled by str.format().
"""
expr = (
"ffmpeg:rtsp://127.0.0.1/src#raw=-vf "
"drawtext=text=%{localtime\\:%Y-%m-%d_%H\\:%M\\:%S}"
":x=5:fontcolor=white"
)
self.assertEqual(validate_env_string(expr), expr)
def test_double_brace_escape_preserved(self):
"""`{{output}}` collapses to `{output}` (documented go2rtc escape)."""
result = validate_env_string(
"exec:ffmpeg -i /media/file.mp4 -f rtsp {{output}}"
)
self.assertEqual(result, "exec:ffmpeg -i /media/file.mp4 -f rtsp {output}")
def test_double_brace_around_frigate_var(self):
"""`{{FRIGATE_FOO}}` stays literal — escape takes precedence."""
FRIGATE_ENV_VARS["FRIGATE_FOO"] = "bar"
self.assertEqual(validate_env_string("{{FRIGATE_FOO}}"), "{FRIGATE_FOO}")
def test_mixed_frigate_var_and_braces(self):
"""A FRIGATE_ var alongside literal single braces substitutes only the var."""
FRIGATE_ENV_VARS["FRIGATE_USER"] = "admin"
result = validate_env_string(
"drawtext=text=%{localtime}:user={FRIGATE_USER}:x=5"
)
self.assertEqual(result, "drawtext=text=%{localtime}:user=admin:x=5")
def test_triple_braces_around_frigate_var(self):
"""`{{{FRIGATE_FOO}}}` collapses like str.format(): `{bar}`."""
FRIGATE_ENV_VARS["FRIGATE_FOO"] = "bar"
self.assertEqual(validate_env_string("{{{FRIGATE_FOO}}}"), "{bar}")
def test_trailing_double_brace_after_var(self):
"""`{FRIGATE_FOO}}}` collapses like str.format(): `bar}`."""
FRIGATE_ENV_VARS["FRIGATE_FOO"] = "bar"
self.assertEqual(validate_env_string("{FRIGATE_FOO}}}"), "bar}")
def test_leading_double_brace_then_var(self):
"""`{{{FRIGATE_FOO}` collapses like str.format(): `{bar`."""
FRIGATE_ENV_VARS["FRIGATE_FOO"] = "bar"
self.assertEqual(validate_env_string("{{{FRIGATE_FOO}"), "{bar")
def test_malformed_unterminated_placeholder_raises(self):
"""`{FRIGATE_FOO` (no closing brace) raises like str.format() did."""
FRIGATE_ENV_VARS["FRIGATE_FOO"] = "bar"
with self.assertRaises(ValueError):
validate_env_string("prefix-{FRIGATE_FOO")
def test_malformed_format_spec_raises(self):
"""`{FRIGATE_FOO:>5}` (format spec) raises like str.format() did."""
FRIGATE_ENV_VARS["FRIGATE_FOO"] = "bar"
with self.assertRaises(ValueError):
validate_env_string("{FRIGATE_FOO:>5}")
def test_malformed_conversion_raises(self):
"""`{FRIGATE_FOO!r}` (conversion) raises like str.format() did."""
FRIGATE_ENV_VARS["FRIGATE_FOO"] = "bar"
with self.assertRaises(ValueError):
validate_env_string("{FRIGATE_FOO!r}")
class TestEnvVars(unittest.TestCase):
    def setUp(self):

View File

@ -1,385 +0,0 @@
"""Tests for export progress tracking, broadcast, and FFmpeg parsing."""
import io
import unittest
from unittest.mock import MagicMock, patch
from frigate.jobs.export import (
PROGRESS_BROADCAST_MIN_INTERVAL,
ExportJob,
ExportJobManager,
)
from frigate.record.export import PlaybackSourceEnum, RecordingExporter
from frigate.types import JobStatusTypesEnum
def _make_exporter(
end_minus_start: int = 100,
ffmpeg_input_args=None,
ffmpeg_output_args=None,
on_progress=None,
) -> RecordingExporter:
"""Build a RecordingExporter without invoking its real __init__ side
effects (which create directories and require a full FrigateConfig)."""
exporter = RecordingExporter.__new__(RecordingExporter)
exporter.config = MagicMock()
exporter.export_id = "test_export"
exporter.camera = "front"
exporter.user_provided_name = None
exporter.user_provided_image = None
exporter.start_time = 1_000
exporter.end_time = 1_000 + end_minus_start
exporter.playback_source = PlaybackSourceEnum.recordings
exporter.export_case_id = None
exporter.ffmpeg_input_args = ffmpeg_input_args
exporter.ffmpeg_output_args = ffmpeg_output_args
exporter.cpu_fallback = False
exporter.on_progress = on_progress
return exporter
class TestExportJobToDict(unittest.TestCase):
def test_to_dict_includes_progress_fields(self) -> None:
job = ExportJob(camera="front", request_start_time=0, request_end_time=10)
result = job.to_dict()
assert "current_step" in result
assert "progress_percent" in result
assert result["current_step"] == "queued"
assert result["progress_percent"] == 0.0
def test_to_dict_reflects_updated_progress(self) -> None:
job = ExportJob(camera="front", request_start_time=0, request_end_time=10)
job.current_step = "encoding"
job.progress_percent = 42.5
result = job.to_dict()
assert result["current_step"] == "encoding"
assert result["progress_percent"] == 42.5
class TestExpectedOutputDuration(unittest.TestCase):
def test_normal_export_uses_input_duration(self) -> None:
exporter = _make_exporter(end_minus_start=600)
assert exporter._expected_output_duration_seconds() == 600.0
def test_timelapse_uses_setpts_factor(self) -> None:
exporter = _make_exporter(
end_minus_start=1000,
ffmpeg_input_args="-y",
ffmpeg_output_args="-vf setpts=0.04*PTS -r 30",
)
# 1000s input * 0.04 = 40s of output
assert exporter._expected_output_duration_seconds() == 40.0
def test_unknown_factor_falls_back_to_input_duration(self) -> None:
exporter = _make_exporter(
end_minus_start=300,
ffmpeg_input_args="-y",
ffmpeg_output_args="-c:v libx264 -preset veryfast",
)
assert exporter._expected_output_duration_seconds() == 300.0
def test_zero_factor_falls_back_to_input_duration(self) -> None:
exporter = _make_exporter(
end_minus_start=300,
ffmpeg_input_args="-y",
ffmpeg_output_args="-vf setpts=0*PTS",
)
assert exporter._expected_output_duration_seconds() == 300.0
def test_uses_actual_recorded_seconds_when_available(self) -> None:
"""If the DB shows only 120s of saved recordings inside a 1h
requested range, progress should be computed against 120s."""
exporter = _make_exporter(end_minus_start=3600)
exporter._sum_source_duration_seconds = lambda: 120.0 # type: ignore[method-assign]
assert exporter._expected_output_duration_seconds() == 120.0
def test_actual_recorded_seconds_scaled_by_setpts(self) -> None:
"""Recorded duration must still be scaled by the timelapse factor."""
exporter = _make_exporter(
end_minus_start=3600,
ffmpeg_input_args="-y",
ffmpeg_output_args="-vf setpts=0.04*PTS -r 30",
)
exporter._sum_source_duration_seconds = lambda: 600.0 # type: ignore[method-assign]
# 600s * 0.04 = 24s of output
assert exporter._expected_output_duration_seconds() == 24.0
def test_db_failure_falls_back_to_requested_range(self) -> None:
exporter = _make_exporter(end_minus_start=300)
exporter._sum_source_duration_seconds = lambda: None # type: ignore[method-assign]
assert exporter._expected_output_duration_seconds() == 300.0
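
The timelapse handling above only needs the numeric factor in front of *PTS. A sketch of that extraction, assuming the factor always appears as setpts=<float>*PTS (the regex and function name are illustrative):

import re

_SETPTS = re.compile(r"setpts\s*=\s*([0-9.]+)\s*\*\s*PTS", re.IGNORECASE)


def expected_output_seconds(input_seconds: float, output_args: str | None) -> float:
    # Scale the source duration by the timelapse factor; fall back to the
    # input duration when the factor is absent, unparsable, or zero.
    match = _SETPTS.search(output_args or "")
    if match:
        factor = float(match.group(1))
        if factor > 0:
            return input_seconds * factor
    return input_seconds


assert expected_output_seconds(1000, "-vf setpts=0.04*PTS -r 30") == 40.0
assert expected_output_seconds(300, "-vf setpts=0*PTS") == 300.0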
class TestProgressFlagInjection(unittest.TestCase):
def test_inserts_before_output_path(self) -> None:
exporter = _make_exporter()
cmd = ["ffmpeg", "-i", "input.m3u8", "-c", "copy", "/tmp/output.mp4"]
result = exporter._inject_progress_flags(cmd)
assert result == [
"ffmpeg",
"-i",
"input.m3u8",
"-c",
"copy",
"-progress",
"pipe:2",
"-nostats",
"/tmp/output.mp4",
]
def test_handles_empty_cmd(self) -> None:
exporter = _make_exporter()
assert exporter._inject_progress_flags([]) == []
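
The injection itself is small once you rely on ffmpeg treating the final positional argument as the output path; a sketch consistent with both tests:

def inject_progress_flags(cmd: list[str]) -> list[str]:
    # -progress pipe:2 streams machine-readable key=value lines to stderr;
    # -nostats suppresses the human-readable one-line status output.
    if not cmd:
        return cmd
    return cmd[:-1] + ["-progress", "pipe:2", "-nostats", cmd[-1]]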
class TestFfmpegProgressParsing(unittest.TestCase):
"""Verify percentage calculation from FFmpeg ``-progress`` output."""
def _run_with_stderr(
self,
stderr_text: str,
expected_duration_seconds: int = 90,
) -> list[tuple[str, float]]:
"""Helper: run _run_ffmpeg_with_progress against a mocked Popen
whose stderr emits the supplied text. Returns the list of
(step, percent) tuples that the on_progress callback received."""
captured: list[tuple[str, float]] = []
def on_progress(step: str, percent: float) -> None:
captured.append((step, percent))
exporter = _make_exporter(
end_minus_start=expected_duration_seconds,
on_progress=on_progress,
)
fake_proc = MagicMock()
fake_proc.stdin = io.StringIO()
fake_proc.stderr = io.StringIO(stderr_text)
fake_proc.returncode = 0
fake_proc.wait = MagicMock(return_value=0)
with patch("frigate.record.export.sp.Popen", return_value=fake_proc):
returncode, _stderr = exporter._run_ffmpeg_with_progress(
["ffmpeg", "-i", "x.m3u8", "/tmp/out.mp4"], "playlist", step="encoding"
)
assert returncode == 0
return captured
def test_parses_out_time_us_into_percent(self) -> None:
# 90s duration; 45s out_time => 50%
stderr = "out_time_us=45000000\nprogress=continue\n"
captured = self._run_with_stderr(stderr, expected_duration_seconds=90)
# The first call is the synchronous 0.0 emit before Popen runs.
assert captured[0] == ("encoding", 0.0)
assert any(percent == 50.0 for step, percent in captured if step == "encoding")
def test_progress_end_emits_100_percent(self) -> None:
stderr = "out_time_us=10000000\nprogress=end\n"
captured = self._run_with_stderr(stderr, expected_duration_seconds=90)
assert captured[-1] == ("encoding", 100.0)
def test_clamps_overshoot_at_100(self) -> None:
# 150s of output reported against 90s expected duration.
stderr = "out_time_us=150000000\nprogress=continue\n"
captured = self._run_with_stderr(stderr, expected_duration_seconds=90)
encoding_values = [p for s, p in captured if s == "encoding" and p > 0]
assert all(p <= 100.0 for p in encoding_values)
assert encoding_values[-1] == 100.0
def test_ignores_garbage_lines(self) -> None:
stderr = (
"frame= 120 fps= 30 q=23.0 size= 512kB\n"
"out_time_us=not-a-number\n"
"out_time_us=30000000\n"
"progress=continue\n"
)
captured = self._run_with_stderr(stderr, expected_duration_seconds=90)
# We expect 0.0 (from initial emit) plus the 30s/90s = 33.33...% step
encoding_percents = sorted({round(p, 2) for s, p in captured})
assert 0.0 in encoding_percents
assert any(abs(p - (30 / 90 * 100)) < 0.01 for p in encoding_percents)
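
The parsing contract these cases pin down is tiny: out_time_us is microseconds of emitted output, progress=end means done, and anything unparsable is skipped. A sketch under those assumptions:

def iter_progress_percent(lines, expected_seconds: float):
    # Map ffmpeg -progress key=value lines to clamped completion percentages.
    for raw in lines:
        line = raw.strip()
        if line == "progress=end":
            yield 100.0
        elif line.startswith("out_time_us="):
            try:
                out_us = int(line.split("=", 1)[1])
            except ValueError:
                continue  # tolerate garbage like "out_time_us=not-a-number"
            yield min(out_us / 1_000_000 / expected_seconds * 100.0, 100.0)


# 45s of emitted output against a 90s expectation -> 50%.
assert list(iter_progress_percent(["out_time_us=45000000"], 90)) == [50.0]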
class TestBroadcastAggregation(unittest.TestCase):
"""Verify ExportJobManager broadcast payload shape and throttling."""
def _make_manager(self) -> tuple[ExportJobManager, MagicMock]:
"""Build a manager with an injected mock publisher. Returns
``(manager, publisher)`` so tests can assert on broadcast payloads
without touching ZMQ at all."""
config = MagicMock()
publisher = MagicMock()
manager = ExportJobManager(
config, max_concurrent=2, max_queued=10, publisher=publisher
)
return manager, publisher
@staticmethod
def _last_payload(publisher: MagicMock) -> dict:
return publisher.publish.call_args.args[0]
def test_empty_jobs_broadcasts_empty_list(self) -> None:
manager, publisher = self._make_manager()
manager._broadcast_all_jobs(force=True)
publisher.publish.assert_called_once()
payload = self._last_payload(publisher)
assert payload["job_type"] == "export"
assert payload["status"] == "queued"
assert payload["results"]["jobs"] == []
def test_single_running_job_payload(self) -> None:
manager, publisher = self._make_manager()
job = ExportJob(camera="front", request_start_time=0, request_end_time=10)
job.status = JobStatusTypesEnum.running
job.current_step = "encoding"
job.progress_percent = 75.0
manager.jobs[job.id] = job
manager._broadcast_all_jobs(force=True)
payload = self._last_payload(publisher)
assert payload["status"] == "running"
assert len(payload["results"]["jobs"]) == 1
broadcast_job = payload["results"]["jobs"][0]
assert broadcast_job["current_step"] == "encoding"
assert broadcast_job["progress_percent"] == 75.0
def test_multiple_jobs_broadcast(self) -> None:
manager, publisher = self._make_manager()
for i, status in enumerate(
(JobStatusTypesEnum.queued, JobStatusTypesEnum.running)
):
job = ExportJob(
id=f"job_{i}",
camera="front",
request_start_time=0,
request_end_time=10,
)
job.status = status
manager.jobs[job.id] = job
manager._broadcast_all_jobs(force=True)
payload = self._last_payload(publisher)
assert payload["status"] == "running"
assert len(payload["results"]["jobs"]) == 2
def test_completed_jobs_are_excluded(self) -> None:
manager, publisher = self._make_manager()
active = ExportJob(id="active", camera="front")
active.status = JobStatusTypesEnum.running
finished = ExportJob(id="done", camera="front")
finished.status = JobStatusTypesEnum.success
manager.jobs[active.id] = active
manager.jobs[finished.id] = finished
manager._broadcast_all_jobs(force=True)
payload = self._last_payload(publisher)
ids = [j["id"] for j in payload["results"]["jobs"]]
assert ids == ["active"]
def test_throttle_skips_rapid_unforced_broadcasts(self) -> None:
manager, publisher = self._make_manager()
job = ExportJob(camera="front")
job.status = JobStatusTypesEnum.running
manager.jobs[job.id] = job
manager._broadcast_all_jobs(force=True)
# Immediately following non-forced broadcasts should be skipped.
for _ in range(5):
manager._broadcast_all_jobs(force=False)
assert publisher.publish.call_count == 1
def test_throttle_allows_broadcast_after_interval(self) -> None:
manager, publisher = self._make_manager()
job = ExportJob(camera="front")
job.status = JobStatusTypesEnum.running
manager.jobs[job.id] = job
with patch("frigate.jobs.export.time.monotonic") as mock_mono:
mock_mono.return_value = 100.0
manager._broadcast_all_jobs(force=True)
mock_mono.return_value = 100.0 + PROGRESS_BROADCAST_MIN_INTERVAL + 0.01
manager._broadcast_all_jobs(force=False)
assert publisher.publish.call_count == 2
def test_force_bypasses_throttle(self) -> None:
manager, publisher = self._make_manager()
job = ExportJob(camera="front")
job.status = JobStatusTypesEnum.running
manager.jobs[job.id] = job
manager._broadcast_all_jobs(force=True)
manager._broadcast_all_jobs(force=True)
assert publisher.publish.call_count == 2
def test_publisher_exceptions_do_not_propagate(self) -> None:
"""A failing publisher must not break the manager: broadcasts are
best-effort since the dispatcher may not be available (tests,
startup races)."""
manager, publisher = self._make_manager()
publisher.publish.side_effect = RuntimeError("comms down")
job = ExportJob(camera="front")
job.status = JobStatusTypesEnum.running
manager.jobs[job.id] = job
# Swallow our own RuntimeError if the manager doesn't; the real
# JobStatePublisher handles its own exceptions internally, so the
# manager can stay naive. But if something bubbles up it should
# not escape _broadcast_all_jobs — enforce that contract here.
try:
manager._broadcast_all_jobs(force=True)
except RuntimeError:
self.fail("_broadcast_all_jobs must tolerate publisher failures")
def test_progress_callback_updates_job_and_broadcasts(self) -> None:
manager, _publisher = self._make_manager()
job = ExportJob(camera="front")
job.status = JobStatusTypesEnum.running
manager.jobs[job.id] = job
callback = manager._make_progress_callback(job)
callback("encoding", 33.0)
assert job.current_step == "encoding"
assert job.progress_percent == 33.0
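
The throttling contract reduces to a monotonic-clock gate; a standalone sketch (the real manager tracks its own timestamp internally):

import time


class BroadcastThrottle:
    # Unforced calls within min_interval of the last allowed call are skipped.
    def __init__(self, min_interval: float) -> None:
        self.min_interval = min_interval
        self._last = float("-inf")

    def allow(self, force: bool = False) -> bool:
        now = time.monotonic()
        if not force and now - self._last < self.min_interval:
            return False
        self._last = now
        return True


gate = BroadcastThrottle(min_interval=1.0)
assert gate.allow(force=True)  # force always passes
assert not gate.allow()        # immediate unforced retry is throttled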
class TestSchedulesCleanup(unittest.TestCase):
def test_schedule_job_cleanup_removes_after_delay(self) -> None:
config = MagicMock()
manager = ExportJobManager(config, max_concurrent=1, max_queued=1)
job = ExportJob(id="cleanup_me", camera="front")
manager.jobs[job.id] = job
with patch("frigate.jobs.export.threading.Timer") as mock_timer:
manager._schedule_job_cleanup(job.id)
mock_timer.assert_called_once()
delay, fn = mock_timer.call_args.args
assert delay > 0
# Invoke the callback directly to confirm it removes the job.
fn()
assert job.id not in manager.jobs
if __name__ == "__main__":
unittest.main()

View File

@ -1,4 +1,3 @@
-import datetime
import sys
import unittest
from unittest.mock import MagicMock, patch
@ -75,46 +74,6 @@ class TestMaintainer(unittest.IsolatedAsyncioTestCase):
    f"Expected a single warning for unexpected files, got {len(matching)}",
)
async def test_drops_quiet_segment_when_only_motion_retention(self):
# Regression: when motion retention is enabled but a segment has no
# motion and no review overlaps it, the segment must still be dropped.
# Otherwise it sits in cache forever, accumulates, and triggers the
# "Unable to keep up with recording segments in cache" warning every
# ~10s as the overflow trim in move_files discards the oldest one.
config = MagicMock(spec=FrigateConfig)
camera_config = MagicMock()
camera_config.record.enabled = True
camera_config.record.continuous.days = 0
camera_config.record.motion.days = 1
camera_config.record.event_pre_capture = 5
config.cameras = {"test_cam": camera_config}
stop_event = MagicMock()
maintainer = RecordingMaintainer(config, stop_event)
now = datetime.datetime.now(datetime.timezone.utc)
start_time = now - datetime.timedelta(seconds=20)
end_time = now - datetime.timedelta(seconds=10)
cache_path = "/tmp/cache/test_cam@20260417150000+0000.mp4"
maintainer.end_time_cache = {cache_path: (end_time, 10.0)}
# Single processed frame well past end_time with no motion/objects.
maintainer.object_recordings_info["test_cam"] = [(now.timestamp(), [], [], [])]
maintainer.audio_recordings_info["test_cam"] = []
maintainer.drop_segment = MagicMock()
maintainer.recordings_publisher = MagicMock()
result = await maintainer.validate_and_move_segment(
"test_cam",
reviews=[],
recording={"start_time": start_time, "cache_path": cache_path},
)
self.assertIsNone(result)
maintainer.drop_segment.assert_called_once_with(cache_path)
if __name__ == "__main__":
    unittest.main()

View File

@ -116,8 +116,6 @@ class TimelineProcessor(threading.Thread):
            ),
            "attribute": "",
            "score": event_data["score"],
-           "computed_score": event_data.get("computed_score"),
-           "top_score": event_data.get("top_score"),
        },
    }

View File

@ -400,7 +400,6 @@ class TrackedObject:
"start_time": self.obj_data["start_time"], "start_time": self.obj_data["start_time"],
"end_time": self.obj_data.get("end_time", None), "end_time": self.obj_data.get("end_time", None),
"score": self.obj_data["score"], "score": self.obj_data["score"],
"computed_score": self.computed_score,
"box": self.obj_data["box"], "box": self.obj_data["box"],
"area": self.obj_data["area"], "area": self.obj_data["area"],
"ratio": self.obj_data["ratio"], "ratio": self.obj_data["ratio"],

View File

@ -62,12 +62,11 @@ def get_camera_regions_grid(
        .where((Event.false_positive == None) | (Event.false_positive == False))
        .where(Event.start_time > last_update)
    )
-   event_count = events.count()
-   logger.debug(f"Found {event_count} new events for {name}")
+   valid_event_ids = [e["id"] for e in events.dicts()]
+   logger.debug(f"Found {len(valid_event_ids)} new events for {name}")
    # no new events, return as is
-   if event_count == 0:
+   if not valid_event_ids:
        return grid
    new_update = datetime.datetime.now().timestamp()
@ -79,7 +78,7 @@
            Timeline.data,
        ]
    )
-   .where(Timeline.source_id << events)
+   .where(Timeline.source_id << valid_event_ids)
    .limit(10000)
    .dicts()
)

View File

@ -807,15 +807,10 @@ async def get_video_properties(
) -> dict[str, Any]:
    async def probe_with_ffprobe(
        url: str,
-       rtsp_transport: Optional[str] = None,
    ) -> tuple[bool, int, int, Optional[str], float]:
        """Fallback using ffprobe: returns (valid, width, height, codec, duration)."""
-       cmd = [ffmpeg.ffprobe_path]
-       if rtsp_transport:
-           cmd += ["-rtsp_transport", rtsp_transport]
-       cmd += [
-           "-rw_timeout",
-           "5000000",
+       cmd = [
+           ffmpeg.ffprobe_path,
            "-v",
            "quiet",
            "-print_format",
@ -877,27 +872,13 @@
        cap.release()
        return valid, width, height, fourcc, duration
-   is_rtsp = url.startswith("rtsp://")
-   if is_rtsp:
-       # skip cv2 for RTSP: its FFmpeg backend has a hardcoded ~30s internal
-       # timeout that cannot be shortened per-call, and ffprobe bounded by
-       # -rw_timeout handles RTSP probing reliably
-       has_video, width, height, fourcc, duration = await probe_with_ffprobe(url)
-   else:
-       # try cv2 first for local files, HTTP, RTMP
+   # try cv2 first
    has_video, width, height, fourcc, duration = probe_with_cv2(url)
    # fallback to ffprobe if needed
    if not has_video or (get_duration and duration < 0):
        has_video, width, height, fourcc, duration = await probe_with_ffprobe(url)
-   # last resort for RTSP: try TCP transport, since default UDP may be blocked
-   if (not has_video or (get_duration and duration < 0)) and is_rtsp:
-       has_video, width, height, fourcc, duration = await probe_with_ffprobe(
-           url, rtsp_transport="tcp"
-       )
    result: dict[str, Any] = {"has_valid_video": has_video}
    if has_video:
        result.update({"width": width, "height": height})

View File

@ -471,16 +471,8 @@ class CameraWatchdog(threading.Thread):
p["cmd"], self.logger, p["logpipe"], ffmpeg_process=p["process"] p["cmd"], self.logger, p["logpipe"], ffmpeg_process=p["process"]
) )
# Prune expired reconnect timestamps
now = datetime.now().timestamp()
while (
self.reconnect_timestamps and self.reconnect_timestamps[0] < now - 3600
):
self.reconnect_timestamps.popleft()
if self.reconnects:
self.reconnects.value = len(self.reconnect_timestamps)
# Update stall metrics based on last processed frame timestamp # Update stall metrics based on last processed frame timestamp
now = datetime.now().timestamp()
processed_ts = ( processed_ts = (
float(self.detection_frame.value) if self.detection_frame else 0.0 float(self.detection_frame.value) if self.detection_frame else 0.0
) )

View File

@ -1,116 +0,0 @@
/**
* Global allowlist of regex patterns that the error collector ignores.
*
* Each entry MUST include a comment explaining what it silences and why.
* The allowlist is filtered at collection time, so failure messages list
* only unfiltered errors.
*
* Per-spec additions go through the `expectedErrors` test fixture parameter
* (see error-collector.ts), not by editing this file. That keeps allowlist
* drift visible per-PR rather than buried in shared infrastructure.
*
* NOTE ON CONSOLE vs REQUEST ERRORS:
* When a network request returns a 5xx response, the browser emits two
* events that the error collector captures:
* [request] "500 Internal Server Error <url>" from onResponse (URL included)
* [console] "Failed to load resource: ..." from onConsole (URL NOT included)
*
* The request-level message includes the URL, so those patterns are specific.
* The console-level message text (from ConsoleMessage.text()) does NOT include
the URL; the URL is stored separately in e.url. Therefore the console
* pattern for HTTP 500s cannot be URL-discriminated, and a single pattern
* covers all such browser echoes. This is safe because every such console
* error is already caught (and specifically matched) by its paired [request]
* entry below.
*/
export const GLOBAL_ALLOWLIST: RegExp[] = [
// -------------------------------------------------------------------------
// Browser echo of HTTP 5xx responses (console mirror of [request] events).
//
// Whenever the browser receives a 5xx response it emits a console error:
// "Failed to load resource: the server responded with a status of 500
// (Internal Server Error)"
// The URL is NOT part of ConsoleMessage.text() — it is stored separately.
// Every console error of this form is therefore paired with a specific
// [request] 500 entry below that names the exact endpoint. Allowlisting
// this pattern here silences the browser echo; the request-level entries
// enforce specificity.
// -------------------------------------------------------------------------
/Failed to load resource: the server responded with a status of 500/,
// -------------------------------------------------------------------------
// Mock infrastructure gaps — API endpoints not yet covered by ApiMocker.
//
// These produce 500s because Vite's preview server has no handler for them.
// Each is a TODO(real-bug): the mock should be extended so these endpoints
// return sensible fixture data in tests.
//
// Only [request] patterns are listed here; the paired [console] mirror is
// covered by the "Failed to load resource" entry above.
// -------------------------------------------------------------------------
// TODO(real-bug): ApiMocker registers "**/api/reviews**" (plural) but the
// app fetches /api/review (singular) for the review list and timeline.
// Affects: review.spec.ts, navigation.spec.ts, live.spec.ts, auth.spec.ts.
// Fix: add route handlers for /api/review and /api/review/** in api-mocker.ts.
/500 Internal Server Error.*\/api\/review(\?|\/|$)/,
// TODO(real-bug): /api/stats/history is not mocked; the system page fetches
// it for the detector/process history charts.
// Fix: add route handler for /api/stats/history in api-mocker.ts.
/500 Internal Server Error.*\/api\/stats\/history/,
// TODO(real-bug): /api/event_ids is not mocked; the explore/search page
// fetches it to resolve event IDs for display.
// Fix: add route handler for /api/event_ids in api-mocker.ts.
/500 Internal Server Error.*\/api\/event_ids/,
// TODO(real-bug): /api/sub_labels?split_joined=1 returns 500; the mock
// registers "**/api/sub_labels" which may not match when a query string is
// present, or route registration order causes the catch-all to win first.
// Fix: change the mock route to "**/api/sub_labels**" in api-mocker.ts.
/500 Internal Server Error.*\/api\/sub_labels/,
// TODO(real-bug): MediaMocker handles /api/*/latest.jpg but the app also
// requests /api/*/latest.webp (webp format) for camera snapshots.
// Affects: live.spec.ts, review.spec.ts, auth.spec.ts, navigation.spec.ts.
// Fix: add route handler for /api/*/latest.webp in MediaMocker.install().
/500 Internal Server Error.*\/api\/[^/]+\/latest\.webp/,
/failed: net::ERR_ABORTED.*\/api\/[^/]+\/latest\.webp/,
// -------------------------------------------------------------------------
// Mock infrastructure gap — WebSocket streams.
//
// Playwright's page.route() does not intercept WebSocket connections.
// The jsmpeg live-stream WS connections to /live/jsmpeg/* always fail
// with a 500 handshake error because the Vite preview server has no WS
// handler. TODO(real-bug): add WsMocker support for jsmpeg WebSocket
// connections, or suppress the connection attempt in the test environment.
// Affects: live.spec.ts (single camera view), auth.spec.ts.
// -------------------------------------------------------------------------
/WebSocket connection to '.*\/live\/jsmpeg\/.*' failed/,
// -------------------------------------------------------------------------
// Benign — lazy-loaded chunk aborts during navigation.
//
// When a test navigates away from a page while the browser is still
// fetching lazily-split JS/CSS asset chunks, the in-flight fetch is
// cancelled (net::ERR_ABORTED). This is normal browser behaviour on
// navigation and does not indicate a real error; the assets load fine
// on a stable connection.
// -------------------------------------------------------------------------
/failed: net::ERR_ABORTED.*\/assets\//,
// -------------------------------------------------------------------------
// Real app bug — Radix UI DialogContent missing accessible title.
//
// TODO(real-bug): A dialog somewhere in the app renders <DialogContent>
// without a <DialogTitle>, violating Radix UI's accessibility contract.
// The warning originates from the bundled main-*.js. Investigate which
// dialog component is missing the title and add a VisuallyHidden DialogTitle.
// Likely candidate: face-library or search-detail dialog in explore page.
// See: https://radix-ui.com/primitives/docs/components/dialog
// -------------------------------------------------------------------------
/`DialogContent` requires a `DialogTitle`/,
];

View File

@ -1,122 +0,0 @@
/**
* Collects console errors, page errors, and failed network requests
* during a Playwright test, with regex-based allowlist filtering.
*
* Usage:
* const collector = installErrorCollector(page, [...GLOBAL_ALLOWLIST]);
* // ... run test ...
* collector.assertClean(); // throws if any non-allowlisted error
*
* The collector is wired into the `frigateApp` fixture so every test
* gets it for free. Tests that intentionally trigger an error pass
* additional regexes via the `expectedErrors` fixture parameter.
*/
import type { Page, Request, Response, ConsoleMessage } from "@playwright/test";
export type CollectedError = {
kind: "console" | "pageerror" | "request";
message: string;
url?: string;
stack?: string;
};
export type ErrorCollector = {
errors: CollectedError[];
assertClean(): void;
};
function isAllowlisted(message: string, allowlist: RegExp[]): boolean {
return allowlist.some((pattern) => pattern.test(message));
}
function firstStackFrame(stack: string | undefined): string | undefined {
if (!stack) return undefined;
const lines = stack
.split("\n")
.map((l) => l.trim())
.filter(Boolean);
// Skip the error message line (line 0); return the first "at ..." frame
return lines.find((l) => l.startsWith("at "));
}
function isSameOrigin(url: string, baseURL: string | undefined): boolean {
if (!baseURL) return true;
try {
return new URL(url).origin === new URL(baseURL).origin;
} catch {
return false;
}
}
export function installErrorCollector(
page: Page,
allowlist: RegExp[],
): ErrorCollector {
const errors: CollectedError[] = [];
const baseURL = (
page.context() as unknown as { _options?: { baseURL?: string } }
)._options?.baseURL;
const onConsole = (msg: ConsoleMessage) => {
if (msg.type() !== "error") return;
const text = msg.text();
if (isAllowlisted(text, allowlist)) return;
errors.push({
kind: "console",
message: text,
url: msg.location().url,
});
};
const onPageError = (err: Error) => {
const text = err.message;
if (isAllowlisted(text, allowlist)) return;
errors.push({
kind: "pageerror",
message: text,
stack: firstStackFrame(err.stack),
});
};
const onResponse = (response: Response) => {
const status = response.status();
if (status < 500) return;
const url = response.url();
if (!isSameOrigin(url, baseURL)) return;
const text = `${status} ${response.statusText()} ${url}`;
if (isAllowlisted(text, allowlist)) return;
errors.push({ kind: "request", message: text, url });
};
const onRequestFailed = (request: Request) => {
const url = request.url();
if (!isSameOrigin(url, baseURL)) return;
const failure = request.failure();
const text = `failed: ${failure?.errorText ?? "unknown"} ${url}`;
if (isAllowlisted(text, allowlist)) return;
errors.push({ kind: "request", message: text, url });
};
page.on("console", onConsole);
page.on("pageerror", onPageError);
page.on("response", onResponse);
page.on("requestfailed", onRequestFailed);
return {
errors,
assertClean() {
if (errors.length === 0) return;
const formatted = errors
.map((e, i) => {
const stack = e.stack ? `\n ${e.stack}` : "";
const url = e.url && e.url !== e.message ? ` (${e.url})` : "";
return ` ${i + 1}. [${e.kind}] ${e.message}${url}${stack}`;
})
.join("\n");
throw new Error(
`Page emitted ${errors.length} unexpected error${errors.length === 1 ? "" : "s"}:\n${formatted}`,
);
},
};
}

View File

@ -1,120 +0,0 @@
/* eslint-disable react-hooks/rules-of-hooks */
/**
* Extended Playwright test fixture with FrigateApp.
*
* Every test imports `test` and `expect` from this file instead of
* @playwright/test directly. The `frigateApp` fixture provides a
* fully mocked Frigate frontend ready for interaction.
*
* The fixture also installs the error collector (see error-collector.ts).
* Any console error, page error, or same-origin failed request that is
* not on the global allowlist or the test's `expectedErrors` list will
* fail the test in the fixture's teardown.
*
* CRITICAL: All route/WS handlers are registered before page.goto()
* to prevent AuthProvider from redirecting to login.html.
*/
import { test as base, expect, type Page } from "@playwright/test";
import {
ApiMocker,
MediaMocker,
type ApiMockOverrides,
} from "../helpers/api-mocker";
import { WsMocker } from "../helpers/ws-mocker";
import { installErrorCollector, type ErrorCollector } from "./error-collector";
import { GLOBAL_ALLOWLIST } from "./error-allowlist";
export class FrigateApp {
public api: ApiMocker;
public media: MediaMocker;
public ws: WsMocker;
public page: Page;
private isDesktop: boolean;
constructor(page: Page, projectName: string) {
this.page = page;
this.api = new ApiMocker(page);
this.media = new MediaMocker(page);
this.ws = new WsMocker();
this.isDesktop = projectName === "desktop";
}
get isMobile() {
return !this.isDesktop;
}
/** Install all mocks with default data. Call before goto(). */
async installDefaults(overrides?: ApiMockOverrides) {
// Mock i18n locale files to prevent 404s
await this.page.route("**/locales/**", async (route) => {
// Let the request through to the built files
return route.fallback();
});
await this.ws.install(this.page);
await this.media.install();
await this.api.install(overrides);
}
/** Navigate to a page. Always call installDefaults() first. */
async goto(path: string) {
await this.page.goto(path);
// Wait for the app to render past the loading indicator
await this.page.waitForSelector("#pageRoot", { timeout: 10_000 });
}
/** Navigate to a page that may show a loading indicator */
async gotoAndWait(path: string, selector: string) {
await this.page.goto(path);
await this.page.waitForSelector(selector, { timeout: 10_000 });
}
}
type FrigateFixtures = {
frigateApp: FrigateApp;
/**
* Per-test additional allowlist regex patterns. Tests that intentionally
* trigger errors (e.g. error-state tests that hit a mocked 500) declare
* their expected errors here so the collector ignores them.
*
* Default is `[]`; most tests should not need this.
*/
expectedErrors: RegExp[];
errorCollector: ErrorCollector;
};
export const test = base.extend<FrigateFixtures>({
expectedErrors: [[], { option: true }],
errorCollector: async ({ page, expectedErrors }, use, testInfo) => {
const collector = installErrorCollector(page, [
...GLOBAL_ALLOWLIST,
...expectedErrors,
]);
await use(collector);
if (process.env.E2E_STRICT_ERRORS === "1") {
collector.assertClean();
} else if (collector.errors.length > 0) {
// Soft mode: attach errors to the test report so they're visible
// without failing the run.
await testInfo.attach("collected-errors.txt", {
body: collector.errors
.map((e) => `[${e.kind}] ${e.message}${e.url ? ` (${e.url})` : ""}`)
.join("\n"),
contentType: "text/plain",
});
}
},
frigateApp: async ({ page, errorCollector }, use, testInfo) => {
// Reference the collector so its `use()` runs and teardown fires
void errorCollector;
const app = new FrigateApp(page, testInfo.project.name);
await app.installDefaults();
await use(app);
},
});
export { expect };
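
Per-test usage is a plain Playwright option override. A hypothetical error-state spec (the import path and the /api/events 500 pattern are illustrative assumptions):

import { test, expect } from "../fixtures/frigate-fixture";

// Declare the errors this spec intentionally triggers; everything else
// still fails the test when E2E_STRICT_ERRORS=1.
test.use({
  expectedErrors: [/500 Internal Server Error.*\/api\/events/],
});

test("shows an error state when events fail to load", async ({ frigateApp }) => {
  await frigateApp.goto("/explore");
  await expect(frigateApp.page.getByText(/error/i)).toBeVisible();
});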

View File

@ -1,77 +0,0 @@
/**
* Camera activity WebSocket payload factory.
*
* The camera_activity topic payload is double-serialized:
* the WS message contains { topic: "camera_activity", payload: JSON.stringify(activityMap) }
*/
export interface CameraActivityState {
config: {
enabled: boolean;
detect: boolean;
record: boolean;
snapshots: boolean;
audio: boolean;
audio_transcription: boolean;
notifications: boolean;
notifications_suspended: number;
autotracking: boolean;
alerts: boolean;
detections: boolean;
object_descriptions: boolean;
review_descriptions: boolean;
};
motion: boolean;
objects: Array<{
label: string;
score: number;
box: [number, number, number, number];
area: number;
ratio: number;
region: [number, number, number, number];
current_zones: string[];
id: string;
}>;
audio_detections: Array<{
label: string;
score: number;
}>;
}
function defaultCameraActivity(): CameraActivityState {
return {
config: {
enabled: true,
detect: true,
record: true,
snapshots: true,
audio: false,
audio_transcription: false,
notifications: false,
notifications_suspended: 0,
autotracking: false,
alerts: true,
detections: true,
object_descriptions: false,
review_descriptions: false,
},
motion: false,
objects: [],
audio_detections: [],
};
}
export function cameraActivityPayload(
cameras: string[],
overrides?: Partial<Record<string, Partial<CameraActivityState>>>,
): string {
const activity: Record<string, CameraActivityState> = {};
for (const name of cameras) {
activity[name] = {
...defaultCameraActivity(),
...overrides?.[name],
} as CameraActivityState;
}
// Double-serialize: the WS payload is a JSON string
return JSON.stringify(activity);
}
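
To make the double serialization concrete: the string returned above is embedded in an outer envelope that is itself stringified before crossing the socket, so consumers parse twice. A sketch (the envelope shape follows the comment at the top of this file):

const frame = JSON.stringify({
  topic: "camera_activity",
  payload: cameraActivityPayload(["front_door"], {
    front_door: { motion: true },
  }),
});
// JSON.parse(frame).payload is still a string; parse it again to get
// the activity map keyed by camera name.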

View File

@ -1 +0,0 @@
[{"id": "case-001", "name": "Package Theft Investigation", "description": "Review of suspicious activity near the front porch", "created_at": 1775407931.3863528, "updated_at": 1775483531.3863528}]

File diff suppressed because one or more lines are too long

View File

@ -1,76 +0,0 @@
/**
* FrigateConfig factory for E2E tests.
*
* Uses a real config snapshot generated from the Python backend's FrigateConfig
* model. This guarantees all fields are present and match what the app expects.
* Tests override specific fields via DeepPartial.
*/
import { readFileSync } from "node:fs";
import { resolve, dirname } from "node:path";
import { fileURLToPath } from "node:url";
const __dirname = dirname(fileURLToPath(import.meta.url));
const configSnapshot = JSON.parse(
readFileSync(resolve(__dirname, "config-snapshot.json"), "utf-8"),
);
export type DeepPartial<T> = {
[P in keyof T]?: T[P] extends object ? DeepPartial<T[P]> : T[P];
};
function deepMerge<T extends Record<string, unknown>>(
base: T,
overrides?: DeepPartial<T>,
): T {
if (!overrides) return base;
const result = { ...base };
for (const key of Object.keys(overrides) as (keyof T)[]) {
const val = overrides[key];
if (
val !== undefined &&
typeof val === "object" &&
val !== null &&
!Array.isArray(val) &&
typeof base[key] === "object" &&
base[key] !== null &&
!Array.isArray(base[key])
) {
result[key] = deepMerge(
base[key] as Record<string, unknown>,
val as DeepPartial<Record<string, unknown>>,
) as T[keyof T];
} else if (val !== undefined) {
result[key] = val as T[keyof T];
}
}
return result;
}
// The base config is a real snapshot from the Python backend.
// Apply test-specific overrides: friendly names, camera groups, version.
export const BASE_CONFIG = {
...configSnapshot,
version: "0.15.0-test",
cameras: {
...configSnapshot.cameras,
front_door: {
...configSnapshot.cameras.front_door,
friendly_name: "Front Door",
},
backyard: {
...configSnapshot.cameras.backyard,
friendly_name: "Backyard",
},
garage: {
...configSnapshot.cameras.garage,
friendly_name: "Garage",
},
},
};
export function configFactory(
overrides?: DeepPartial<typeof BASE_CONFIG>,
): typeof BASE_CONFIG {
return deepMerge(BASE_CONFIG, overrides);
}
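
Typical usage overrides only the fields a test cares about and lets the snapshot supply everything else:

const config = configFactory({
  version: "0.16.0-test",
  cameras: {
    front_door: { friendly_name: "Porch Camera" },
  },
});
// deepMerge keeps every other snapshot field, so the app still receives
// a structurally complete FrigateConfig.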

View File

@ -1,54 +0,0 @@
/**
* Debug replay status factory.
*
* The Replay page polls /api/debug_replay/status every 1s via SWR.
* The no-session state shows an empty state; the active state
* renders the live camera image + debug toggles + objects/messages
* tabs. Used by replay.spec.ts.
*/
export type DebugReplayStatus = {
active: boolean;
replay_camera: string | null;
source_camera: string | null;
start_time: number | null;
end_time: number | null;
live_ready: boolean;
};
export function noSessionStatus(): DebugReplayStatus {
return {
active: false,
replay_camera: null,
source_camera: null,
start_time: null,
end_time: null,
live_ready: false,
};
}
export function activeSessionStatus(
opts: {
camera?: string;
sourceCamera?: string;
startTime?: number;
endTime?: number;
liveReady?: boolean;
} = {},
): DebugReplayStatus {
const {
camera = "front_door",
sourceCamera = "front_door",
startTime = Date.now() / 1000 - 3600,
endTime = Date.now() / 1000 - 1800,
liveReady = true,
} = opts;
return {
active: true,
replay_camera: camera,
source_camera: sourceCamera,
start_time: startTime,
end_time: endTime,
live_ready: liveReady,
};
}

View File

@ -1 +0,0 @@
[{"id": "event-person-001", "label": "person", "sub_label": null, "camera": "front_door", "start_time": 1775487131.3863528, "end_time": 1775487161.3863528, "false_positive": false, "zones": ["front_yard"], "thumbnail": null, "has_clip": true, "has_snapshot": true, "retain_indefinitely": false, "plus_id": null, "model_hash": "abc123", "detector_type": "cpu", "model_type": "ssd", "data": {"top_score": 0.92, "score": 0.92, "region": [0.1, 0.1, 0.5, 0.8], "box": [0.2, 0.15, 0.45, 0.75], "area": 0.18, "ratio": 0.6, "type": "object", "description": "A person walking toward the front door", "average_estimated_speed": 1.2, "velocity_angle": 45.0, "path_data": [[[0.2, 0.5], 0.0], [[0.3, 0.5], 1.0]]}}, {"id": "event-car-001", "label": "car", "sub_label": null, "camera": "backyard", "start_time": 1775483531.3863528, "end_time": 1775483576.3863528, "false_positive": false, "zones": ["driveway"], "thumbnail": null, "has_clip": true, "has_snapshot": true, "retain_indefinitely": false, "plus_id": null, "model_hash": "def456", "detector_type": "cpu", "model_type": "ssd", "data": {"top_score": 0.87, "score": 0.87, "region": [0.3, 0.2, 0.9, 0.7], "box": [0.35, 0.25, 0.85, 0.65], "area": 0.2, "ratio": 1.25, "type": "object", "description": "A car parked in the driveway", "average_estimated_speed": 0.0, "velocity_angle": 0.0, "path_data": []}}, {"id": "event-person-002", "label": "person", "sub_label": null, "camera": "garage", "start_time": 1775479931.3863528, "end_time": 1775479951.3863528, "false_positive": false, "zones": [], "thumbnail": null, "has_clip": false, "has_snapshot": true, "retain_indefinitely": false, "plus_id": null, "model_hash": "ghi789", "detector_type": "cpu", "model_type": "ssd", "data": {"top_score": 0.78, "score": 0.78, "region": [0.0, 0.0, 0.6, 0.9], "box": [0.1, 0.05, 0.5, 0.85], "area": 0.32, "ratio": 0.5, "type": "object", "description": null, "average_estimated_speed": 0.5, "velocity_angle": 90.0, "path_data": [[[0.1, 0.4], 0.0]]}}]

View File

@ -1 +0,0 @@
[{"id": "export-001", "camera": "front_door", "name": "Front Door - Person Alert", "date": 1775490731.3863528, "video_path": "/exports/export-001.mp4", "thumb_path": "/exports/export-001-thumb.jpg", "in_progress": false, "export_case_id": null}, {"id": "export-002", "camera": "backyard", "name": "Backyard - Car Detection", "date": 1775483531.3863528, "video_path": "/exports/export-002.mp4", "thumb_path": "/exports/export-002-thumb.jpg", "in_progress": false, "export_case_id": "case-001"}, {"id": "export-003", "camera": "garage", "name": "Garage - In Progress", "date": 1775492531.3863528, "video_path": "/exports/export-003.mp4", "thumb_path": "/exports/export-003-thumb.jpg", "in_progress": true, "export_case_id": null}]

View File

@ -1,45 +0,0 @@
/**
* Face library factories.
*
* The /api/faces endpoint returns a record keyed by collection name
* with the list of face image filenames. Grouped training attempts
* live under the "train" key with filenames of the form
* `${event_id}-${timestamp}-${label}-${score}.webp`.
*
* Used by face-library.spec.ts and chat.spec.ts (attachment chip).
*/
export type FacesMock = Record<string, string[]>;
export function basicFacesMock(): FacesMock {
return {
alice: ["alice-1.webp", "alice-2.webp"],
bob: ["bob-1.webp"],
charlie: ["charlie-1.webp"],
};
}
export function emptyFacesMock(): FacesMock {
return {};
}
/**
* Adds a grouped recent-recognition training attempt to an existing
* faces mock. The grouping key on the backend is the event id so
* images with the same event-id prefix render as one dialog-able card.
*/
export function withGroupedTrainingAttempt(
base: FacesMock,
opts: {
eventId: string;
attempts: Array<{ timestamp: number; label: string; score: number }>;
},
): FacesMock {
const trainImages = opts.attempts.map(
(a) => `${opts.eventId}-${a.timestamp}-${a.label}-${a.score}.webp`,
);
return {
...base,
train: [...(base.train ?? []), ...trainImages],
};
}

View File

@ -1,426 +0,0 @@
#!/usr/bin/env python3
"""Generate E2E mock data from backend Pydantic and Peewee models.
Run from the repo root:
PYTHONPATH=/workspace/frigate python3 web/e2e/fixtures/mock-data/generate-mock-data.py
Strategy:
- FrigateConfig: instantiate the Pydantic config model, then model_dump()
- API responses: instantiate Pydantic response models (ReviewSegmentResponse,
EventResponse, ExportModel, ExportCaseModel) to validate all required fields
- If the backend adds a required field, this script fails at instantiation time
- The Peewee model field list is checked to detect new columns that would
appear in .dicts() API responses but aren't in our mock data
"""
import json
import sys
import time
import warnings
from datetime import datetime, timedelta
from pathlib import Path
warnings.filterwarnings("ignore")
OUTPUT_DIR = Path(__file__).parent
NOW = time.time()
HOUR = 3600
CAMERAS = ["front_door", "backyard", "garage"]
def check_pydantic_fields(pydantic_class, mock_keys, model_name):
"""Verify mock data covers all fields declared in the Pydantic response model.
The Pydantic response model is what the frontend actually receives.
Peewee models may have extra legacy columns that are filtered out by
FastAPI's response_model validation.
"""
required_fields = set()
for name, field_info in pydantic_class.model_fields.items():
required_fields.add(name)
missing = required_fields - mock_keys
if missing:
print(
f" ERROR: {model_name} response model has fields not in mock data: {missing}",
file=sys.stderr,
)
print(
f" Add these fields to the mock data in this script.",
file=sys.stderr,
)
sys.exit(1)
extra = mock_keys - required_fields
if extra:
print(
f" NOTE: {model_name} mock data has extra fields (not in response model): {extra}",
)
def generate_config():
"""Generate FrigateConfig from the Python backend model."""
from frigate.config import FrigateConfig
config = FrigateConfig.model_validate_json(
json.dumps(
{
"mqtt": {"host": "mqtt"},
"cameras": {
cam: {
"ffmpeg": {
"inputs": [
{
"path": f"rtsp://10.0.0.{i+1}:554/video",
"roles": ["detect"],
}
]
},
"detect": {"height": 720, "width": 1280, "fps": 5},
}
for i, cam in enumerate(CAMERAS)
},
"camera_groups": {
"default": {
"cameras": CAMERAS,
"icon": "generic",
"order": 0,
},
"outdoor": {
"cameras": ["front_door", "backyard"],
"icon": "generic",
"order": 1,
},
},
}
)
)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
snapshot = config.model_dump()
# Runtime-computed fields not in the Pydantic dump
all_attrs = set()
for attrs in snapshot.get("model", {}).get("attributes_map", {}).values():
all_attrs.update(attrs)
snapshot["model"]["all_attributes"] = sorted(all_attrs)
snapshot["model"]["colormap"] = {}
return snapshot
def generate_reviews():
"""Generate ReviewSegmentResponse[] validated against Pydantic + Peewee."""
from frigate.api.defs.response.review_response import ReviewSegmentResponse
reviews = [
ReviewSegmentResponse(
id="review-alert-001",
camera="front_door",
severity="alert",
start_time=datetime.fromtimestamp(NOW - 2 * HOUR),
end_time=datetime.fromtimestamp(NOW - 2 * HOUR + 30),
has_been_reviewed=False,
thumb_path="/clips/front_door/review-alert-001-thumb.jpg",
data=json.dumps(
{
"audio": [],
"detections": ["person-abc123"],
"objects": ["person"],
"sub_labels": [],
"significant_motion_areas": [],
"zones": ["front_yard"],
}
),
),
ReviewSegmentResponse(
id="review-alert-002",
camera="backyard",
severity="alert",
start_time=datetime.fromtimestamp(NOW - 3 * HOUR),
end_time=datetime.fromtimestamp(NOW - 3 * HOUR + 45),
has_been_reviewed=True,
thumb_path="/clips/backyard/review-alert-002-thumb.jpg",
data=json.dumps(
{
"audio": [],
"detections": ["car-def456"],
"objects": ["car"],
"sub_labels": [],
"significant_motion_areas": [],
"zones": ["driveway"],
}
),
),
ReviewSegmentResponse(
id="review-detect-001",
camera="garage",
severity="detection",
start_time=datetime.fromtimestamp(NOW - 4 * HOUR),
end_time=datetime.fromtimestamp(NOW - 4 * HOUR + 20),
has_been_reviewed=False,
thumb_path="/clips/garage/review-detect-001-thumb.jpg",
data=json.dumps(
{
"audio": [],
"detections": ["person-ghi789"],
"objects": ["person"],
"sub_labels": [],
"significant_motion_areas": [],
"zones": [],
}
),
),
ReviewSegmentResponse(
id="review-detect-002",
camera="front_door",
severity="detection",
start_time=datetime.fromtimestamp(NOW - 5 * HOUR),
end_time=datetime.fromtimestamp(NOW - 5 * HOUR + 15),
has_been_reviewed=False,
thumb_path="/clips/front_door/review-detect-002-thumb.jpg",
data=json.dumps(
{
"audio": [],
"detections": ["car-jkl012"],
"objects": ["car"],
"sub_labels": [],
"significant_motion_areas": [],
"zones": ["front_yard"],
}
),
),
]
result = [r.model_dump(mode="json") for r in reviews]
# Verify mock data covers all Pydantic response model fields
check_pydantic_fields(
ReviewSegmentResponse, set(result[0].keys()), "ReviewSegment"
)
return result
def generate_events():
"""Generate EventResponse[] validated against Pydantic + Peewee."""
from frigate.api.defs.response.event_response import EventResponse
events = [
EventResponse(
id="event-person-001",
label="person",
sub_label=None,
camera="front_door",
start_time=NOW - 2 * HOUR,
end_time=NOW - 2 * HOUR + 30,
false_positive=False,
zones=["front_yard"],
thumbnail=None,
has_clip=True,
has_snapshot=True,
retain_indefinitely=False,
plus_id=None,
model_hash="abc123",
detector_type="cpu",
model_type="ssd",
data={
"top_score": 0.92,
"score": 0.92,
"region": [0.1, 0.1, 0.5, 0.8],
"box": [0.2, 0.15, 0.45, 0.75],
"area": 0.18,
"ratio": 0.6,
"type": "object",
"description": "A person walking toward the front door",
"average_estimated_speed": 1.2,
"velocity_angle": 45.0,
"path_data": [[[0.2, 0.5], 0.0], [[0.3, 0.5], 1.0]],
},
),
EventResponse(
id="event-car-001",
label="car",
sub_label=None,
camera="backyard",
start_time=NOW - 3 * HOUR,
end_time=NOW - 3 * HOUR + 45,
false_positive=False,
zones=["driveway"],
thumbnail=None,
has_clip=True,
has_snapshot=True,
retain_indefinitely=False,
plus_id=None,
model_hash="def456",
detector_type="cpu",
model_type="ssd",
data={
"top_score": 0.87,
"score": 0.87,
"region": [0.3, 0.2, 0.9, 0.7],
"box": [0.35, 0.25, 0.85, 0.65],
"area": 0.2,
"ratio": 1.25,
"type": "object",
"description": "A car parked in the driveway",
"average_estimated_speed": 0.0,
"velocity_angle": 0.0,
"path_data": [],
},
),
EventResponse(
id="event-person-002",
label="person",
sub_label=None,
camera="garage",
start_time=NOW - 4 * HOUR,
end_time=NOW - 4 * HOUR + 20,
false_positive=False,
zones=[],
thumbnail=None,
has_clip=False,
has_snapshot=True,
retain_indefinitely=False,
plus_id=None,
model_hash="ghi789",
detector_type="cpu",
model_type="ssd",
data={
"top_score": 0.78,
"score": 0.78,
"region": [0.0, 0.0, 0.6, 0.9],
"box": [0.1, 0.05, 0.5, 0.85],
"area": 0.32,
"ratio": 0.5,
"type": "object",
"description": None,
"average_estimated_speed": 0.5,
"velocity_angle": 90.0,
"path_data": [[[0.1, 0.4], 0.0]],
},
),
]
result = [e.model_dump(mode="json") for e in events]
check_pydantic_fields(EventResponse, set(result[0].keys()), "Event")
return result
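# The `path_data` entries above use the [[x, y], timestamp] shape the
# frontend's path overlay consumes, with coordinates normalized to the frame
# (an assumption adequate for fixtures, not values from a live detection).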
def generate_exports():
"""Generate ExportModel[] validated against Pydantic + Peewee."""
from frigate.api.defs.response.export_response import ExportModel
exports = [
ExportModel(
id="export-001",
camera="front_door",
name="Front Door - Person Alert",
date=NOW - 1 * HOUR,
video_path="/exports/export-001.mp4",
thumb_path="/exports/export-001-thumb.jpg",
in_progress=False,
export_case_id=None,
),
ExportModel(
id="export-002",
camera="backyard",
name="Backyard - Car Detection",
date=NOW - 3 * HOUR,
video_path="/exports/export-002.mp4",
thumb_path="/exports/export-002-thumb.jpg",
in_progress=False,
export_case_id="case-001",
),
ExportModel(
id="export-003",
camera="garage",
name="Garage - In Progress",
date=NOW - 0.5 * HOUR,
video_path="/exports/export-003.mp4",
thumb_path="/exports/export-003-thumb.jpg",
in_progress=True,
export_case_id=None,
),
]
result = [e.model_dump(mode="json") for e in exports]
check_pydantic_fields(ExportModel, set(result[0].keys()), "Export")
return result
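# export-002 deliberately references case-001 (created by generate_cases
# below) so the exports UI has at least one export grouped under a case.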
def generate_cases():
"""Generate ExportCaseModel[] validated against Pydantic + Peewee."""
from frigate.api.defs.response.export_case_response import ExportCaseModel
cases = [
ExportCaseModel(
id="case-001",
name="Package Theft Investigation",
description="Review of suspicious activity near the front porch",
created_at=NOW - 24 * HOUR,
updated_at=NOW - 3 * HOUR,
),
]
result = [c.model_dump(mode="json") for c in cases]
check_pydantic_fields(ExportCaseModel, set(result[0].keys()), "ExportCase")
return result
def generate_review_summary():
"""Generate ReviewSummary for the calendar filter."""
today = datetime.now().strftime("%Y-%m-%d")
yesterday = (datetime.now() - timedelta(days=1)).strftime("%Y-%m-%d")
return {
today: {
"day": today,
"reviewed_alert": 1,
"reviewed_detection": 0,
"total_alert": 2,
"total_detection": 2,
},
yesterday: {
"day": yesterday,
"reviewed_alert": 3,
"reviewed_detection": 2,
"total_alert": 3,
"total_detection": 4,
},
}
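# Keys are local "YYYY-MM-DD" strings from datetime.now() rather than the
# epoch-based NOW constant, so "today" in the summary matches the calendar
# date on the machine running the E2E suite.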
def write_json(filename, data):
path = OUTPUT_DIR / filename
path.write_text(json.dumps(data, default=str))
print(f" {path.name} ({path.stat().st_size} bytes)")
def main():
print("Generating E2E mock data from backend models...")
print(" Validating against Pydantic response models + Peewee DB columns")
print()
write_json("config-snapshot.json", generate_config())
write_json("reviews.json", generate_reviews())
write_json("events.json", generate_events())
write_json("exports.json", generate_exports())
write_json("cases.json", generate_cases())
write_json("review-summary.json", generate_review_summary())
print()
print("All mock data validated against backend schemas.")
print("If this script fails, update the mock data to match the new schema.")
if __name__ == "__main__":
main()

View File

@ -1,39 +0,0 @@
/**
* User profile factories for E2E tests.
*/
export interface UserProfile {
username: string;
role: string;
allowed_cameras: string[] | null;
}
export function adminProfile(overrides?: Partial<UserProfile>): UserProfile {
return {
username: "admin",
role: "admin",
allowed_cameras: null,
...overrides,
};
}
export function viewerProfile(overrides?: Partial<UserProfile>): UserProfile {
return {
username: "viewer",
role: "viewer",
allowed_cameras: null,
...overrides,
};
}
export function restrictedProfile(
cameras: string[],
overrides?: Partial<UserProfile>,
): UserProfile {
return {
username: "restricted",
role: "viewer",
allowed_cameras: cameras,
...overrides,
};
}
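// Usage sketches (camera names match the CAMERAS list used by the mock-data
// generator; the overrides shown are hypothetical):
// const limitedViewer = restrictedProfile(["front_door", "backyard"]);
// const renamedAdmin = adminProfile({ username: "admin2" });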

View File

@ -1 +0,0 @@
{"2026-04-06": {"day": "2026-04-06", "reviewed_alert": 1, "reviewed_detection": 0, "total_alert": 2, "total_detection": 2}, "2026-04-05": {"day": "2026-04-05", "reviewed_alert": 3, "reviewed_detection": 2, "total_alert": 3, "total_detection": 4}}

View File

@ -1 +0,0 @@
[{"id": "review-alert-001", "camera": "front_door", "start_time": "2026-04-06T09:52:11.386353", "end_time": "2026-04-06T09:52:41.386353", "has_been_reviewed": false, "severity": "alert", "thumb_path": "/clips/front_door/review-alert-001-thumb.jpg", "data": {"audio": [], "detections": ["person-abc123"], "objects": ["person"], "sub_labels": [], "significant_motion_areas": [], "zones": ["front_yard"]}}, {"id": "review-alert-002", "camera": "backyard", "start_time": "2026-04-06T08:52:11.386353", "end_time": "2026-04-06T08:52:56.386353", "has_been_reviewed": true, "severity": "alert", "thumb_path": "/clips/backyard/review-alert-002-thumb.jpg", "data": {"audio": [], "detections": ["car-def456"], "objects": ["car"], "sub_labels": [], "significant_motion_areas": [], "zones": ["driveway"]}}, {"id": "review-detect-001", "camera": "garage", "start_time": "2026-04-06T07:52:11.386353", "end_time": "2026-04-06T07:52:31.386353", "has_been_reviewed": false, "severity": "detection", "thumb_path": "/clips/garage/review-detect-001-thumb.jpg", "data": {"audio": [], "detections": ["person-ghi789"], "objects": ["person"], "sub_labels": [], "significant_motion_areas": [], "zones": []}}, {"id": "review-detect-002", "camera": "front_door", "start_time": "2026-04-06T06:52:11.386353", "end_time": "2026-04-06T06:52:26.386353", "has_been_reviewed": false, "severity": "detection", "thumb_path": "/clips/front_door/review-detect-002-thumb.jpg", "data": {"audio": [], "detections": ["car-jkl012"], "objects": ["car"], "sub_labels": [], "significant_motion_areas": [], "zones": ["front_yard"]}}]

View File

@ -1,76 +0,0 @@
/**
* FrigateStats factory for E2E tests.
*/
import type { DeepPartial } from "./config";
function cameraStats(_name: string) {
return {
    audio_dBFS: 0,
audio_rms: 0,
camera_fps: 5.0,
capture_pid: 100,
detection_enabled: 1,
detection_fps: 5.0,
ffmpeg_pid: 101,
pid: 102,
process_fps: 5.0,
skipped_fps: 0,
connection_quality: "excellent" as const,
expected_fps: 5,
reconnects_last_hour: 0,
stalls_last_hour: 0,
};
}
export const BASE_STATS = {
cameras: {
front_door: cameraStats("front_door"),
backyard: cameraStats("backyard"),
garage: cameraStats("garage"),
},
cpu_usages: {
"1": { cmdline: "frigate.app", cpu: "5.0", cpu_average: "4.5", mem: "2.1" },
},
detectors: {
cpu: {
detection_start: 0,
inference_speed: 75.5,
pid: 200,
},
},
gpu_usages: {},
npu_usages: {},
processes: {},
service: {
last_updated: Date.now() / 1000,
storage: {
"/media/frigate/recordings": {
free: 50000000000,
total: 100000000000,
used: 50000000000,
mount_type: "ext4",
},
"/tmp/cache": {
free: 500000000,
total: 1000000000,
used: 500000000,
mount_type: "tmpfs",
},
},
uptime: 86400,
latest_version: "0.15.0",
version: "0.15.0-test",
},
camera_fps: 15.0,
process_fps: 15.0,
skipped_fps: 0,
detection_fps: 15.0,
};
export function statsFactory(
  overrides?: DeepPartial<typeof BASE_STATS>,
): typeof BASE_STATS {
  if (!overrides) return BASE_STATS;
  // Shallow merge: nested objects in `overrides` replace the corresponding
  // BASE_STATS sub-objects wholesale rather than being deep-merged.
  return { ...BASE_STATS, ...overrides } as typeof BASE_STATS;
}
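// Usage sketch: simulate a struggling camera. Because the merge above is
// shallow, spread the nested objects you are partially overriding:
// const degraded = statsFactory({
//   cameras: {
//     ...BASE_STATS.cameras,
//     front_door: {
//       ...BASE_STATS.cameras.front_door,
//       camera_fps: 0,
//       skipped_fps: 5,
//     },
//   },
// });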

Some files were not shown because too many files have changed in this diff.